// SPDX-License-Identifier: GPL-2.0
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lu_object.h>
#include <uapi/linux/lnet/lnet-types.h>
#include "ptlrpc_internal.h"

/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure;
module_param(test_req_buffer_pressure, int, 0444);
MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
module_param(at_min, int, 0644);
MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
module_param(at_max, int, 0644);
MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
module_param(at_history, int, 0644);
MODULE_PARM_DESC(at_history,
		 "Adaptive timeouts remember the slowest event that took place within this period (sec)");
module_param(at_early_margin, int, 0644);
MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
module_param(at_extra, int, 0644);
MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");

static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);

/** Holds a list of all PTLRPC services */
LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;

static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request_buffer_desc *rqbd;

	rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
			    cfs_cpt_spread_node(svc->srv_cptable,
						svcpt->scp_cpt));
	if (!rqbd)
		return NULL;

	rqbd->rqbd_svcpt = svcpt;
	rqbd->rqbd_refcount = 0;
	rqbd->rqbd_cbid.cbid_fn = request_in_callback;
	rqbd->rqbd_cbid.cbid_arg = rqbd;
	INIT_LIST_HEAD(&rqbd->rqbd_reqs);
	rqbd->rqbd_buffer = libcfs_kvzalloc_cpt(svc->srv_cptable,
						svcpt->scp_cpt,
						svc->srv_buf_size,
						GFP_KERNEL);
	if (!rqbd->rqbd_buffer) {
		kfree(rqbd);
		return NULL;
	}

	spin_lock(&svcpt->scp_lock);
	list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
	svcpt->scp_nrqbds_total++;
	spin_unlock(&svcpt->scp_lock);

	return rqbd;
}

static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;

	LASSERT(rqbd->rqbd_refcount == 0);
	LASSERT(list_empty(&rqbd->rqbd_reqs));

	spin_lock(&svcpt->scp_lock);
	list_del(&rqbd->rqbd_list);
	svcpt->scp_nrqbds_total--;
	spin_unlock(&svcpt->scp_lock);

	kvfree(rqbd->rqbd_buffer);
	kfree(rqbd);
}

static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request_buffer_desc *rqbd;
	int rc = 0;
	int i;

	if (svcpt->scp_rqbd_allocating)
		goto try_post;

	spin_lock(&svcpt->scp_lock);
	/* check again with lock */
	if (svcpt->scp_rqbd_allocating) {
		/* NB: we might allow more than one thread in the future */
		LASSERT(svcpt->scp_rqbd_allocating == 1);
		spin_unlock(&svcpt->scp_lock);
		goto try_post;
	}

	svcpt->scp_rqbd_allocating++;
	spin_unlock(&svcpt->scp_lock);

	for (i = 0; i < svc->srv_nbuf_per_group; i++) {
		/* NB: another thread might have recycled enough rqbds, we
		 * need to make sure it wouldn't over-allocate, see LU-1212.
		 */
		if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
			break;

		rqbd = ptlrpc_alloc_rqbd(svcpt);

		if (!rqbd) {
			CERROR("%s: Can't allocate request buffer\n",
			       svc->srv_name);
			rc = -ENOMEM;
			break;
		}
	}

	spin_lock(&svcpt->scp_lock);

	LASSERT(svcpt->scp_rqbd_allocating == 1);
	svcpt->scp_rqbd_allocating--;

	spin_unlock(&svcpt->scp_lock);

	CDEBUG(D_RPCTRACE,
	       "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
	       svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
	       svcpt->scp_nrqbds_total, rc);

try_post:
	if (post && rc == 0)
		rc = ptlrpc_server_post_idle_rqbds(svcpt);

	return rc;
}

struct ptlrpc_hr_partition;

struct ptlrpc_hr_thread {
	int hrt_id;		/* thread ID */
	spinlock_t hrt_lock;
	wait_queue_head_t hrt_waitq;
	struct list_head hrt_queue;	/* RS queue */
	struct ptlrpc_hr_partition *hrt_partition;
};

struct ptlrpc_hr_partition {
	/* # of started threads */
	atomic_t hrp_nstarted;
	/* # of stopped threads */
	atomic_t hrp_nstopped;
	/* cpu partition id */
	int hrp_cpt;
	/* round-robin rotor for choosing thread */
	int hrp_rotor;
	/* total number of threads on this partition */
	int hrp_nthrs;
	/* array of threads */
	struct ptlrpc_hr_thread *hrp_thrs;
};

#define HRT_RUNNING 0
#define HRT_STOPPING 1

struct ptlrpc_hr_service {
	/* CPU partition table, it's just cfs_cpt_table for now */
	struct cfs_cpt_table *hr_cpt_table;
	/** controller sleep waitq */
	wait_queue_head_t hr_waitq;
	unsigned int hr_stopping;
	/** roundrobin rotor for non-affinity service */
	unsigned int hr_rotor;
	/* per-CPT partition data */
	struct ptlrpc_hr_partition **hr_partitions;
};

/** reply handling service. */
static struct ptlrpc_hr_service ptlrpc_hr;

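/*
 * The reply-handling (HR) service is a two-level structure: one
 * ptlrpc_hr_partition per CPU partition, each owning an array of
 * ptlrpc_hr_thread workers. Selection in ptlrpc_hr_select() below is a
 * two-step round robin: pick a partition (directly matched when the service
 * shares ptlrpc_hr's CPT table, otherwise via hr_rotor), then pick a thread
 * within it via hrp_rotor. Neither rotor is locked; occasional races only
 * skew the distribution slightly, which is acceptable for load spreading.
 */
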
/**
 * Choose an hr thread to dispatch requests to.
 */
static struct ptlrpc_hr_thread *
ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_hr_partition *hrp;
	unsigned int rotor;

	if (svcpt->scp_cpt >= 0 &&
	    svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
		/* directly match partition */
		hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];
	} else {
		rotor = ptlrpc_hr.hr_rotor++;
		rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);

		hrp = ptlrpc_hr.hr_partitions[rotor];
	}

	rotor = hrp->hrp_rotor++;
	return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
}

/**
 * Put reply state into a queue for processing because we received
 * ACK from the client
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
	struct ptlrpc_hr_thread *hrt;

	LASSERT(list_empty(&rs->rs_list));

	hrt = ptlrpc_hr_select(rs->rs_svcpt);

	spin_lock(&hrt->hrt_lock);
	list_add_tail(&rs->rs_list, &hrt->hrt_queue);
	spin_unlock(&hrt->hrt_lock);

	wake_up(&hrt->hrt_waitq);
}

void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
	assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
	assert_spin_locked(&rs->rs_lock);
	LASSERT(rs->rs_difficult);
	rs->rs_scheduled_ever = 1;	/* flag any notification attempt */

	if (rs->rs_scheduled) {	/* being set up or already notified */
		return;
	}

	rs->rs_scheduled = 1;
	list_del_init(&rs->rs_list);
	ptlrpc_dispatch_difficult_reply(rs);
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);

static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_request_buffer_desc *rqbd;
	int rc;
	int posted = 0;

	for (;;) {
		spin_lock(&svcpt->scp_lock);

		if (list_empty(&svcpt->scp_rqbd_idle)) {
			spin_unlock(&svcpt->scp_lock);
			return posted;
		}

		rqbd = list_entry(svcpt->scp_rqbd_idle.next,
				  struct ptlrpc_request_buffer_desc,
				  rqbd_list);
		list_del(&rqbd->rqbd_list);

		/* assume we will post successfully */
		svcpt->scp_nrqbds_posted++;
		list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);

		spin_unlock(&svcpt->scp_lock);

		rc = ptlrpc_register_rqbd(rqbd);
		if (rc != 0)
			break;

		posted = 1;
	}

	spin_lock(&svcpt->scp_lock);

	svcpt->scp_nrqbds_posted--;
	list_del(&rqbd->rqbd_list);
	list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);

	/* Don't complain if no request buffers are posted right now; LNET
	 * won't drop requests because we set the portal lazy!
	 */

	spin_unlock(&svcpt->scp_lock);

	return -1;
}

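/*
 * Return-value convention above: the loop exits normally with the number of
 * buffers successfully posted (0 or 1 once any post succeeded), while
 * falling out of the loop after a failed ptlrpc_register_rqbd() re-idles
 * the buffer and returns -1 so the caller can arm scp_rqbd_timeout and
 * retry later.
 */
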
static void ptlrpc_at_timer(unsigned long castmeharder)
{
	struct ptlrpc_service_part *svcpt;

	svcpt = (struct ptlrpc_service_part *)castmeharder;

	svcpt->scp_at_check = 1;
	svcpt->scp_at_checktime = cfs_time_current();
	wake_up(&svcpt->scp_waitq);
}

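/*
 * Note that the timer callback above does no real work: it only flags the
 * service partition (scp_at_check) and wakes a service thread; the actual
 * early-reply scan happens in ptlrpc_at_check_timed(), called from
 * ptlrpc_main() in thread context where blocking operations are allowed.
 */
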
static void
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
			     struct ptlrpc_service_conf *conf)
{
	struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
	unsigned int init;
	unsigned int total;
	unsigned int nthrs;
	int weight;

	/*
	 * Common code for estimating & validating threads number.
	 * CPT affinity service could have percpt thread-pool instead
	 * of a global thread-pool, which means user might not always
	 * get the threads number they give it in conf::tc_nthrs_user
	 * even they did set. It's because we need to validate threads
	 * number for each CPT to guarantee each pool will have enough
	 * threads to keep the service healthy.
	 */
	init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
	init = max_t(int, init, tc->tc_nthrs_init);

	/* NB: please see comments in lustre_lnet.h for definition
	 * details of these members
	 */
	LASSERT(tc->tc_nthrs_max != 0);

	if (tc->tc_nthrs_user != 0) {
		/* In case there is a reason to test a service with many
		 * threads, we give a less strict check here, it can
		 * be up to 8 * nthrs_max
		 */
		total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
		nthrs = total / svc->srv_ncpts;
		init = max(init, nthrs);
		goto out;
	}

	total = tc->tc_nthrs_max;
	if (tc->tc_nthrs_base == 0) {
		/* don't care about base threads number per partition,
		 * this is most for non-affinity service
		 */
		nthrs = total / svc->srv_ncpts;
		goto out;
	}

	nthrs = tc->tc_nthrs_base;
	if (svc->srv_ncpts == 1) {
		int i;

		/* NB: Increase the base number if it's single partition
		 * and total number of cores/HTs is larger or equal to 4.
		 * result will always < 2 * nthrs_base
		 */
		weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
		for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
			    (tc->tc_nthrs_base >> i) != 0; i++)
			nthrs += tc->tc_nthrs_base >> i;
	}

	if (tc->tc_thr_factor != 0) {
		int factor = tc->tc_thr_factor;
		const int fade = 4;

		/*
		 * User wants to increase number of threads with for
		 * each CPU core/HT, most likely the factor is larger than
		 * one thread/core because service threads are supposed to
		 * be blocked by lock or wait for IO.
		 */
		/*
		 * Amdahl's law says that adding processors wouldn't give
		 * a linear increasing of parallelism, so it's nonsense to
		 * have too many threads no matter how many cores/HTs
		 * there are.
		 */
		/* weight is # of HTs */
		if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
			/* depress thread factor for hyper-thread */
			factor = factor - (factor >> 1) + (factor >> 3);
		}

		weight = cfs_cpt_weight(svc->srv_cptable, 0);

		for (; factor > 0 && weight > 0; factor--, weight -= fade)
			nthrs += min(weight, fade) * factor;
	}

	if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
		nthrs = max(tc->tc_nthrs_base,
			    tc->tc_nthrs_max / svc->srv_ncpts);
	}
out:
	nthrs = max(nthrs, tc->tc_nthrs_init);
	svc->srv_nthrs_cpt_limit = nthrs;
	svc->srv_nthrs_cpt_init = init;

	if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
		CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
		       svc->srv_name, nthrs * svc->srv_ncpts,
		       tc->tc_nthrs_max);
	}
}

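/*
 * A worked example with hypothetical numbers may help: assume a single
 * partition (srv_ncpts == 1) of 16 hyper-threaded cores, tc_nthrs_base = 8
 * and tc_thr_factor = 4. The single-partition bump adds 8>>1 + 8>>2 + 8>>3
 * = 7 threads (nthrs = 15, always below 2 * nthrs_base). Hyper-threading
 * depresses the factor to 4 - 2 + 0 = 2, and the fading loop then adds
 * min(16, 4) * 2 + min(12, 4) * 1 = 12 more, giving nthrs = 27 before the
 * tc_nthrs_max clamp is applied.
 */
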
/**
 * Initialize percpt data for a service
 */
static int
ptlrpc_service_part_init(struct ptlrpc_service *svc,
			 struct ptlrpc_service_part *svcpt, int cpt)
{
	struct ptlrpc_at_array *array;
	int size;
	int index;
	int rc;

	svcpt->scp_cpt = cpt;
	INIT_LIST_HEAD(&svcpt->scp_threads);

	/* rqbd and incoming request queue */
	spin_lock_init(&svcpt->scp_lock);
	INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
	INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
	INIT_LIST_HEAD(&svcpt->scp_req_incoming);
	init_waitqueue_head(&svcpt->scp_waitq);
	/* history request & rqbd list */
	INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
	INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);

	/* active requests and hp requests */
	spin_lock_init(&svcpt->scp_req_lock);

	/* reply states */
	spin_lock_init(&svcpt->scp_rep_lock);
	INIT_LIST_HEAD(&svcpt->scp_rep_active);
	INIT_LIST_HEAD(&svcpt->scp_rep_idle);
	init_waitqueue_head(&svcpt->scp_rep_waitq);
	atomic_set(&svcpt->scp_nreps_difficult, 0);

	/* adaptive timeout */
	spin_lock_init(&svcpt->scp_at_lock);
	array = &svcpt->scp_at_array;

	size = at_est2timeout(at_max);
	array->paa_size = size;
	array->paa_count = 0;
	array->paa_deadline = -1;

	/* allocate memory for scp_at_array (ptlrpc_at_array) */
	array->paa_reqs_array =
		kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
	if (!array->paa_reqs_array)
		return -ENOMEM;

	for (index = 0; index < size; index++)
		INIT_LIST_HEAD(&array->paa_reqs_array[index]);

	array->paa_reqs_count =
		kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
	if (!array->paa_reqs_count)
		goto free_reqs_array;

	setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
		    (unsigned long)svcpt);

	/* At SOW, service time should be quick; 10s seems generous. If client
	 * timeout is less than this, we'll be sending an early reply.
	 */
	at_init(&svcpt->scp_at_estimate, 10, 0);

	/* assign this before call ptlrpc_grow_req_bufs */
	svcpt->scp_service = svc;
	/* Now allocate the request buffers, but don't post them now */
	rc = ptlrpc_grow_req_bufs(svcpt, 0);
	/* We shouldn't be under memory pressure at startup, so
	 * fail if we can't allocate all our buffers at this time.
	 */
	if (rc != 0)
		goto free_reqs_count;

	return 0;

free_reqs_count:
	kfree(array->paa_reqs_count);
	array->paa_reqs_count = NULL;
free_reqs_array:
	kfree(array->paa_reqs_array);
	array->paa_reqs_array = NULL;

	return -ENOMEM;
}

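/*
 * The AT array initialized above is effectively a ring of "size" buckets
 * (size = at_est2timeout(at_max), i.e. the largest deadline we can hand
 * out): a request with deadline D lands in bucket D % paa_size, so all
 * requests in one bucket share deadlines congruent modulo the array size.
 * paa_reqs_count[] mirrors the bucket list lengths and lets the scan in
 * ptlrpc_at_check_timed() stop early.
 */
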
/**
 * Initialize service on a given portal.
 * This includes starting serving threads, allocating and posting rqbds and
 * so on.
 */
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
			struct kset *parent,
			struct dentry *debugfs_entry)
{
	struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
	struct ptlrpc_service *service;
	struct ptlrpc_service_part *svcpt;
	struct cfs_cpt_table *cptable;
	__u32 *cpts = NULL;
	int ncpts;
	int cpt;
	int rc;
	int i;

	LASSERT(conf->psc_buf.bc_nbufs > 0);
	LASSERT(conf->psc_buf.bc_buf_size >=
		conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
	LASSERT(conf->psc_thr.tc_ctx_tags != 0);

	cptable = cconf->cc_cptable;
	if (!cptable)
		cptable = cfs_cpt_table;

	if (!conf->psc_thr.tc_cpu_affinity) {
		ncpts = 1;
	} else {
		ncpts = cfs_cpt_number(cptable);
		if (cconf->cc_pattern) {
			struct cfs_expr_list *el;

			rc = cfs_expr_list_parse(cconf->cc_pattern,
						 strlen(cconf->cc_pattern),
						 0, ncpts - 1, &el);
			if (rc != 0) {
				CERROR("%s: invalid CPT pattern string: %s",
				       conf->psc_name, cconf->cc_pattern);
				return ERR_PTR(-EINVAL);
			}

			rc = cfs_expr_list_values(el, ncpts, &cpts);
			cfs_expr_list_free(el);
			if (rc <= 0) {
				CERROR("%s: failed to parse CPT array %s: %d\n",
				       conf->psc_name, cconf->cc_pattern, rc);
				kfree(cpts);
				return ERR_PTR(rc < 0 ? rc : -EINVAL);
			}
			ncpts = rc;
		}
	}

	service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
			  GFP_NOFS);
	if (!service) {
		kfree(cpts);
		return ERR_PTR(-ENOMEM);
	}

	service->srv_cptable = cptable;
	service->srv_cpts = cpts;
	service->srv_ncpts = ncpts;

	service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
	while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
		service->srv_cpt_bits++;

	/* public members */
	spin_lock_init(&service->srv_lock);
	service->srv_name = conf->psc_name;
	service->srv_watchdog_factor = conf->psc_watchdog_factor;
	INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */

	/* buffer configuration */
	service->srv_nbuf_per_group = test_req_buffer_pressure ?
				      1 : conf->psc_buf.bc_nbufs;
	service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
				    SPTLRPC_MAX_PAYLOAD;
	service->srv_buf_size = conf->psc_buf.bc_buf_size;
	service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
	service->srv_req_portal = conf->psc_buf.bc_req_portal;

	/* Increase max reply size to next power of two */
	service->srv_max_reply_size = 1;
	while (service->srv_max_reply_size <
	       conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
		service->srv_max_reply_size <<= 1;

	service->srv_thread_name = conf->psc_thr.tc_thr_name;
	service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
	service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
	service->srv_ops = conf->psc_ops;

	for (i = 0; i < ncpts; i++) {
		if (!conf->psc_thr.tc_cpu_affinity)
			cpt = CFS_CPT_ANY;
		else
			cpt = cpts ? cpts[i] : i;

		svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
				     cfs_cpt_spread_node(cptable, cpt));
		if (!svcpt) {
			rc = -ENOMEM;
			goto failed;
		}

		service->srv_parts[i] = svcpt;
		rc = ptlrpc_service_part_init(service, svcpt, cpt);
		if (rc != 0)
			goto failed;
	}

	ptlrpc_server_nthreads_check(service, conf);

	rc = LNetSetLazyPortal(service->srv_req_portal);
	LASSERT(rc == 0);

	mutex_lock(&ptlrpc_all_services_mutex);
	list_add(&service->srv_list, &ptlrpc_all_services);
	mutex_unlock(&ptlrpc_all_services_mutex);

	if (parent) {
		rc = ptlrpc_sysfs_register_service(parent, service);
		if (rc)
			goto failed;
	}

	if (!IS_ERR_OR_NULL(debugfs_entry))
		ptlrpc_ldebugfs_register_service(debugfs_entry, service);

	rc = ptlrpc_service_nrs_setup(service);
	if (rc != 0)
		goto failed;

	CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
	       service->srv_name, service->srv_req_portal);

	rc = ptlrpc_start_threads(service);
	if (rc != 0) {
		CERROR("Failed to start threads for service %s: %d\n",
		       service->srv_name, rc);
		goto failed;
	}

	return service;
failed:
	ptlrpc_unregister_service(service);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ptlrpc_register_service);

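/*
 * Usage sketch (hypothetical caller, field values for illustration only):
 * a client-side service such as the ldlm callback service fills in a
 * ptlrpc_service_conf and registers it once at setup:
 *
 *	static struct ptlrpc_service_conf conf = {
 *		.psc_name		= "ldlm_cbd",
 *		.psc_watchdog_factor	= 2,
 *		.psc_buf		= { .bc_nbufs = 64, ... },
 *		.psc_thr		= { .tc_nthrs_max = 32, ... },
 *		.psc_ops		= { .so_req_handler = my_handler },
 *	};
 *	struct ptlrpc_service *svc;
 *
 *	svc = ptlrpc_register_service(&conf, parent_kset, debugfs_dir);
 *	if (IS_ERR(svc))
 *		return PTR_ERR(svc);
 *
 * On any failure after partial setup, the function cleans up through
 * ptlrpc_unregister_service() before returning an ERR_PTR().
 */
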
/**
 * to actually free the request, must be called without holding svc_lock.
 * note it's caller's responsibility to unlink req->rq_list.
 */
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
	LASSERT(atomic_read(&req->rq_refcount) == 0);
	LASSERT(list_empty(&req->rq_timed_list));

	/* DEBUG_REQ() assumes the reply state of a request with a valid
	 * ref will not be destroyed until that reference is dropped.
	 */
	ptlrpc_req_drop_rs(req);

	sptlrpc_svc_ctx_decref(req);

	if (req != &req->rq_rqbd->rqbd_req) {
		/* NB request buffers use an embedded
		 * req if the incoming req unlinked the
		 * MD; this isn't one of them!
		 */
		ptlrpc_request_cache_free(req);
	}
}

/**
 * drop a reference count of the request. if it reaches 0, we either
 * put it into history list, or free it immediately.
 */
static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
{
	struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
	int refcount;
	struct list_head *tmp;
	struct list_head *nxt;

	if (!atomic_dec_and_test(&req->rq_refcount))
		return;

	if (req->rq_at_linked) {
		spin_lock(&svcpt->scp_at_lock);
		/* recheck with lock, in case it's unlinked by
		 * ptlrpc_at_check_timed()
		 */
		if (likely(req->rq_at_linked))
			ptlrpc_at_remove_timed(req);
		spin_unlock(&svcpt->scp_at_lock);
	}

	LASSERT(list_empty(&req->rq_timed_list));

	/* finalize request */
	if (req->rq_export) {
		class_export_put(req->rq_export);
		req->rq_export = NULL;
	}

	spin_lock(&svcpt->scp_lock);

	list_add(&req->rq_list, &rqbd->rqbd_reqs);

	refcount = --(rqbd->rqbd_refcount);
	if (refcount == 0) {
		/* request buffer is now idle: add to history */
		list_del(&rqbd->rqbd_list);

		list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
		svcpt->scp_hist_nrqbds++;

		/* cull some history?
		 * I expect only about 1 or 2 rqbds need to be recycled here
		 */
		while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
			rqbd = list_entry(svcpt->scp_hist_rqbds.next,
					  struct ptlrpc_request_buffer_desc,
					  rqbd_list);

			list_del(&rqbd->rqbd_list);
			svcpt->scp_hist_nrqbds--;

			/* remove rqbd's reqs from svc's req history while
			 * I've got the service lock
			 */
			list_for_each(tmp, &rqbd->rqbd_reqs) {
				req = list_entry(tmp, struct ptlrpc_request,
						 rq_list);
				/* Track the highest culled req seq */
				if (req->rq_history_seq >
				    svcpt->scp_hist_seq_culled) {
					svcpt->scp_hist_seq_culled =
						req->rq_history_seq;
				}
				list_del(&req->rq_history_list);
			}

			spin_unlock(&svcpt->scp_lock);

			list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
				req = list_entry(rqbd->rqbd_reqs.next,
						 struct ptlrpc_request,
						 rq_list);
				list_del(&req->rq_list);
				ptlrpc_server_free_request(req);
			}

			spin_lock(&svcpt->scp_lock);
			/*
			 * now all reqs including the embedded req have been
			 * disposed, schedule request buffer for re-use.
			 */
			LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
				0);
			list_add_tail(&rqbd->rqbd_list,
				      &svcpt->scp_rqbd_idle);
		}

		spin_unlock(&svcpt->scp_lock);
	} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
		/* If we are low on memory, we are not interested in history */
		list_del(&req->rq_list);
		list_del_init(&req->rq_history_list);

		/* Track the highest culled req seq */
		if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
			svcpt->scp_hist_seq_culled = req->rq_history_seq;

		spin_unlock(&svcpt->scp_lock);

		ptlrpc_server_free_request(req);
	} else {
		spin_unlock(&svcpt->scp_lock);
	}
}

/**
 * to finish a request: stop sending more early replies, and release
 * the request.
 */
static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
					 struct ptlrpc_request *req)
{
	ptlrpc_server_hpreq_fini(req);

	if (req->rq_session.lc_thread) {
		lu_context_exit(&req->rq_session);
		lu_context_fini(&req->rq_session);
	}

	ptlrpc_server_drop_request(req);
}

/**
 * to finish an active request: stop sending more early replies, and release
 * the request. should be called after we finished handling the request.
 */
static void ptlrpc_server_finish_active_request(
					struct ptlrpc_service_part *svcpt,
					struct ptlrpc_request *req)
{
	spin_lock(&svcpt->scp_req_lock);
	ptlrpc_nrs_req_stop_nolock(req);
	svcpt->scp_nreqs_active--;
	if (req->rq_hp)
		svcpt->scp_nhreqs_active--;
	spin_unlock(&svcpt->scp_req_lock);

	ptlrpc_nrs_req_finalize(req);

	if (req->rq_export)
		class_export_rpc_dec(req->rq_export);

	ptlrpc_server_finish_request(svcpt, req);
}

/**
 * Sanity check request \a req.
 * Return 0 if all is ok, error code otherwise.
 */
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
	struct obd_device *obd = req->rq_export->exp_obd;
	int rc = 0;

	if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
		     req->rq_export->exp_conn_cnt)) {
		DEBUG_REQ(D_RPCTRACE, req,
			  "DROPPING req from old connection %d < %d",
			  lustre_msg_get_conn_cnt(req->rq_reqmsg),
			  req->rq_export->exp_conn_cnt);
		return -EEXIST;
	}
	if (unlikely(!obd || obd->obd_fail)) {
		/*
		 * Failing over, don't handle any more reqs, send
		 * error response instead.
		 */
		CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
		       req, obd ? obd->obd_name : "unknown");
		rc = -ENODEV;
	} else if (lustre_msg_get_flags(req->rq_reqmsg) &
		   (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) {
		DEBUG_REQ(D_ERROR, req, "Invalid replay without recovery");
		class_fail_export(req->rq_export);
		rc = -ENODEV;
	} else if (lustre_msg_get_transno(req->rq_reqmsg) != 0) {
		DEBUG_REQ(D_ERROR, req,
			  "Invalid req with transno %llu without recovery",
			  lustre_msg_get_transno(req->rq_reqmsg));
		class_fail_export(req->rq_export);
		rc = -ENODEV;
	}

	if (unlikely(rc < 0)) {
		req->rq_status = rc;
		ptlrpc_error(req);
	}
	return rc;
}

static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	__s32 next;

	if (array->paa_count == 0) {
		del_timer(&svcpt->scp_at_timer);
		return;
	}

	/* Set timer for closest deadline */
	next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
		       at_early_margin);
	if (next <= 0) {
		ptlrpc_at_timer((unsigned long)svcpt);
	} else {
		mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
		CDEBUG(D_INFO, "armed %s at %+ds\n",
		       svcpt->scp_service->srv_name, next);
	}
}

/* Add rpc to early reply check list */
static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	struct ptlrpc_request *rq = NULL;
	__u32 index;

	if (AT_OFF)
		return 0;

	if (req->rq_no_reply)
		return 0;

	if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
		return -ENOSYS;

	spin_lock(&svcpt->scp_at_lock);
	LASSERT(list_empty(&req->rq_timed_list));

	div_u64_rem(req->rq_deadline, array->paa_size, &index);
	if (array->paa_reqs_count[index] > 0) {
		/* latest rpcs will have the latest deadlines in the list,
		 * so search backward.
		 */
		list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
					    rq_timed_list) {
			if (req->rq_deadline >= rq->rq_deadline) {
				list_add(&req->rq_timed_list,
					 &rq->rq_timed_list);
				break;
			}
		}
	}

	/* Add the request at the head of the list */
	if (list_empty(&req->rq_timed_list))
		list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);

	spin_lock(&req->rq_lock);
	req->rq_at_linked = 1;
	spin_unlock(&req->rq_lock);
	req->rq_at_index = index;
	array->paa_reqs_count[index]++;
	array->paa_count++;
	if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
		array->paa_deadline = req->rq_deadline;
		ptlrpc_at_set_timer(svcpt);
	}
	spin_unlock(&svcpt->scp_at_lock);

	return 0;
}

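/*
 * Bucket placement above: div_u64_rem() maps rq_deadline onto
 * index = deadline % paa_size, and within a bucket the backward search
 * keeps entries sorted by deadline with the cheapest (most recent) case
 * first. For illustration, with paa_size = 600 a deadline of 1000 and a
 * deadline of 1600 share bucket 400, which is why the scan in
 * ptlrpc_at_check_timed() must still compare each entry's rq_deadline
 * against "now" rather than trusting the bucket alone.
 */
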
static void
ptlrpc_at_remove_timed(struct ptlrpc_request *req)
{
	struct ptlrpc_at_array *array;

	array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;

	/* NB: must call with hold svcpt::scp_at_lock */
	LASSERT(!list_empty(&req->rq_timed_list));
	list_del_init(&req->rq_timed_list);

	spin_lock(&req->rq_lock);
	req->rq_at_linked = 0;
	spin_unlock(&req->rq_lock);

	array->paa_reqs_count[req->rq_at_index]--;
	array->paa_count--;
}

/*
 * Attempt to extend the request deadline by sending an early reply to the
 * client.
 */
static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_request *reqcopy;
	struct lustre_msg *reqmsg;
	long olddl = req->rq_deadline - ktime_get_real_seconds();
	time64_t newdl;
	int rc;

	/* deadline is when the client expects us to reply, margin is the
	 * difference between clients' and servers' expectations
	 */
	DEBUG_REQ(D_ADAPTTO, req,
		  "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
		  AT_OFF ? "AT off - not " : "",
		  olddl, olddl - at_get(&svcpt->scp_at_estimate),
		  at_get(&svcpt->scp_at_estimate), at_extra);

	if (AT_OFF)
		return 0;

	if (olddl < 0) {
		DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
			  olddl, at_early_margin);

		/* Return an error so we're not re-added to the timed list. */
		return -ETIMEDOUT;
	}

	if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
		return -ENOSYS;
	}

	/*
	 * We want to extend the request deadline by at_extra seconds,
	 * so we set our service estimate to reflect how much time has
	 * passed since this request arrived plus an additional
	 * at_extra seconds. The client will calculate the new deadline
	 * based on this service estimate (plus some additional time to
	 * account for network latency). See ptlrpc_at_recv_early_reply
	 */
	at_measured(&svcpt->scp_at_estimate, at_extra +
		    ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
	newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate);

	/* Check to see if we've actually increased the deadline -
	 * we may be past adaptive_max
	 */
	if (req->rq_deadline >= newdl) {
		DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
			  olddl, newdl - ktime_get_real_seconds());
		return -ETIMEDOUT;
	}

	reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!reqcopy)
		return -ENOMEM;
	reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS);
	if (!reqmsg) {
		rc = -ENOMEM;
		goto out_free;
	}

	*reqcopy = *req;
	reqcopy->rq_reply_state = NULL;
	reqcopy->rq_rep_swab_mask = 0;
	reqcopy->rq_pack_bulk = 0;
	reqcopy->rq_pack_udesc = 0;
	reqcopy->rq_packed_final = 0;
	sptlrpc_svc_ctx_addref(reqcopy);
	/* We only need the reqmsg for the magic */
	reqcopy->rq_reqmsg = reqmsg;
	memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);

	LASSERT(atomic_read(&req->rq_refcount));
	/** if it is last refcount then early reply isn't needed */
	if (atomic_read(&req->rq_refcount) == 1) {
		DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
		rc = -EINVAL;
		goto out;
	}

	/* Connection ref */
	reqcopy->rq_export = class_conn2export(
			lustre_msg_get_handle(reqcopy->rq_reqmsg));
	if (!reqcopy->rq_export) {
		rc = -ENODEV;
		goto out;
	}

	/* RPC ref */
	class_export_rpc_inc(reqcopy->rq_export);
	if (reqcopy->rq_export->exp_obd &&
	    reqcopy->rq_export->exp_obd->obd_fail) {
		rc = -ENODEV;
		goto out_put;
	}

	rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
	if (rc)
		goto out_put;

	rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);

	if (!rc) {
		/* Adjust our own deadline to what we told the client */
		req->rq_deadline = newdl;
		req->rq_early_count++;	/* number sent, server side */
	} else {
		DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
	}

	/* Free the (early) reply state from lustre_pack_reply.
	 * (ptlrpc_send_reply takes its own rs ref, so this is safe here)
	 */
	ptlrpc_req_drop_rs(reqcopy);

out_put:
	class_export_rpc_dec(reqcopy->rq_export);
	class_export_put(reqcopy->rq_export);
out:
	sptlrpc_svc_ctx_decref(reqcopy);
	kvfree(reqmsg);
out_free:
	ptlrpc_request_cache_free(reqcopy);
	return rc;
}

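/*
 * Deadline arithmetic above, with hypothetical numbers: if the request
 * arrived at time T, it is now T+25 and at_extra = 30, then at_measured()
 * is fed 30 + 25 = 55 and newdl becomes at least T + 55, i.e. the client
 * is asked for roughly 30 more seconds beyond what has already elapsed. If
 * the estimate is already pinned at at_max, newdl may not exceed the
 * current deadline and the early reply is skipped with -ETIMEDOUT.
 */
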
/* Send early replies to everybody expiring within at_early_margin
 * asking for at_extra time
 */
static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	struct ptlrpc_request *rq, *n;
	struct list_head work_list;
	__u32 index, count;
	time64_t deadline;
	time64_t now = ktime_get_real_seconds();
	unsigned long delay;
	int first, counter = 0;

	spin_lock(&svcpt->scp_at_lock);
	if (svcpt->scp_at_check == 0) {
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}
	delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
	svcpt->scp_at_check = 0;

	if (array->paa_count == 0) {
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}

	/* The timer went off, but maybe the nearest rpc already completed. */
	first = array->paa_deadline - now;
	if (first > at_early_margin) {
		/* We've still got plenty of time. Reset the timer. */
		ptlrpc_at_set_timer(svcpt);
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}

	/* We're close to a timeout, and we don't know how much longer the
	 * server will take. Send early replies to everyone expiring soon.
	 */
	INIT_LIST_HEAD(&work_list);
	deadline = -1;
	div_u64_rem(array->paa_deadline, array->paa_size, &index);
	count = array->paa_count;
	while (count > 0) {
		count -= array->paa_reqs_count[index];
		list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
					 rq_timed_list) {
			if (rq->rq_deadline > now + at_early_margin) {
				/* update the earliest deadline */
				if (deadline == -1 ||
				    rq->rq_deadline < deadline)
					deadline = rq->rq_deadline;
				break;
			}

			ptlrpc_at_remove_timed(rq);
			/**
			 * ptlrpc_server_drop_request() may drop
			 * refcount to 0 already. Let's check this and
			 * don't add entry to work_list
			 */
			if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
				list_add(&rq->rq_timed_list, &work_list);
			counter++;
		}

		if (++index >= array->paa_size)
			index = 0;
	}
	array->paa_deadline = deadline;
	/* we have a new earliest deadline, restart the timer */
	ptlrpc_at_set_timer(svcpt);

	spin_unlock(&svcpt->scp_at_lock);

	CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
	       first, at_extra, counter);
	if (first < 0) {
		/* We're already past request deadlines before we even get a
		 * chance to send early replies
		 */
		LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
			      svcpt->scp_service->srv_name);
		CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
		      counter, svcpt->scp_nreqs_incoming,
		      svcpt->scp_nreqs_active,
		      at_get(&svcpt->scp_at_estimate), delay);
	}

	/* we took additional refcount so entries can't be deleted from list, no
	 * locking is needed
	 */
	while (!list_empty(&work_list)) {
		rq = list_entry(work_list.next, struct ptlrpc_request,
				rq_timed_list);
		list_del_init(&rq->rq_timed_list);

		if (ptlrpc_at_send_early_reply(rq) == 0)
			ptlrpc_at_add_timed(rq);

		ptlrpc_server_drop_request(rq);
	}
}

/**
 * Put the request to the export list if the request may become
 * a high priority one.
 */
static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
				    struct ptlrpc_request *req)
{
	int rc = 0;

	if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
		rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
		if (rc < 0)
			return rc;
		LASSERT(rc == 0);
	}
	if (req->rq_export && req->rq_ops) {
		/* Perform request specific check. We should do this check
		 * before the request is added into exp_hp_rpcs list otherwise
		 * it may hit swab race at LU-1044.
		 */
		if (req->rq_ops->hpreq_check) {
			rc = req->rq_ops->hpreq_check(req);
			if (rc == -ESTALE) {
				req->rq_status = rc;
				ptlrpc_error(req);
			}
			/** can only return error,
			 * 0 for normal request,
			 * or 1 for high priority request
			 */
			LASSERT(rc <= 1);
		}

		spin_lock_bh(&req->rq_export->exp_rpc_lock);
		list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs);
		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}

	ptlrpc_nrs_req_initialize(svcpt, req, rc);

	return rc;
}

/** Remove the request from the export list. */
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
	if (req->rq_export && req->rq_ops) {
		/* refresh lock timeout again so that client has more
		 * room to send lock cancel RPC.
		 */
		if (req->rq_ops->hpreq_fini)
			req->rq_ops->hpreq_fini(req);

		spin_lock_bh(&req->rq_export->exp_rpc_lock);
		list_del_init(&req->rq_exp_list);
		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}
}

static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
				     struct ptlrpc_request *req)
{
	int rc;

	rc = ptlrpc_server_hpreq_init(svcpt, req);
	if (rc < 0)
		return rc;

	ptlrpc_nrs_req_add(svcpt, req, !!rc);

	return 0;
}

/**
 * Allow to handle high priority request
 * User can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_req_lock to get reliable result
 */
static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
				     bool force)
{
	int running = svcpt->scp_nthrs_running;

	if (!nrs_svcpt_has_hp(svcpt))
		return false;

	if (force)
		return true;

	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
		running = PTLRPC_NTHRS_INIT;
		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
			running += 1;
	}

	if (svcpt->scp_nreqs_active >= running - 1)
		return false;

	if (svcpt->scp_nhreqs_active == 0)
		return true;

	return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
	       svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
}

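/*
 * The final check above implements the HP/normal fairness ratio: a thread
 * may keep taking high-priority requests either while no normal requests
 * are queued, or until scp_hreq_count (consecutive HP requests served)
 * reaches srv_hpreq_ratio; ptlrpc_server_request_get() resets the counter
 * to zero whenever a normal request is picked, so starvation of the normal
 * queue is bounded.
 */
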
static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
				       bool force)
{
	return ptlrpc_server_allow_high(svcpt, force) &&
	       ptlrpc_nrs_req_pending_nolock(svcpt, true);
}

/**
 * Only allow normal priority requests on a service that has a high-priority
 * queue if forced (i.e. cleanup), if there are other high priority requests
 * already being processed (i.e. those threads can service more high-priority
 * requests), or if there are enough idle threads that a later thread can do
 * a high priority request.
 * User can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_req_lock to get reliable result
 */
static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
				       bool force)
{
	int running = svcpt->scp_nthrs_running;

	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
		running = PTLRPC_NTHRS_INIT;
		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
			running += 1;
	}

	if (force ||
	    svcpt->scp_nreqs_active < running - 2)
		return true;

	if (svcpt->scp_nreqs_active >= running - 1)
		return false;

	return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
}

static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
					 bool force)
{
	return ptlrpc_server_allow_normal(svcpt, force) &&
	       ptlrpc_nrs_req_pending_nolock(svcpt, false);
}

/**
 * Returns true if there are requests available in incoming
 * request queue for processing and it is allowed to fetch them.
 * User can call it w/o any lock but need to hold ptlrpc_service::scp_req_lock
 * to get reliable result
 * \see ptlrpc_server_allow_normal
 * \see ptlrpc_server_allow_high
 */
static inline bool
ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
{
	return ptlrpc_server_high_pending(svcpt, force) ||
	       ptlrpc_server_normal_pending(svcpt, force);
}

/**
 * Fetch a request for processing from queue of unprocessed requests.
 * Favors high-priority requests.
 * Returns a pointer to fetched request.
 */
static struct ptlrpc_request *
ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
{
	struct ptlrpc_request *req = NULL;

	spin_lock(&svcpt->scp_req_lock);

	if (ptlrpc_server_high_pending(svcpt, force)) {
		req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
		if (req) {
			svcpt->scp_hreq_count++;
			goto got_request;
		}
	}

	if (ptlrpc_server_normal_pending(svcpt, force)) {
		req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
		if (req) {
			svcpt->scp_hreq_count = 0;
			goto got_request;
		}
	}

	spin_unlock(&svcpt->scp_req_lock);
	return NULL;

got_request:
	svcpt->scp_nreqs_active++;
	if (req->rq_hp)
		svcpt->scp_nhreqs_active++;

	spin_unlock(&svcpt->scp_req_lock);

	if (likely(req->rq_export))
		class_export_rpc_inc(req->rq_export);

	return req;
}

/**
 * Handle freshly incoming reqs, add to timed early reply list,
 * pass on to regular request queue.
 * All incoming requests pass through here before getting into
 * ptlrpc_server_handle_request later on.
 */
static int
ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
			    struct ptlrpc_thread *thread)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request *req;
	__u32 deadline;
	int rc;

	spin_lock(&svcpt->scp_lock);
	if (list_empty(&svcpt->scp_req_incoming)) {
		spin_unlock(&svcpt->scp_lock);
		return 0;
	}

	req = list_entry(svcpt->scp_req_incoming.next,
			 struct ptlrpc_request, rq_list);
	list_del_init(&req->rq_list);
	svcpt->scp_nreqs_incoming--;
	/* Consider this still a "queued" request as far as stats are
	 * concerned
	 */
	spin_unlock(&svcpt->scp_lock);

	/* go through security check/transform */
	rc = sptlrpc_svc_unwrap_request(req);
	switch (rc) {
	case SECSVC_OK:
		break;
	case SECSVC_COMPLETE:
		target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
		goto err_req;
	case SECSVC_DROP:
		goto err_req;
	}

	/*
	 * for null-flavored rpc, msg has been unpacked by sptlrpc, although
	 * redo it wouldn't be harmful.
	 */
	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
		rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
		if (rc != 0) {
			CERROR("error unpacking request: ptl %d from %s x%llu\n",
			       svc->srv_req_portal, libcfs_id2str(req->rq_peer),
			       req->rq_xid);
			goto err_req;
		}
	}

	rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
	if (rc) {
		CERROR("error unpacking ptlrpc body: ptl %d from %s x%llu\n",
		       svc->srv_req_portal, libcfs_id2str(req->rq_peer),
		       req->rq_xid);
		goto err_req;
	}

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
		CERROR("drop incoming rpc opc %u, x%llu\n",
		       cfs_fail_val, req->rq_xid);
		goto err_req;
	}

	rc = -EINVAL;
	if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
		CERROR("wrong packet type received (type=%u) from %s\n",
		       lustre_msg_get_type(req->rq_reqmsg),
		       libcfs_id2str(req->rq_peer));
		goto err_req;
	}

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case MDS_WRITEPAGE:
	case OST_WRITE:
		req->rq_bulk_write = 1;
		break;
	case MDS_READPAGE:
	case OST_READ:
	case MGS_CONFIG_READ:
		req->rq_bulk_read = 1;
		break;
	}

	CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);

	req->rq_export = class_conn2export(
		lustre_msg_get_handle(req->rq_reqmsg));
	if (req->rq_export) {
		rc = ptlrpc_check_req(req);
		if (rc == 0) {
			rc = sptlrpc_target_export_check(req->rq_export, req);
			if (rc)
				DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
		}

		if (rc)
			goto err_req;
	}

	/* req_in handling should/must be fast */
	if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
		DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
			  (s64)(ktime_get_real_seconds() -
				req->rq_arrival_time.tv_sec));

	/* Set rpc server deadline and add it to the timed list */
	deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
		    MSGHDR_AT_SUPPORT) ?
		    /* The max time the client expects us to take */
		    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
	req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
	if (unlikely(deadline == 0)) {
		DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
		goto err_req;
	}

	req->rq_svc_thread = thread;
	if (thread) {
		/* initialize request session, it is needed for request
		 * processing by target
		 */
		rc = lu_context_init(&req->rq_session,
				     LCT_SERVER_SESSION | LCT_NOREF);
		if (rc) {
			CERROR("%s: failure to initialize session: rc = %d\n",
			       thread->t_name, rc);
			goto err_req;
		}
		req->rq_session.lc_thread = thread;
		lu_context_enter(&req->rq_session);
		req->rq_svc_thread->t_env->le_ses = &req->rq_session;
	}

	ptlrpc_at_add_timed(req);

	/* Move it over to the request processing queue */
	rc = ptlrpc_server_request_add(svcpt, req);
	if (rc)
		goto err_req;

	wake_up(&svcpt->scp_waitq);
	return 1;

err_req:
	ptlrpc_server_finish_request(svcpt, req);

	return 1;
}

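/*
 * Note that ptlrpc_server_handle_req_in() returns 1 whenever it consumed a
 * request from scp_req_incoming, even on the err_req path, so the caller in
 * ptlrpc_main() keeps draining the incoming queue; 0 is returned only when
 * the queue was empty.
 */
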
/**
 * Main incoming request handling logic.
 * Calls handler function from service to do actual processing.
 */
static int
ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
			     struct ptlrpc_thread *thread)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request *request;
	struct timespec64 work_start;
	struct timespec64 work_end;
	struct timespec64 timediff;
	struct timespec64 arrived;
	unsigned long timediff_usecs;
	unsigned long arrived_usecs;
	int fail_opc = 0;

	request = ptlrpc_server_request_get(svcpt, false);
	if (!request)
		return 0;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
		fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
	else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
		fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;

	if (unlikely(fail_opc)) {
		if (request->rq_export && request->rq_ops)
			OBD_FAIL_TIMEOUT(fail_opc, 4);
	}

	ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
		libcfs_debug_dumplog();

	ktime_get_real_ts64(&work_start);
	timediff = timespec64_sub(work_start, request->rq_arrival_time);
	timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
			 timediff.tv_nsec / NSEC_PER_USEC;
	if (likely(svc->srv_stats)) {
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
				    timediff_usecs);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
				    svcpt->scp_nreqs_incoming);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
				    svcpt->scp_nreqs_active);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
				    at_get(&svcpt->scp_at_estimate));
	}

	if (likely(request->rq_export)) {
		if (unlikely(ptlrpc_check_req(request)))
			goto put_conn;
	}

	/* Discard requests queued for longer than the deadline.
	 * The deadline is increased if we send an early reply.
	 */
	if (ktime_get_real_seconds() > request->rq_deadline) {
		DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
			  libcfs_id2str(request->rq_peer),
			  request->rq_deadline -
			  request->rq_arrival_time.tv_sec,
			  ktime_get_real_seconds() - request->rq_deadline);
		goto put_conn;
	}

	CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
	       current_comm(),
	       (request->rq_export ?
		(char *)request->rq_export->exp_client_uuid.uuid : "0"),
	       (request->rq_export ?
		atomic_read(&request->rq_export->exp_refcount) : -99),
	       lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
	       libcfs_id2str(request->rq_peer),
	       lustre_msg_get_opc(request->rq_reqmsg));

	if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
		CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);

	CDEBUG(D_NET, "got req %llu\n", request->rq_xid);

	/* re-assign request and session thread to the current one */
	request->rq_svc_thread = thread;
	if (thread) {
		LASSERT(request->rq_session.lc_thread);
		request->rq_session.lc_thread = thread;
		request->rq_session.lc_cookie = 0x55;
		thread->t_env->le_ses = &request->rq_session;
	}
	svc->srv_ops.so_req_handler(request);

	ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);

put_conn:
	if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
		DEBUG_REQ(D_WARNING, request,
			  "Request took longer than estimated (%lld:%llds); client may timeout.",
			  (s64)request->rq_deadline -
			       request->rq_arrival_time.tv_sec,
			  (s64)ktime_get_real_seconds() -
			       request->rq_deadline);
	}

	ktime_get_real_ts64(&work_end);
	timediff = timespec64_sub(work_end, work_start);
	timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
			 timediff.tv_nsec / NSEC_PER_USEC;
	arrived = timespec64_sub(work_end, request->rq_arrival_time);
	arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
			arrived.tv_nsec / NSEC_PER_USEC;
	CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
	       current_comm(),
	       (request->rq_export ?
		(char *)request->rq_export->exp_client_uuid.uuid : "0"),
	       (request->rq_export ?
		atomic_read(&request->rq_export->exp_refcount) : -99),
	       lustre_msg_get_status(request->rq_reqmsg),
	       request->rq_xid,
	       libcfs_id2str(request->rq_peer),
	       lustre_msg_get_opc(request->rq_reqmsg),
	       timediff_usecs,
	       arrived_usecs,
	       (request->rq_repmsg ?
		lustre_msg_get_transno(request->rq_repmsg) :
		request->rq_transno),
	       request->rq_status,
	       (request->rq_repmsg ?
		lustre_msg_get_status(request->rq_repmsg) : -999));
	if (likely(svc->srv_stats && request->rq_reqmsg)) {
		__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
		int opc = opcode_offset(op);

		if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
			LASSERT(opc < LUSTRE_MAX_OPCODES);
			lprocfs_counter_add(svc->srv_stats,
					    opc + EXTRA_MAX_OPCODES,
					    timediff_usecs);
		}
	}
	if (unlikely(request->rq_early_count)) {
		DEBUG_REQ(D_ADAPTTO, request,
			  "sent %d early replies before finishing in %llds",
			  request->rq_early_count,
			  (s64)work_end.tv_sec -
			  request->rq_arrival_time.tv_sec);
	}

	ptlrpc_server_finish_active_request(svcpt, request);

	return 1;
}

1777 ptlrpc_handle_rs(struct ptlrpc_reply_state
*rs
)
1779 struct ptlrpc_service_part
*svcpt
= rs
->rs_svcpt
;
1780 struct ptlrpc_service
*svc
= svcpt
->scp_service
;
1781 struct obd_export
*exp
;
1785 exp
= rs
->rs_export
;
1787 LASSERT(rs
->rs_difficult
);
1788 LASSERT(rs
->rs_scheduled
);
1789 LASSERT(list_empty(&rs
->rs_list
));
1791 spin_lock(&exp
->exp_lock
);
1792 /* Noop if removed already */
1793 list_del_init(&rs
->rs_exp_list
);
1794 spin_unlock(&exp
->exp_lock
);
1796 /* The disk commit callback holds exp_uncommitted_replies_lock while it
1797 * iterates over newly committed replies, removing them from
1798 * exp_uncommitted_replies. It then drops this lock and schedules the
1799 * replies it found for handling here.
1801 * We can avoid contention for exp_uncommitted_replies_lock between the
1802 * HRT threads and further commit callbacks by checking rs_committed
1803 * which is set in the commit callback while it holds both
1804 * rs_lock and exp_uncommitted_reples.
1806 * If we see rs_committed clear, the commit callback _may_ not have
1807 * handled this reply yet and we race with it to grab
1808 * exp_uncommitted_replies_lock before removing the reply from
1809 * exp_uncommitted_replies. Note that if we lose the race and the
1810 * reply has already been removed, list_del_init() is a noop.
1812 * If we see rs_committed set, we know the commit callback is handling,
1813 * or has handled this reply since store reordering might allow us to
1814 * see rs_committed set out of sequence. But since this is done
1815 * holding rs_lock, we can be sure it has all completed once we hold
1816 * rs_lock, which we do right next.
1818 if (!rs
->rs_committed
) {
1819 spin_lock(&exp
->exp_uncommitted_replies_lock
);
1820 list_del_init(&rs
->rs_obd_list
);
1821 spin_unlock(&exp
->exp_uncommitted_replies_lock
);
1824 spin_lock(&rs
->rs_lock
);
1826 been_handled
= rs
->rs_handled
;
1829 nlocks
= rs
->rs_nlocks
; /* atomic "steal", but */
1830 rs
->rs_nlocks
= 0; /* locks still on rs_locks! */
1832 if (nlocks
== 0 && !been_handled
) {
1833 /* If we see this, we should already have seen the warning
1834 * in mds_steal_ack_locks()
1836 CDEBUG(D_HA
, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
1838 rs
->rs_xid
, rs
->rs_transno
, rs
->rs_opc
,
1839 libcfs_nid2str(exp
->exp_connection
->c_peer
.nid
));
1842 if ((!been_handled
&& rs
->rs_on_net
) || nlocks
> 0) {
1843 spin_unlock(&rs
->rs_lock
);
1845 if (!been_handled
&& rs
->rs_on_net
) {
1846 LNetMDUnlink(rs
->rs_md_h
);
1847 /* Ignore return code; we're racing with completion */
1850 while (nlocks
-- > 0)
1851 ldlm_lock_decref(&rs
->rs_locks
[nlocks
],
1852 rs
->rs_modes
[nlocks
]);
1854 spin_lock(&rs
->rs_lock
);
1857 rs
->rs_scheduled
= 0;
1859 if (!rs
->rs_on_net
) {
1861 spin_unlock(&rs
->rs_lock
);
1863 class_export_put(exp
);
1864 rs
->rs_export
= NULL
;
1865 ptlrpc_rs_decref(rs
);
1866 if (atomic_dec_and_test(&svcpt
->scp_nreps_difficult
) &&
1867 svc
->srv_is_stopping
)
1868 wake_up_all(&svcpt
->scp_waitq
);
1872 /* still on the net; callback will schedule */
1873 spin_unlock(&rs
->rs_lock
);
static void
ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
{
	int avail = svcpt->scp_nrqbds_posted;
	int low_water = test_req_buffer_pressure ? 0 :
			svcpt->scp_service->srv_nbuf_per_group / 2;

	/* NB I'm not locking; just looking. */

	/* CAVEAT EMPTOR: We might be allocating buffers here because we've
	 * allowed the request history to grow out of control. We could put a
	 * sanity check on that here and cull some history if we need the
	 * space.
	 */

	if (avail <= low_water)
		ptlrpc_grow_req_bufs(svcpt, 1);

	if (svcpt->scp_service->srv_stats) {
		lprocfs_counter_add(svcpt->scp_service->srv_stats,
				    PTLRPC_REQBUF_AVAIL_CNTR, avail);
	}
}

static int
ptlrpc_retry_rqbds(void *arg)
{
	struct ptlrpc_service_part *svcpt = arg;

	svcpt->scp_rqbd_timeout = 0;
	return -ETIMEDOUT;
}

static inline int
ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_nreqs_active <
	       svcpt->scp_nthrs_running - 1 -
	       (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
}

/**
 * allowed to create more threads
 * user can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_lock to get reliable result
 */
static inline int
ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_nthrs_running +
	       svcpt->scp_nthrs_starting <
	       svcpt->scp_service->srv_nthrs_cpt_limit;
}

/**
 * too many requests and allowed to create more threads
 */
static inline int
ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
{
	return !ptlrpc_threads_enough(svcpt) &&
		ptlrpc_threads_increasable(svcpt);
}

static inline int
ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
{
	return thread_is_stopping(thread) ||
	       thread->t_svcpt->scp_service->srv_is_stopping;
}

static inline int
ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
{
	return !list_empty(&svcpt->scp_rqbd_idle) &&
	       svcpt->scp_rqbd_timeout == 0;
}

static inline int
ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_at_check;
}

/**
 * requests wait on preprocessing
 * user can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_lock to get reliable result
 */
static inline int
ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
{
	return !list_empty(&svcpt->scp_req_incoming);
}

static __attribute__((__noinline__)) int
ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
		  struct ptlrpc_thread *thread)
{
	/* Don't exit while there are replies to be handled */
	struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
					     ptlrpc_retry_rqbds, svcpt);

	/* XXX: Add this back when libcfs watchdog is merged upstream
	lc_watchdog_disable(thread->t_watchdog);
	 */

	cond_resched();

	l_wait_event_exclusive_head(svcpt->scp_waitq,
				    ptlrpc_thread_stopping(thread) ||
				    ptlrpc_server_request_incoming(svcpt) ||
				    ptlrpc_server_request_pending(svcpt,
								  false) ||
				    ptlrpc_rqbd_pending(svcpt) ||
				    ptlrpc_at_check(svcpt), &lwi);

	if (ptlrpc_thread_stopping(thread))
		return -EINTR;

	/*
	lc_watchdog_touch(thread->t_watchdog,
			  ptlrpc_server_get_timeout(svcpt));
	 */
	return 0;
}

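/*
 * l_wait_event_exclusive_head() above adds the thread to scp_waitq as an
 * exclusive waiter at the head of the queue, so a single wake_up() on
 * request arrival wakes exactly one service thread rather than the whole
 * pool; the usual thundering-herd avoidance for worker pools.
 */
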
2005 * Main thread body for service threads.
2006 * Waits in a loop waiting for new requests to process to appear.
2007 * Every time an incoming requests is added to its queue, a waitq
2008 * is woken up and one of the threads will handle it.
2010 static int ptlrpc_main(void *arg
)
2012 struct ptlrpc_thread
*thread
= arg
;
2013 struct ptlrpc_service_part
*svcpt
= thread
->t_svcpt
;
2014 struct ptlrpc_service
*svc
= svcpt
->scp_service
;
2015 struct ptlrpc_reply_state
*rs
;
2016 struct group_info
*ginfo
= NULL
;
2018 int counter
= 0, rc
= 0;
2020 thread
->t_pid
= current_pid();
2021 unshare_fs_struct();
2023 /* NB: we will call cfs_cpt_bind() for all threads, because we
2024 * might want to run lustre server only on a subset of system CPUs,
2025 * in that case ->scp_cpt is CFS_CPT_ANY
2027 rc
= cfs_cpt_bind(svc
->srv_cptable
, svcpt
->scp_cpt
);
2029 CWARN("%s: failed to bind %s on CPT %d\n",
2030 svc
->srv_name
, thread
->t_name
, svcpt
->scp_cpt
);
2033 ginfo
= groups_alloc(0);
2039 set_current_groups(ginfo
);
2040 put_group_info(ginfo
);
2042 if (svc
->srv_ops
.so_thr_init
) {
2043 rc
= svc
->srv_ops
.so_thr_init(thread
);
2048 env
= kzalloc(sizeof(*env
), GFP_NOFS
);
2054 rc
= lu_context_init(&env
->le_ctx
,
2055 svc
->srv_ctx_tags
| LCT_REMEMBER
| LCT_NOREF
);
2059 thread
->t_env
= env
;
2060 env
->le_ctx
.lc_thread
= thread
;
2061 env
->le_ctx
.lc_cookie
= 0x6;
2063 while (!list_empty(&svcpt
->scp_rqbd_idle
)) {
2064 rc
= ptlrpc_server_post_idle_rqbds(svcpt
);
2068 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2069 svc
->srv_name
, svcpt
->scp_cpt
, rc
);
2073 /* Alloc reply state structure for this one */
2074 rs
= libcfs_kvzalloc(svc
->srv_max_reply_size
, GFP_NOFS
);
2080 spin_lock(&svcpt
->scp_lock
);
2082 LASSERT(thread_is_starting(thread
));
2083 thread_clear_flags(thread
, SVC_STARTING
);
2085 LASSERT(svcpt
->scp_nthrs_starting
== 1);
2086 svcpt
->scp_nthrs_starting
--;
2088 /* SVC_STOPPING may already be set here if someone else is trying
2089 * to stop the service while this new thread has been dynamically
2090 * forked. We still set SVC_RUNNING to let our creator know that
2091 * we are now running, however we will exit as soon as possible
2093 thread_add_flags(thread
, SVC_RUNNING
);
2094 svcpt
->scp_nthrs_running
++;
2095 spin_unlock(&svcpt
->scp_lock
);
2097 /* wake up our creator in case he's still waiting. */
2098 wake_up(&thread
->t_ctl_waitq
);
2101 thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2105 spin_lock(&svcpt
->scp_rep_lock
);
2106 list_add(&rs
->rs_list
, &svcpt
->scp_rep_idle
);
2107 wake_up(&svcpt
->scp_rep_waitq
);
2108 spin_unlock(&svcpt
->scp_rep_lock
);
2110 CDEBUG(D_NET
, "service thread %d (#%d) started\n", thread
->t_id
,
2111 svcpt
->scp_nthrs_running
);
	/* XXX maintain a list of all managed devices: insert here */
	while (!ptlrpc_thread_stopping(thread)) {
		if (ptlrpc_wait_event(svcpt, thread))
			break;

		ptlrpc_check_rqbd_pool(svcpt);

		if (ptlrpc_threads_need_create(svcpt)) {
			/* Ignore return code - we tried... */
			ptlrpc_start_thread(svcpt, 0);
		}

		/* Process all incoming reqs before handling any */
		if (ptlrpc_server_request_incoming(svcpt)) {
			lu_context_enter(&env->le_ctx);
			ptlrpc_server_handle_req_in(svcpt, thread);
			lu_context_exit(&env->le_ctx);

			/* but limit ourselves in case of flood */
			if (counter++ < 100)
				continue;
			counter = 0;
		}

		if (ptlrpc_at_check(svcpt))
			ptlrpc_at_check_timed(svcpt);

		if (ptlrpc_server_request_pending(svcpt, false)) {
			lu_context_enter(&env->le_ctx);
			ptlrpc_server_handle_request(svcpt, thread);
			lu_context_exit(&env->le_ctx);
		}

		if (ptlrpc_rqbd_pending(svcpt) &&
		    ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
			/* I just failed to repost request buffers.
			 * Wait for a timeout (unless something else
			 * happens) before I try again
			 */
			svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
			CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
			       svcpt->scp_nrqbds_posted);
		}
	}
	lc_watchdog_delete(thread->t_watchdog);
	thread->t_watchdog = NULL;

out_srv_fini:
	/*
	 * deconstruct service specific state created by ptlrpc_start_thread()
	 */
	if (svc->srv_ops.so_thr_done)
		svc->srv_ops.so_thr_done(thread);

	if (env) {
		lu_context_fini(&env->le_ctx);
		kfree(env);
	}
out:
	CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
	       thread, thread->t_pid, thread->t_id, rc);
	spin_lock(&svcpt->scp_lock);
	if (thread_test_and_clear_flags(thread, SVC_STARTING))
		svcpt->scp_nthrs_starting--;

	if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
		/* must know immediately */
		svcpt->scp_nthrs_running--;
	}

	thread->t_id = rc;
	thread_add_flags(thread, SVC_STOPPED);

	wake_up(&thread->t_ctl_waitq);
	spin_unlock(&svcpt->scp_lock);

	return rc;
}
static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
			  struct list_head *replies)
{
	int result;

	spin_lock(&hrt->hrt_lock);

	list_splice_init(&hrt->hrt_queue, replies);
	result = ptlrpc_hr.hr_stopping || !list_empty(replies);

	spin_unlock(&hrt->hrt_lock);

	return result;
}
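/*
 * NB: besides deciding whether the reply handler may sleep, the
 * predicate above also splices all queued reply states onto the
 * caller's local list under hrt_lock, so testing the wait condition
 * and dequeueing the work happen atomically.
 */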
/**
 * Main body of the "handle reply" function.
 * It processes acked reply states.
 */
static int ptlrpc_hr_main(void *arg)
{
	struct ptlrpc_hr_thread *hrt = arg;
	struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
	LIST_HEAD(replies);
	char threadname[20];
	int rc;

	snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
		 hrp->hrp_cpt, hrt->hrt_id);
	unshare_fs_struct();

	rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
	if (rc != 0) {
		CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
		      threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
	}

	atomic_inc(&hrp->hrp_nstarted);
	wake_up(&ptlrpc_hr.hr_waitq);

	while (!ptlrpc_hr.hr_stopping) {
		l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));

		while (!list_empty(&replies)) {
			struct ptlrpc_reply_state *rs;

			rs = list_entry(replies.prev,
					struct ptlrpc_reply_state, rs_list);
			list_del_init(&rs->rs_list);
			ptlrpc_handle_rs(rs);
		}
	}

	atomic_inc(&hrp->hrp_nstopped);
	wake_up(&ptlrpc_hr.hr_waitq);

	return 0;
}
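/*
 * The hrp_nstarted/hrp_nstopped counters, together with hr_waitq, are
 * how ptlrpc_start_hr_threads() and ptlrpc_stop_hr_threads() account
 * for the lifecycle of the handler threads in each partition.
 */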
static void ptlrpc_stop_hr_threads(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;
	int j;

	ptlrpc_hr.hr_stopping = 1;

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		if (!hrp->hrp_thrs)
			continue; /* uninitialized */
		for (j = 0; j < hrp->hrp_nthrs; j++)
			wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
	}

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		if (!hrp->hrp_thrs)
			continue; /* uninitialized */
		wait_event(ptlrpc_hr.hr_waitq,
			   atomic_read(&hrp->hrp_nstopped) ==
			   atomic_read(&hrp->hrp_nstarted));
	}
}
static int ptlrpc_start_hr_threads(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;
	int j;

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		int rc = 0;

		for (j = 0; j < hrp->hrp_nthrs; j++) {
			struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
			struct task_struct *task;

			task = kthread_run(ptlrpc_hr_main,
					   &hrp->hrp_thrs[j],
					   "ptlrpc_hr%02d_%03d",
					   hrp->hrp_cpt, hrt->hrt_id);
			if (IS_ERR(task)) {
				rc = PTR_ERR(task);
				break;
			}
		}

		wait_event(ptlrpc_hr.hr_waitq,
			   atomic_read(&hrp->hrp_nstarted) == j);

		if (rc < 0) {
			CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
			       i, j, rc);
			ptlrpc_stop_hr_threads();
			return rc;
		}
	}

	return 0;
}
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
	struct l_wait_info lwi = { 0 };
	struct ptlrpc_thread *thread;
	LIST_HEAD(zombie);

	CDEBUG(D_INFO, "Stopping threads for service %s\n",
	       svcpt->scp_service->srv_name);

	spin_lock(&svcpt->scp_lock);
	/* let the thread know that we would like it to stop asap */
	list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
		CDEBUG(D_INFO, "Stopping thread %s #%u\n",
		       svcpt->scp_service->srv_thread_name, thread->t_id);
		thread_add_flags(thread, SVC_STOPPING);
	}

	wake_up_all(&svcpt->scp_waitq);

	while (!list_empty(&svcpt->scp_threads)) {
		thread = list_entry(svcpt->scp_threads.next,
				    struct ptlrpc_thread, t_link);
		if (thread_is_stopped(thread)) {
			list_del(&thread->t_link);
			list_add(&thread->t_link, &zombie);
			continue;
		}
		spin_unlock(&svcpt->scp_lock);

		CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
		       svcpt->scp_service->srv_thread_name, thread->t_id);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopped(thread), &lwi);

		spin_lock(&svcpt->scp_lock);
	}

	spin_unlock(&svcpt->scp_lock);

	while (!list_empty(&zombie)) {
		thread = list_entry(zombie.next,
				    struct ptlrpc_thread, t_link);
		list_del(&thread->t_link);
		kfree(thread);
	}
}
/**
 * Stops all threads of a particular service \a svc
 */
static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (svcpt->scp_service)
			ptlrpc_svcpt_stop_threads(svcpt);
	}
}
int ptlrpc_start_threads(struct ptlrpc_service *svc)
{
	int rc = 0;
	int i;
	int j;

	/* We require 2 threads min, see note in ptlrpc_server_handle_request */
	LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);

	for (i = 0; i < svc->srv_ncpts; i++) {
		for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
			rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
			if (rc == 0)
				continue;

			if (rc != -EMFILE)
				goto failed;
			/* We have enough threads, don't start more. b=15759 */
			break;
		}
	}

	return 0;
 failed:
	CERROR("cannot start %s thread #%d_%d: rc %d\n",
	       svc->srv_thread_name, i, j, rc);
	ptlrpc_stop_all_threads(svc);
	return rc;
}
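/*
 * Illustrative usage sketch (not a new API; the error handling shown
 * is the caller's own choice): after a service has been created, the
 * initial thread pool is brought up and everything is torn down again
 * if that fails:
 *
 *	rc = ptlrpc_start_threads(svc);
 *	if (rc != 0)
 *		ptlrpc_unregister_service(svc);
 *
 * Threads beyond srv_nthrs_cpt_init are then forked on demand from
 * ptlrpc_main() via ptlrpc_start_thread(svcpt, 0).
 */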
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
	struct l_wait_info lwi = { 0 };
	struct ptlrpc_thread *thread;
	struct ptlrpc_service *svc;
	struct task_struct *task;
	int rc;

	svc = svcpt->scp_service;

	CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
	       svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
	       svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);

again:
	if (unlikely(svc->srv_is_stopping))
		return -ESRCH;

	if (!ptlrpc_threads_increasable(svcpt) ||
	    (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
	     svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
		return -EMFILE;
	thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
			      cfs_cpt_spread_node(svc->srv_cptable,
						  svcpt->scp_cpt));
	if (!thread)
		return -ENOMEM;

	init_waitqueue_head(&thread->t_ctl_waitq);

	spin_lock(&svcpt->scp_lock);
	if (!ptlrpc_threads_increasable(svcpt)) {
		spin_unlock(&svcpt->scp_lock);
		kfree(thread);
		return -EMFILE;
	}

	if (svcpt->scp_nthrs_starting != 0) {
		/* serialize starting because some modules (obdfilter)
		 * might require unique and contiguous t_id
		 */
		LASSERT(svcpt->scp_nthrs_starting == 1);
		spin_unlock(&svcpt->scp_lock);
		kfree(thread);
		if (wait) {
			CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
			       svc->srv_thread_name, svcpt->scp_thr_nextid);
			schedule();
			goto again;
		}

		CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
		       svc->srv_thread_name, svcpt->scp_thr_nextid);
		return -EAGAIN;
	}

	svcpt->scp_nthrs_starting++;
	thread->t_id = svcpt->scp_thr_nextid++;
	thread_add_flags(thread, SVC_STARTING);
	thread->t_svcpt = svcpt;

	list_add(&thread->t_link, &svcpt->scp_threads);
	spin_unlock(&svcpt->scp_lock);
	if (svcpt->scp_cpt >= 0) {
		snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
			 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
	} else {
		snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
			 svc->srv_thread_name, thread->t_id);
	}
	CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
	task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);

		CERROR("cannot start thread '%s': rc = %d\n",
		       thread->t_name, rc);
		spin_lock(&svcpt->scp_lock);
		--svcpt->scp_nthrs_starting;
		if (thread_is_stopping(thread)) {
			/* this ptlrpc_thread is being handled
			 * by ptlrpc_svcpt_stop_threads now
			 */
			thread_add_flags(thread, SVC_STOPPED);
			wake_up(&thread->t_ctl_waitq);
			spin_unlock(&svcpt->scp_lock);
		} else {
			list_del(&thread->t_link);
			spin_unlock(&svcpt->scp_lock);
			kfree(thread);
		}
		return rc;
	}

	if (!wait)
		return 0;

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);

	rc = thread_is_stopped(thread) ? thread->t_id : 0;
	return rc;
}
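/*
 * NB: on the wait != 0 path the return value is 0 once the new thread
 * reports SVC_RUNNING; if the thread stopped instead, its exit code
 * (stored in t_id by ptlrpc_main() before it sets SVC_STOPPED) is
 * returned to the caller.
 */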
int ptlrpc_hr_init(void)
{
	struct ptlrpc_hr_partition *hrp;
	struct ptlrpc_hr_thread *hrt;
	int rc;
	int i;
	int j;
	int weight;

	memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
	ptlrpc_hr.hr_cpt_table = cfs_cpt_table;

	ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
						   sizeof(*hrp));
	if (!ptlrpc_hr.hr_partitions)
		return -ENOMEM;

	init_waitqueue_head(&ptlrpc_hr.hr_waitq);

	weight = cpumask_weight(topology_sibling_cpumask(0));

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		hrp->hrp_cpt = i;

		atomic_set(&hrp->hrp_nstarted, 0);
		atomic_set(&hrp->hrp_nstopped, 0);

		hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
		hrp->hrp_nthrs /= weight;
		if (hrp->hrp_nthrs == 0)
			hrp->hrp_nthrs = 1;

		hrp->hrp_thrs =
			kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
				     cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
							 i));
		if (!hrp->hrp_thrs) {
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < hrp->hrp_nthrs; j++) {
			hrt = &hrp->hrp_thrs[j];

			hrt->hrt_id = j;
			hrt->hrt_partition = hrp;
			init_waitqueue_head(&hrt->hrt_waitq);
			spin_lock_init(&hrt->hrt_lock);
			INIT_LIST_HEAD(&hrt->hrt_queue);
		}
	}

	rc = ptlrpc_start_hr_threads();
out:
	if (rc != 0)
		ptlrpc_hr_fini();
	return rc;
}
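/*
 * The per-partition thread count computed above is the number of
 * online CPUs in the CPT divided by the number of hyperthread
 * siblings per core, with a minimum of one, i.e. roughly one reply
 * handler thread per physical core.
 */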
void ptlrpc_hr_fini(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;

	if (!ptlrpc_hr.hr_partitions)
		return;

	ptlrpc_stop_hr_threads();

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		kfree(hrp->hrp_thrs);
	}

	cfs_percpt_free(ptlrpc_hr.hr_partitions);
	ptlrpc_hr.hr_partitions = NULL;
}
/**
 * Wait until all already scheduled replies are processed.
 */
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
	while (1) {
		int rc;
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
						     NULL, NULL);

		rc = l_wait_event(svcpt->scp_waitq,
				  atomic_read(&svcpt->scp_nreps_difficult) == 0,
				  &lwi);
		if (rc == 0)
			break;
		CWARN("Unexpectedly long timeout %s %p\n",
		      svcpt->scp_service->srv_name, svcpt->scp_service);
	}
}
static void
ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;

	/* early disarm AT timer... */
	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (svcpt->scp_service)
			del_timer(&svcpt->scp_at_timer);
	}
}
static void
ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_request_buffer_desc *rqbd;
	struct l_wait_info lwi;
	int rc;
	int i;

	/* All history will be culled when the next request buffer is
	 * freed in ptlrpc_service_purge_all()
	 */
	svc->srv_hist_nrqbds_cpt_max = 0;

	rc = LNetClearLazyPortal(svc->srv_req_portal);
	LASSERT(rc == 0);

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* Unlink all the request buffers. This forces a 'final'
		 * event with its 'unlink' flag set for each posted rqbd
		 */
		list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
				    rqbd_list) {
			rc = LNetMDUnlink(rqbd->rqbd_md_h);
			LASSERT(rc == 0 || rc == -ENOENT);
		}
	}

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* Wait for the network to release any buffers
		 * it's currently filling
		 */
		spin_lock(&svcpt->scp_lock);
		while (svcpt->scp_nrqbds_posted != 0) {
			spin_unlock(&svcpt->scp_lock);
			/* Network access will complete in finite time but
			 * the HUGE timeout lets us CWARN for visibility
			 * of sluggish LNDs
			 */
			lwi = LWI_TIMEOUT_INTERVAL(
					cfs_time_seconds(LONG_UNLINK),
					cfs_time_seconds(1), NULL, NULL);
			rc = l_wait_event(svcpt->scp_waitq,
					  svcpt->scp_nrqbds_posted == 0, &lwi);
			if (rc == -ETIMEDOUT) {
				CWARN("Service %s waiting for request buffers\n",
				      svcpt->scp_service->srv_name);
			}
			spin_lock(&svcpt->scp_lock);
		}
		spin_unlock(&svcpt->scp_lock);
	}
}
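/*
 * Once every rqbd has been unlinked, LNet delivers a final event for
 * each buffer with its unlink flag set; the request-in callback
 * registered in rqbd_cbid (request_in_callback()) then drops
 * scp_nrqbds_posted towards zero, which is what the wait loop above
 * is watching for.
 */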
static void
ptlrpc_service_purge_all(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_request_buffer_desc *rqbd;
	struct ptlrpc_request *req;
	struct ptlrpc_reply_state *rs;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		spin_lock(&svcpt->scp_rep_lock);
		while (!list_empty(&svcpt->scp_rep_active)) {
			rs = list_entry(svcpt->scp_rep_active.next,
					struct ptlrpc_reply_state, rs_list);
			spin_lock(&rs->rs_lock);
			ptlrpc_schedule_difficult_reply(rs);
			spin_unlock(&rs->rs_lock);
		}
		spin_unlock(&svcpt->scp_rep_lock);

		/* purge the request queue. NB No new replies (rqbds
		 * all unlinked) and no service threads, so I'm the only
		 * thread noodling the request queue now
		 */
		while (!list_empty(&svcpt->scp_req_incoming)) {
			req = list_entry(svcpt->scp_req_incoming.next,
					 struct ptlrpc_request, rq_list);

			list_del(&req->rq_list);
			svcpt->scp_nreqs_incoming--;
			ptlrpc_server_finish_request(svcpt, req);
		}

		while (ptlrpc_server_request_pending(svcpt, true)) {
			req = ptlrpc_server_request_get(svcpt, true);
			ptlrpc_server_finish_active_request(svcpt, req);
		}

		LASSERT(list_empty(&svcpt->scp_rqbd_posted));
		LASSERT(svcpt->scp_nreqs_incoming == 0);
		LASSERT(svcpt->scp_nreqs_active == 0);
		/* history should have been culled by
		 * ptlrpc_server_finish_request
		 */
		LASSERT(svcpt->scp_hist_nrqbds == 0);

		/* Now free all the request buffers since nothing
		 * references them any more...
		 */
		while (!list_empty(&svcpt->scp_rqbd_idle)) {
			rqbd = list_entry(svcpt->scp_rqbd_idle.next,
					  struct ptlrpc_request_buffer_desc,
					  rqbd_list);
			ptlrpc_free_rqbd(rqbd);
		}
		ptlrpc_wait_replies(svcpt);

		while (!list_empty(&svcpt->scp_rep_idle)) {
			rs = list_entry(svcpt->scp_rep_idle.next,
					struct ptlrpc_reply_state,
					rs_list);
			list_del(&rs->rs_list);
			kvfree(rs);
		}
	}
}
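/*
 * Teardown ordering matters here: difficult replies are scheduled for
 * the reply handlers first, then incoming and queued requests are
 * finished (which also culls request history), and only then are the
 * now-unreferenced request buffers and idle reply states freed.
 */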
static void
ptlrpc_service_free(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_at_array *array;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* In case somebody rearmed this in the meantime */
		del_timer(&svcpt->scp_at_timer);
		array = &svcpt->scp_at_array;

		kfree(array->paa_reqs_array);
		array->paa_reqs_array = NULL;
		kfree(array->paa_reqs_count);
		array->paa_reqs_count = NULL;
	}

	ptlrpc_service_for_each_part(svcpt, i, svc)
		kfree(svcpt);

	if (svc->srv_cpts)
		cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);

	kfree(svc);
}
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
	CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);

	service->srv_is_stopping = 1;

	mutex_lock(&ptlrpc_all_services_mutex);
	list_del_init(&service->srv_list);
	mutex_unlock(&ptlrpc_all_services_mutex);

	ptlrpc_service_del_atimer(service);
	ptlrpc_stop_all_threads(service);

	ptlrpc_service_unlink_rqbd(service);
	ptlrpc_service_purge_all(service);
	ptlrpc_service_nrs_cleanup(service);

	ptlrpc_lprocfs_unregister_service(service);
	ptlrpc_sysfs_unregister_service(service);

	ptlrpc_service_free(service);

	return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_service);
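/*
 * Illustrative teardown sketch (my_obd_cleanup and the mo_service
 * field are hypothetical caller-side state, not part of this API):
 *
 *	static int my_obd_cleanup(struct obd_device *obd)
 *	{
 *		ptlrpc_unregister_service(my_obd(obd)->mo_service);
 *		my_obd(obd)->mo_service = NULL;
 *		return 0;
 *	}
 *
 * ptlrpc_unregister_service() stops the threads, drains and frees all
 * request buffers and reply states, and finally frees the service
 * itself, so the caller must not touch the service pointer afterwards.
 */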