/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lu_object.h>
#include <uapi/linux/lnet/lnet-types.h>
#include "ptlrpc_internal.h"

/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure;
module_param(test_req_buffer_pressure, int, 0444);
MODULE_PARM_DESC(test_req_buffer_pressure,
		 "set non-zero to put pressure on request buffer pools");
module_param(at_min, int, 0644);
MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
module_param(at_max, int, 0644);
MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
module_param(at_history, int, 0644);
MODULE_PARM_DESC(at_history,
		 "Adaptive timeouts remember the slowest event that took place within this period (sec)");
module_param(at_early_margin, int, 0644);
MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
module_param(at_extra, int, 0644);
MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");

static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);

/** Holds a list of all PTLRPC services */
LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;

static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request_buffer_desc *rqbd;

	rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
			    cfs_cpt_spread_node(svc->srv_cptable,
						svcpt->scp_cpt));
	if (!rqbd)
		return NULL;

	rqbd->rqbd_svcpt = svcpt;
	rqbd->rqbd_refcount = 0;
	rqbd->rqbd_cbid.cbid_fn = request_in_callback;
	rqbd->rqbd_cbid.cbid_arg = rqbd;
	INIT_LIST_HEAD(&rqbd->rqbd_reqs);
	rqbd->rqbd_buffer = libcfs_kvzalloc_cpt(svc->srv_cptable,
						svcpt->scp_cpt,
						svc->srv_buf_size,
						GFP_KERNEL);
	if (!rqbd->rqbd_buffer) {
		kfree(rqbd);
		return NULL;
	}

	spin_lock(&svcpt->scp_lock);
	list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
	svcpt->scp_nrqbds_total++;
	spin_unlock(&svcpt->scp_lock);

	return rqbd;
}

static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;

	LASSERT(rqbd->rqbd_refcount == 0);
	LASSERT(list_empty(&rqbd->rqbd_reqs));

	spin_lock(&svcpt->scp_lock);
	list_del(&rqbd->rqbd_list);
	svcpt->scp_nrqbds_total--;
	spin_unlock(&svcpt->scp_lock);

	kvfree(rqbd->rqbd_buffer);
	kfree(rqbd);
}

static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request_buffer_desc *rqbd;
	int rc = 0;
	int i;

	if (svcpt->scp_rqbd_allocating)
		goto try_post;

	spin_lock(&svcpt->scp_lock);
	/* check again with lock */
	if (svcpt->scp_rqbd_allocating) {
		/* NB: we might allow more than one thread in the future */
		LASSERT(svcpt->scp_rqbd_allocating == 1);
		spin_unlock(&svcpt->scp_lock);
		goto try_post;
	}

	svcpt->scp_rqbd_allocating++;
	spin_unlock(&svcpt->scp_lock);

	for (i = 0; i < svc->srv_nbuf_per_group; i++) {
		/* NB: another thread might have recycled enough rqbds, we
		 * need to make sure it wouldn't over-allocate, see LU-1212.
		 */
		if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
			break;

		rqbd = ptlrpc_alloc_rqbd(svcpt);

		if (!rqbd) {
			CERROR("%s: Can't allocate request buffer\n",
			       svc->srv_name);
			rc = -ENOMEM;
			break;
		}
	}

	spin_lock(&svcpt->scp_lock);

	LASSERT(svcpt->scp_rqbd_allocating == 1);
	svcpt->scp_rqbd_allocating--;

	spin_unlock(&svcpt->scp_lock);

	CDEBUG(D_RPCTRACE,
	       "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
	       svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
	       svcpt->scp_nrqbds_total, rc);

try_post:
	if (post && rc == 0)
		rc = ptlrpc_server_post_idle_rqbds(svcpt);

	return rc;
}

struct ptlrpc_hr_partition;

struct ptlrpc_hr_thread {
	int hrt_id;		/* thread ID */
	spinlock_t hrt_lock;
	wait_queue_head_t hrt_waitq;
	struct list_head hrt_queue;	/* RS queue */
	struct ptlrpc_hr_partition *hrt_partition;
};

struct ptlrpc_hr_partition {
	/* # of started threads */
	atomic_t hrp_nstarted;
	/* # of stopped threads */
	atomic_t hrp_nstopped;
	/* cpu partition id */
	int hrp_cpt;
	/* round-robin rotor for choosing thread */
	unsigned int hrp_rotor;
	/* total number of threads on this partition */
	int hrp_nthrs;
	/* threads table */
	struct ptlrpc_hr_thread *hrp_thrs;
};

#define HRT_RUNNING 0
#define HRT_STOPPING 1

struct ptlrpc_hr_service {
	/* CPU partition table, it's just cfs_cpt_table for now */
	struct cfs_cpt_table *hr_cpt_table;
	/** controller sleep waitq */
	wait_queue_head_t hr_waitq;
	unsigned int hr_stopping;
	/** roundrobin rotor for non-affinity service */
	unsigned int hr_rotor;
	/* CPU partitions array */
	struct ptlrpc_hr_partition **hr_partitions;
};

/** reply handling service. */
static struct ptlrpc_hr_service ptlrpc_hr;

/**
 * Choose an hr thread to dispatch requests to.
 */
static struct ptlrpc_hr_thread *
ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_hr_partition *hrp;
	unsigned int rotor;

	if (svcpt->scp_cpt >= 0 &&
	    svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
		/* directly match partition */
		hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];

	} else {
		rotor = ptlrpc_hr.hr_rotor++;
		rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);

		hrp = ptlrpc_hr.hr_partitions[rotor];
	}

	rotor = hrp->hrp_rotor++;
	return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
}
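
/*
 * Illustrative example, not from the original source: on a partition with
 * hrp_nthrs == 4, successive calls step hrp_rotor through 0, 1, 2, 3, 0, ...
 * so reply states are spread round-robin over the partition's threads. The
 * rotor increments are unlocked; an occasional lost update merely skews the
 * distribution slightly, which is harmless here.
 */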

/**
 * Put reply state into a queue for processing because we received
 * ACK from the client
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
	struct ptlrpc_hr_thread *hrt;

	LASSERT(list_empty(&rs->rs_list));

	hrt = ptlrpc_hr_select(rs->rs_svcpt);

	spin_lock(&hrt->hrt_lock);
	list_add_tail(&rs->rs_list, &hrt->hrt_queue);
	spin_unlock(&hrt->hrt_lock);

	wake_up(&hrt->hrt_waitq);
}

void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
	assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
	assert_spin_locked(&rs->rs_lock);
	LASSERT(rs->rs_difficult);
	rs->rs_scheduled_ever = 1;	/* flag any notification attempt */

	if (rs->rs_scheduled) {	/* being set up or already notified */
		return;
	}

	rs->rs_scheduled = 1;
	list_del_init(&rs->rs_list);
	ptlrpc_dispatch_difficult_reply(rs);
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);

static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_request_buffer_desc *rqbd;
	int rc;
	int posted = 0;

	for (;;) {
		spin_lock(&svcpt->scp_lock);

		if (list_empty(&svcpt->scp_rqbd_idle)) {
			spin_unlock(&svcpt->scp_lock);
			return posted;
		}

		rqbd = list_entry(svcpt->scp_rqbd_idle.next,
				  struct ptlrpc_request_buffer_desc,
				  rqbd_list);
		list_del(&rqbd->rqbd_list);

		/* assume we will post successfully */
		svcpt->scp_nrqbds_posted++;
		list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);

		spin_unlock(&svcpt->scp_lock);

		rc = ptlrpc_register_rqbd(rqbd);
		if (rc != 0)
			break;

		posted = 1;
	}

	spin_lock(&svcpt->scp_lock);

	svcpt->scp_nrqbds_posted--;
	list_del(&rqbd->rqbd_list);
	list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);

	/* Don't complain if no request buffers are posted right now; LNET
	 * won't drop requests because we set the portal lazy!
	 */

	spin_unlock(&svcpt->scp_lock);

	return -1;
}

static void ptlrpc_at_timer(unsigned long castmeharder)
{
	struct ptlrpc_service_part *svcpt;

	svcpt = (struct ptlrpc_service_part *)castmeharder;

	svcpt->scp_at_check = 1;
	svcpt->scp_at_checktime = cfs_time_current();
	wake_up(&svcpt->scp_waitq);
}

static void
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
			     struct ptlrpc_service_conf *conf)
{
	struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
	unsigned int init;
	unsigned int total;
	unsigned int nthrs;
	int weight;

	/*
	 * Common code for estimating & validating threads number.
	 * CPT affinity service could have percpt thread-pool instead
	 * of a global thread-pool, which means user might not always
	 * get the threads number they give it in conf::tc_nthrs_user
	 * even if they did set it. That is because we need to validate
	 * the threads number for each CPT to guarantee each pool will
	 * have enough threads to keep the service healthy.
	 */
	init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
	init = max_t(int, init, tc->tc_nthrs_init);

	/* NB: please see comments in lustre_lnet.h for definition
	 * details of these members
	 */
	LASSERT(tc->tc_nthrs_max != 0);

	if (tc->tc_nthrs_user != 0) {
		/* In case there is a reason to test a service with many
		 * threads, we give a less strict check here, it can
		 * be up to 8 * nthrs_max
		 */
		total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
		nthrs = total / svc->srv_ncpts;
		init = max(init, nthrs);
		goto out;
	}

	total = tc->tc_nthrs_max;
	if (tc->tc_nthrs_base == 0) {
		/* don't care about base threads number per partition,
		 * this is most for non-affinity service
		 */
		nthrs = total / svc->srv_ncpts;
		goto out;
	}

	nthrs = tc->tc_nthrs_base;
	if (svc->srv_ncpts == 1) {
		int i;

		/* NB: Increase the base number if it's single partition
		 * and total number of cores/HTs is larger or equal to 4.
		 * result will always be < 2 * nthrs_base
		 */
		weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
		for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
			    (tc->tc_nthrs_base >> i) != 0; i++)
			nthrs += tc->tc_nthrs_base >> i;
	}

	if (tc->tc_thr_factor != 0) {
		int factor = tc->tc_thr_factor;
		const int fade = 4;

		/*
		 * User wants to increase the number of threads for each
		 * CPU core/HT, most likely the factor is larger than one
		 * thread/core because service threads are supposed to be
		 * blocked on locks or waiting for IO.
		 */
		/*
		 * Amdahl's law says that adding processors doesn't give
		 * a linear increase in parallelism, so it's nonsense to
		 * have too many threads no matter how many cores/HTs
		 * there are.
		 */
		/* weight is # of HTs */
		if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
			/* depress thread factor for hyper-thread */
			factor = factor - (factor >> 1) + (factor >> 3);
		}

		weight = cfs_cpt_weight(svc->srv_cptable, 0);
		LASSERT(weight > 0);

		for (; factor > 0 && weight > 0; factor--, weight -= fade)
			nthrs += min(weight, fade) * factor;
	}

	if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
		nthrs = max(tc->tc_nthrs_base,
			    tc->tc_nthrs_max / svc->srv_ncpts);
	}
out:
	nthrs = max(nthrs, tc->tc_nthrs_init);
	svc->srv_nthrs_cpt_limit = nthrs;
	svc->srv_nthrs_cpt_init = init;

	if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
		CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
		       svc->srv_name, nthrs * svc->srv_ncpts,
		       tc->tc_nthrs_max);
	}
}
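
/*
 * Worked example, illustrative and not from the original source: on a
 * single-CPT node with 16 cores/HTs and tc_nthrs_base = 8, the single
 * partition loop above adds 8>>1 + 8>>2 + 8>>3 = 4 + 2 + 1, giving
 * nthrs = 15, which stays below the documented 2 * nthrs_base bound of 16.
 */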

/**
 * Initialize percpt data for a service
 */
static int
ptlrpc_service_part_init(struct ptlrpc_service *svc,
			 struct ptlrpc_service_part *svcpt, int cpt)
{
	struct ptlrpc_at_array *array;
	int size;
	int index;
	int rc;

	svcpt->scp_cpt = cpt;
	INIT_LIST_HEAD(&svcpt->scp_threads);

	/* rqbd and incoming request queue */
	spin_lock_init(&svcpt->scp_lock);
	INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
	INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
	INIT_LIST_HEAD(&svcpt->scp_req_incoming);
	init_waitqueue_head(&svcpt->scp_waitq);
	/* history request & rqbd list */
	INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
	INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);

	/* active requests and hp requests */
	spin_lock_init(&svcpt->scp_req_lock);

	/* reply states */
	spin_lock_init(&svcpt->scp_rep_lock);
	INIT_LIST_HEAD(&svcpt->scp_rep_active);
	INIT_LIST_HEAD(&svcpt->scp_rep_idle);
	init_waitqueue_head(&svcpt->scp_rep_waitq);
	atomic_set(&svcpt->scp_nreps_difficult, 0);

	/* adaptive timeout */
	spin_lock_init(&svcpt->scp_at_lock);
	array = &svcpt->scp_at_array;

	size = at_est2timeout(at_max);
	array->paa_size = size;
	array->paa_count = 0;
	array->paa_deadline = -1;

	/* allocate memory for scp_at_array (ptlrpc_at_array) */
	array->paa_reqs_array =
		kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
	if (!array->paa_reqs_array)
		return -ENOMEM;

	for (index = 0; index < size; index++)
		INIT_LIST_HEAD(&array->paa_reqs_array[index]);

	array->paa_reqs_count =
		kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
			     cfs_cpt_spread_node(svc->srv_cptable, cpt));
	if (!array->paa_reqs_count)
		goto free_reqs_array;

	setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
		    (unsigned long)svcpt);

	/* At SOW, service time should be quick; 10s seems generous. If client
	 * timeout is less than this, we'll be sending an early reply.
	 */
	at_init(&svcpt->scp_at_estimate, 10, 0);

	/* assign this before call ptlrpc_grow_req_bufs */
	svcpt->scp_service = svc;
	/* Now allocate the request buffers, but don't post them now */
	rc = ptlrpc_grow_req_bufs(svcpt, 0);
	/* We shouldn't be under memory pressure at startup, so
	 * fail if we can't allocate all our buffers at this time.
	 */
	if (rc != 0)
		goto free_reqs_count;

	return 0;

free_reqs_count:
	kfree(array->paa_reqs_count);
	array->paa_reqs_count = NULL;
free_reqs_array:
	kfree(array->paa_reqs_array);
	array->paa_reqs_array = NULL;

	return -ENOMEM;
}
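
/*
 * Sizing note, illustrative and not from the original source: assuming the
 * usual at_est2timeout() definition of val + val/4 + 5 and the commonly
 * cited at_max default of 600 seconds, the adaptive-timeout array above
 * gets paa_size = 600 + 150 + 5 = 755 slots, one per second of the largest
 * possible deadline window.
 */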

/**
 * Initialize service on a given portal.
 * This includes starting serving threads, allocating and posting rqbds and
 * so on.
 */
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
			struct kset *parent,
			struct dentry *debugfs_entry)
{
	struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
	struct ptlrpc_service *service;
	struct ptlrpc_service_part *svcpt;
	struct cfs_cpt_table *cptable;
	__u32 *cpts = NULL;
	int ncpts;
	int cpt;
	int rc;
	int i;

	LASSERT(conf->psc_buf.bc_nbufs > 0);
	LASSERT(conf->psc_buf.bc_buf_size >=
		conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
	LASSERT(conf->psc_thr.tc_ctx_tags != 0);

	cptable = cconf->cc_cptable;
	if (!cptable)
		cptable = cfs_cpt_table;

	if (!conf->psc_thr.tc_cpu_affinity) {
		ncpts = 1;
	} else {
		ncpts = cfs_cpt_number(cptable);
		if (cconf->cc_pattern) {
			struct cfs_expr_list *el;

			rc = cfs_expr_list_parse(cconf->cc_pattern,
						 strlen(cconf->cc_pattern),
						 0, ncpts - 1, &el);
			if (rc != 0) {
				CERROR("%s: invalid CPT pattern string: %s",
				       conf->psc_name, cconf->cc_pattern);
				return ERR_PTR(-EINVAL);
			}

			rc = cfs_expr_list_values(el, ncpts, &cpts);
			cfs_expr_list_free(el);
			if (rc <= 0) {
				CERROR("%s: failed to parse CPT array %s: %d\n",
				       conf->psc_name, cconf->cc_pattern, rc);
				kfree(cpts);
				return ERR_PTR(rc < 0 ? rc : -EINVAL);
			}
			ncpts = rc;
		}
	}

	service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
			  GFP_NOFS);
	if (!service) {
		kfree(cpts);
		return ERR_PTR(-ENOMEM);
	}

	service->srv_cptable = cptable;
	service->srv_cpts = cpts;
	service->srv_ncpts = ncpts;

	service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
	while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
		service->srv_cpt_bits++;

	/* public members */
	spin_lock_init(&service->srv_lock);
	service->srv_name = conf->psc_name;
	service->srv_watchdog_factor = conf->psc_watchdog_factor;
	INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */

	/* buffer configuration */
	service->srv_nbuf_per_group = test_req_buffer_pressure ?
				      1 : conf->psc_buf.bc_nbufs;
	service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
				    SPTLRPC_MAX_PAYLOAD;
	service->srv_buf_size = conf->psc_buf.bc_buf_size;
	service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
	service->srv_req_portal = conf->psc_buf.bc_req_portal;

	/* Increase max reply size to next power of two */
	service->srv_max_reply_size = 1;
	while (service->srv_max_reply_size <
	       conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
		service->srv_max_reply_size <<= 1;

	service->srv_thread_name = conf->psc_thr.tc_thr_name;
	service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
	service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
	service->srv_ops = conf->psc_ops;

	for (i = 0; i < ncpts; i++) {
		if (!conf->psc_thr.tc_cpu_affinity)
			cpt = CFS_CPT_ANY;
		else
			cpt = cpts ? cpts[i] : i;

		svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
				     cfs_cpt_spread_node(cptable, cpt));
		if (!svcpt) {
			rc = -ENOMEM;
			goto failed;
		}

		service->srv_parts[i] = svcpt;
		rc = ptlrpc_service_part_init(service, svcpt, cpt);
		if (rc != 0)
			goto failed;
	}

	ptlrpc_server_nthreads_check(service, conf);

	rc = LNetSetLazyPortal(service->srv_req_portal);
	LASSERT(rc == 0);

	mutex_lock(&ptlrpc_all_services_mutex);
	list_add(&service->srv_list, &ptlrpc_all_services);
	mutex_unlock(&ptlrpc_all_services_mutex);

	if (parent) {
		rc = ptlrpc_sysfs_register_service(parent, service);
		if (rc != 0)
			goto failed;
	}

	if (!IS_ERR_OR_NULL(debugfs_entry))
		ptlrpc_ldebugfs_register_service(debugfs_entry, service);

	rc = ptlrpc_service_nrs_setup(service);
	if (rc != 0)
		goto failed;

	CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
	       service->srv_name, service->srv_req_portal);

	rc = ptlrpc_start_threads(service);
	if (rc != 0) {
		CERROR("Failed to start threads for service %s: %d\n",
		       service->srv_name, rc);
		goto failed;
	}

	return service;
failed:
	ptlrpc_unregister_service(service);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ptlrpc_register_service);

/**
 * to actually free the request, must be called without holding svc_lock.
 * note it is the caller's responsibility to unlink req->rq_list.
 */
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
	LASSERT(atomic_read(&req->rq_refcount) == 0);
	LASSERT(list_empty(&req->rq_timed_list));

	/* DEBUG_REQ() assumes the reply state of a request with a valid
	 * ref will not be destroyed until that reference is dropped.
	 */
	ptlrpc_req_drop_rs(req);

	sptlrpc_svc_ctx_decref(req);

	if (req != &req->rq_rqbd->rqbd_req) {
		/* NB request buffers use an embedded
		 * req if the incoming req unlinked the
		 * MD; this isn't one of them!
		 */
		ptlrpc_request_cache_free(req);
	}
}

/**
 * drop a reference count of the request. If it reaches 0, we either
 * put it into the history list, or free it immediately.
 */
static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
{
	struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
	int refcount;
	struct list_head *tmp;
	struct list_head *nxt;

	if (!atomic_dec_and_test(&req->rq_refcount))
		return;

	if (req->rq_at_linked) {
		spin_lock(&svcpt->scp_at_lock);
		/* recheck with lock, in case it's unlinked by
		 * ptlrpc_at_check_timed()
		 */
		if (likely(req->rq_at_linked))
			ptlrpc_at_remove_timed(req);
		spin_unlock(&svcpt->scp_at_lock);
	}

	LASSERT(list_empty(&req->rq_timed_list));

	/* finalize request */
	if (req->rq_export) {
		class_export_put(req->rq_export);
		req->rq_export = NULL;
	}

	spin_lock(&svcpt->scp_lock);

	list_add(&req->rq_list, &rqbd->rqbd_reqs);

	refcount = --(rqbd->rqbd_refcount);
	if (refcount == 0) {
		/* request buffer is now idle: add to history */
		list_del(&rqbd->rqbd_list);

		list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
		svcpt->scp_hist_nrqbds++;

		/* cull some history?
		 * I expect only about 1 or 2 rqbds need to be recycled here
		 */
		while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
			rqbd = list_entry(svcpt->scp_hist_rqbds.next,
					  struct ptlrpc_request_buffer_desc,
					  rqbd_list);

			list_del(&rqbd->rqbd_list);
			svcpt->scp_hist_nrqbds--;

			/* remove rqbd's reqs from svc's req history while
			 * I've got the service lock
			 */
			list_for_each(tmp, &rqbd->rqbd_reqs) {
				req = list_entry(tmp, struct ptlrpc_request,
						 rq_list);
				/* Track the highest culled req seq */
				if (req->rq_history_seq >
				    svcpt->scp_hist_seq_culled) {
					svcpt->scp_hist_seq_culled =
						req->rq_history_seq;
				}
				list_del(&req->rq_history_list);
			}

			spin_unlock(&svcpt->scp_lock);

			list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
				req = list_entry(rqbd->rqbd_reqs.next,
						 struct ptlrpc_request,
						 rq_list);
				list_del(&req->rq_list);
				ptlrpc_server_free_request(req);
			}

			spin_lock(&svcpt->scp_lock);
			/*
			 * now all reqs including the embedded req have been
			 * disposed of, schedule request buffer for re-use.
			 */
			LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
				0);
			list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
		}

		spin_unlock(&svcpt->scp_lock);
	} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
		/* If we are low on memory, we are not interested in history */
		list_del(&req->rq_list);
		list_del_init(&req->rq_history_list);

		/* Track the highest culled req seq */
		if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
			svcpt->scp_hist_seq_culled = req->rq_history_seq;

		spin_unlock(&svcpt->scp_lock);

		ptlrpc_server_free_request(req);
	} else {
		spin_unlock(&svcpt->scp_lock);
	}
}

/**
 * to finish a request: stop sending more early replies, and release
 * the request. should be called after we finished handling the request.
 */
static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
					 struct ptlrpc_request *req)
{
	ptlrpc_server_hpreq_fini(req);

	if (req->rq_session.lc_thread) {
		lu_context_exit(&req->rq_session);
		lu_context_fini(&req->rq_session);
	}

	ptlrpc_server_drop_request(req);
}

/**
 * to finish an active request: stop sending more early replies, and release
 * the request. should be called after we finished handling the request.
 */
static void ptlrpc_server_finish_active_request(
					struct ptlrpc_service_part *svcpt,
					struct ptlrpc_request *req)
{
	spin_lock(&svcpt->scp_req_lock);
	ptlrpc_nrs_req_stop_nolock(req);
	svcpt->scp_nreqs_active--;
	if (req->rq_hp)
		svcpt->scp_nhreqs_active--;
	spin_unlock(&svcpt->scp_req_lock);

	ptlrpc_nrs_req_finalize(req);

	if (req->rq_export)
		class_export_rpc_dec(req->rq_export);

	ptlrpc_server_finish_request(svcpt, req);
}

/**
 * Sanity check request \a req.
 * Return 0 if all is ok, error code otherwise.
 */
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
	struct obd_device *obd = req->rq_export->exp_obd;
	int rc = 0;

	if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
		     req->rq_export->exp_conn_cnt)) {
		DEBUG_REQ(D_RPCTRACE, req,
			  "DROPPING req from old connection %d < %d",
			  lustre_msg_get_conn_cnt(req->rq_reqmsg),
			  req->rq_export->exp_conn_cnt);
		return -EEXIST;
	}
	if (unlikely(!obd || obd->obd_fail)) {
		/*
		 * Failing over, don't handle any more reqs, send
		 * error response instead.
		 */
		CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
		       req, obd ? obd->obd_name : "unknown");
		rc = -ENODEV;
	} else if (lustre_msg_get_flags(req->rq_reqmsg) &
		   (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) {
		DEBUG_REQ(D_ERROR, req, "Invalid replay without recovery");
		class_fail_export(req->rq_export);
		rc = -ENODEV;
	} else if (lustre_msg_get_transno(req->rq_reqmsg) != 0) {
		DEBUG_REQ(D_ERROR, req,
			  "Invalid req with transno %llu without recovery",
			  lustre_msg_get_transno(req->rq_reqmsg));
		class_fail_export(req->rq_export);
		rc = -ENODEV;
	}

	if (unlikely(rc < 0)) {
		req->rq_status = rc;
		ptlrpc_error(req);
	}
	return rc;
}

static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	__s32 next;

	if (array->paa_count == 0) {
		del_timer(&svcpt->scp_at_timer);
		return;
	}

	/* Set timer for closest deadline */
	next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
		       at_early_margin);
	if (next <= 0) {
		ptlrpc_at_timer((unsigned long)svcpt);
	} else {
		mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
		CDEBUG(D_INFO, "armed %s at %+ds\n",
		       svcpt->scp_service->srv_name, next);
	}
}
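
/*
 * Illustrative example, not from the original source: with the commonly
 * cited at_early_margin default of 5 seconds, a nearest deadline 30 seconds
 * away arms the timer for +25s, so the early-reply scan runs while the
 * margin is still in hand; a deadline already inside the margin fires the
 * check immediately through ptlrpc_at_timer().
 */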

/* Add rpc to early reply check list */
static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	struct ptlrpc_request *rq = NULL;
	__u32 index;

	if (AT_OFF)
		return 0;

	if (req->rq_no_reply)
		return 0;

	if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
		return -ENOSYS;

	spin_lock(&svcpt->scp_at_lock);
	LASSERT(list_empty(&req->rq_timed_list));

	div_u64_rem(req->rq_deadline, array->paa_size, &index);
	if (array->paa_reqs_count[index] > 0) {
		/* latest rpcs will have the latest deadlines in the list,
		 * so search backward.
		 */
		list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
					    rq_timed_list) {
			if (req->rq_deadline >= rq->rq_deadline) {
				list_add(&req->rq_timed_list,
					 &rq->rq_timed_list);
				break;
			}
		}
	}

	/* Add the request at the head of the list */
	if (list_empty(&req->rq_timed_list))
		list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);

	spin_lock(&req->rq_lock);
	req->rq_at_linked = 1;
	spin_unlock(&req->rq_lock);
	req->rq_at_index = index;
	array->paa_reqs_count[index]++;
	array->paa_count++;
	if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
		array->paa_deadline = req->rq_deadline;
		ptlrpc_at_set_timer(svcpt);
	}
	spin_unlock(&svcpt->scp_at_lock);

	return 0;
}
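
/*
 * Indexing note, illustrative and not from the original source: the array
 * is a ring keyed by the absolute deadline modulo paa_size; e.g. with
 * paa_size = 755, a request whose deadline is 1000000 lands in slot
 * 1000000 % 755 = 380. Because paa_size covers the largest possible
 * timeout window, two live requests can only collide in a slot when they
 * share the same deadline second.
 */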

static void
ptlrpc_at_remove_timed(struct ptlrpc_request *req)
{
	struct ptlrpc_at_array *array;

	array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;

	/* NB: must be called while holding svcpt::scp_at_lock */
	LASSERT(!list_empty(&req->rq_timed_list));
	list_del_init(&req->rq_timed_list);

	spin_lock(&req->rq_lock);
	req->rq_at_linked = 0;
	spin_unlock(&req->rq_lock);

	array->paa_reqs_count[req->rq_at_index]--;
	array->paa_count--;
}

/*
 * Attempt to extend the request deadline by sending an early reply to the
 * client.
 */
static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_request *reqcopy;
	struct lustre_msg *reqmsg;
	long olddl = req->rq_deadline - ktime_get_real_seconds();
	time64_t newdl;
	int rc;

	/* deadline is when the client expects us to reply, margin is the
	 * difference between clients' and servers' expectations
	 */
	DEBUG_REQ(D_ADAPTTO, req,
		  "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
		  AT_OFF ? "AT off - not " : "",
		  olddl, olddl - at_get(&svcpt->scp_at_estimate),
		  at_get(&svcpt->scp_at_estimate), at_extra);

	if (AT_OFF)
		return 0;

	if (olddl < 0) {
		DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
			  olddl, at_early_margin);

		/* Return an error so we're not re-added to the timed list. */
		return -ETIMEDOUT;
	}

	if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
		return -ENOSYS;
	}

	/*
	 * We want to extend the request deadline by at_extra seconds,
	 * so we set our service estimate to reflect how much time has
	 * passed since this request arrived plus an additional
	 * at_extra seconds. The client will calculate the new deadline
	 * based on this service estimate (plus some additional time to
	 * account for network latency). See ptlrpc_at_recv_early_reply
	 */
	at_measured(&svcpt->scp_at_estimate, at_extra +
		    ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
	newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate);

	/* Check to see if we've actually increased the deadline -
	 * we may be past adaptive_max
	 */
	if (req->rq_deadline >= newdl) {
		DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
			  olddl, newdl - ktime_get_real_seconds());
		return -ETIMEDOUT;
	}

	reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!reqcopy)
		return -ENOMEM;
	reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS);
	if (!reqmsg) {
		rc = -ENOMEM;
		goto out_free;
	}

	*reqcopy = *req;
	reqcopy->rq_reply_state = NULL;
	reqcopy->rq_rep_swab_mask = 0;
	reqcopy->rq_pack_bulk = 0;
	reqcopy->rq_pack_udesc = 0;
	reqcopy->rq_packed_final = 0;
	sptlrpc_svc_ctx_addref(reqcopy);
	/* We only need the reqmsg for the magic */
	reqcopy->rq_reqmsg = reqmsg;
	memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);

	LASSERT(atomic_read(&req->rq_refcount));
	/** if it is last refcount then early reply isn't needed */
	if (atomic_read(&req->rq_refcount) == 1) {
		DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
		rc = -EINVAL;
		goto out;
	}

	/* Connection ref */
	reqcopy->rq_export = class_conn2export(
			lustre_msg_get_handle(reqcopy->rq_reqmsg));
	if (!reqcopy->rq_export) {
		rc = -ENODEV;
		goto out;
	}

	/* RPC ref */
	class_export_rpc_inc(reqcopy->rq_export);
	if (reqcopy->rq_export->exp_obd &&
	    reqcopy->rq_export->exp_obd->obd_fail) {
		rc = -ENODEV;
		goto out_put;
	}

	rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
	if (rc)
		goto out_put;

	rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);

	if (!rc) {
		/* Adjust our own deadline to what we told the client */
		req->rq_deadline = newdl;
		req->rq_early_count++; /* number sent, server side */
	} else {
		DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
	}

	/* Free the (early) reply state from lustre_pack_reply.
	 * (ptlrpc_send_reply takes its own rs ref, so this is safe here)
	 */
	ptlrpc_req_drop_rs(reqcopy);

out_put:
	class_export_rpc_dec(reqcopy->rq_export);
	class_export_put(reqcopy->rq_export);
out:
	sptlrpc_svc_ctx_decref(reqcopy);
	kvfree(reqmsg);
out_free:
	ptlrpc_request_cache_free(reqcopy);
	return rc;
}
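
/*
 * Worked arithmetic, illustrative and not from the original source: for a
 * request that arrived 20s ago with at_extra = 30, at_measured() above
 * feeds a 50s sample into the service estimate, so the new deadline
 * becomes at least arrival + 50s (subject to the at_max clamp). If that is
 * not later than the current deadline, e.g. the estimate is already pinned
 * at at_max, the function bails out with -ETIMEDOUT rather than sending a
 * useless early reply.
 */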

/* Send early replies to everybody expiring within at_early_margin
 * asking for at_extra time
 */
static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
{
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	struct ptlrpc_request *rq, *n;
	struct list_head work_list;
	__u32 index, count;
	time64_t deadline;
	time64_t now = ktime_get_real_seconds();
	long delay;
	int first, counter = 0;

	spin_lock(&svcpt->scp_at_lock);
	if (svcpt->scp_at_check == 0) {
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}
	delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
	svcpt->scp_at_check = 0;

	if (array->paa_count == 0) {
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}

	/* The timer went off, but maybe the nearest rpc already completed. */
	first = array->paa_deadline - now;
	if (first > at_early_margin) {
		/* We've still got plenty of time. Reset the timer. */
		ptlrpc_at_set_timer(svcpt);
		spin_unlock(&svcpt->scp_at_lock);
		return;
	}

	/* We're close to a timeout, and we don't know how much longer the
	 * server will take. Send early replies to everyone expiring soon.
	 */
	INIT_LIST_HEAD(&work_list);
	deadline = -1;
	div_u64_rem(array->paa_deadline, array->paa_size, &index);
	count = array->paa_count;
	while (count > 0) {
		count -= array->paa_reqs_count[index];
		list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
					 rq_timed_list) {
			if (rq->rq_deadline > now + at_early_margin) {
				/* update the earliest deadline */
				if (deadline == -1 ||
				    rq->rq_deadline < deadline)
					deadline = rq->rq_deadline;
				break;
			}

			ptlrpc_at_remove_timed(rq);
			/**
			 * ptlrpc_server_drop_request() may drop
			 * refcount to 0 already. Let's check this and
			 * don't add entry to work_list
			 */
			if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
				list_add(&rq->rq_timed_list, &work_list);
			counter++;
		}

		if (++index >= array->paa_size)
			index = 0;
	}
	array->paa_deadline = deadline;
	/* we have a new earliest deadline, restart the timer */
	ptlrpc_at_set_timer(svcpt);

	spin_unlock(&svcpt->scp_at_lock);

	CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
	       first, at_extra, counter);
	if (first < 0) {
		/* We're already past request deadlines before we even get a
		 * chance to send early replies
		 */
		LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
			      svcpt->scp_service->srv_name);
		CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
		      counter, svcpt->scp_nreqs_incoming,
		      svcpt->scp_nreqs_active,
		      at_get(&svcpt->scp_at_estimate), delay);
	}

	/* we took additional refcount so entries can't be deleted from list,
	 * no locking is needed
	 */
	while (!list_empty(&work_list)) {
		rq = list_entry(work_list.next, struct ptlrpc_request,
				rq_timed_list);
		list_del_init(&rq->rq_timed_list);

		if (ptlrpc_at_send_early_reply(rq) == 0)
			ptlrpc_at_add_timed(rq);

		ptlrpc_server_drop_request(rq);
	}
}

/**
 * Put the request to the export list if the request may become
 * a high priority one.
 */
static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
				    struct ptlrpc_request *req)
{
	int rc = 0;

	if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
		rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
		if (rc < 0)
			return rc;
		LASSERT(rc == 0);
	}
	if (req->rq_export && req->rq_ops) {
		/* Perform request specific check. We should do this check
		 * before the request is added into exp_hp_rpcs list otherwise
		 * it may hit swab race at LU-1044.
		 */
		if (req->rq_ops->hpreq_check) {
			rc = req->rq_ops->hpreq_check(req);
			if (rc == -ESTALE) {
				req->rq_status = rc;
				ptlrpc_error(req);
			}
			/** can only return error,
			 * 0 for normal request,
			 * or 1 for high priority request
			 */
			LASSERT(rc <= 1);
		}

		spin_lock_bh(&req->rq_export->exp_rpc_lock);
		list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs);
		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}

	ptlrpc_nrs_req_initialize(svcpt, req, rc);

	return rc;
}

/** Remove the request from the export list. */
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
	if (req->rq_export && req->rq_ops) {
		/* refresh lock timeout again so that client has more
		 * room to send lock cancel RPC.
		 */
		if (req->rq_ops->hpreq_fini)
			req->rq_ops->hpreq_fini(req);

		spin_lock_bh(&req->rq_export->exp_rpc_lock);
		list_del_init(&req->rq_exp_list);
		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}
}

static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
				     struct ptlrpc_request *req)
{
	int rc;

	rc = ptlrpc_server_hpreq_init(svcpt, req);
	if (rc < 0)
		return rc;

	ptlrpc_nrs_req_add(svcpt, req, !!rc);

	return 0;
}

/**
 * Allow handling of a high priority request
 * User can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_req_lock to get reliable result
 */
static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
				     bool force)
{
	int running = svcpt->scp_nthrs_running;

	if (!nrs_svcpt_has_hp(svcpt))
		return false;

	if (force)
		return true;

	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
		running = PTLRPC_NTHRS_INIT;
		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
			running += 1;
	}

	if (svcpt->scp_nreqs_active >= running - 1)
		return false;

	if (svcpt->scp_nhreqs_active == 0)
		return true;

	return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
	       svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
}

static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
				       bool force)
{
	return ptlrpc_server_allow_high(svcpt, force) &&
	       ptlrpc_nrs_req_pending_nolock(svcpt, true);
}

/**
 * Only allow normal priority requests on a service that has a high-priority
 * queue if forced (i.e. cleanup), if there are other high priority requests
 * already being processed (i.e. those threads can service more high-priority
 * requests), or if there are enough idle threads that a later thread can do
 * a high priority request.
 * User can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_req_lock to get reliable result
 */
static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
				       bool force)
{
	int running = svcpt->scp_nthrs_running;

	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
		running = PTLRPC_NTHRS_INIT;
		if (svcpt->scp_service->srv_ops.so_hpreq_handler)
			running += 1;
	}

	if (force ||
	    svcpt->scp_nreqs_active < running - 2)
		return true;

	if (svcpt->scp_nreqs_active >= running - 1)
		return false;

	return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
}

static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
					 bool force)
{
	return ptlrpc_server_allow_normal(svcpt, force) &&
	       ptlrpc_nrs_req_pending_nolock(svcpt, false);
}
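
/*
 * Illustrative example, not from the original source: with 10 running
 * threads, normal requests flow freely while fewer than 8 are busy; with
 * exactly 8 busy, another normal request is taken only if high-priority
 * work is already in flight (or the service has no HP queue at all), and
 * at 9 busy the last thread is always held back, mirroring the one-thread
 * reserve in ptlrpc_server_allow_high().
 */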

/**
 * Returns true if there are requests available in incoming
 * request queue for processing and it is allowed to fetch them.
 * User can call it w/o any lock but need to hold ptlrpc_service::scp_req_lock
 * to get reliable result
 * \see ptlrpc_server_allow_normal
 * \see ptlrpc_server_allow_high
 */
static inline bool
ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
{
	return ptlrpc_server_high_pending(svcpt, force) ||
	       ptlrpc_server_normal_pending(svcpt, force);
}

/**
 * Fetch a request for processing from queue of unprocessed requests.
 * Favors high-priority requests.
 * Returns a pointer to fetched request.
 */
static struct ptlrpc_request *
ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
{
	struct ptlrpc_request *req = NULL;

	spin_lock(&svcpt->scp_req_lock);

	if (ptlrpc_server_high_pending(svcpt, force)) {
		req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
		if (req) {
			svcpt->scp_hreq_count++;
			goto got_request;
		}
	}

	if (ptlrpc_server_normal_pending(svcpt, force)) {
		req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
		if (req) {
			svcpt->scp_hreq_count = 0;
			goto got_request;
		}
	}

	spin_unlock(&svcpt->scp_req_lock);
	return NULL;

got_request:
	svcpt->scp_nreqs_active++;
	if (req->rq_hp)
		svcpt->scp_nhreqs_active++;

	spin_unlock(&svcpt->scp_req_lock);

	if (likely(req->rq_export))
		class_export_rpc_inc(req->rq_export);

	return req;
}

/**
 * Handle freshly incoming reqs, add to timed early reply list,
 * pass on to regular request queue.
 * All incoming requests pass through here before getting into
 * ptlrpc_server_handle_request() later on.
 */
static int
ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
			    struct ptlrpc_thread *thread)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request *req;
	__u32 deadline;
	int rc;

	spin_lock(&svcpt->scp_lock);
	if (list_empty(&svcpt->scp_req_incoming)) {
		spin_unlock(&svcpt->scp_lock);
		return 0;
	}

	req = list_entry(svcpt->scp_req_incoming.next,
			 struct ptlrpc_request, rq_list);
	list_del_init(&req->rq_list);
	svcpt->scp_nreqs_incoming--;
	/* Consider this still a "queued" request as far as stats are
	 * concerned
	 */
	spin_unlock(&svcpt->scp_lock);

	/* go through security check/transform */
	rc = sptlrpc_svc_unwrap_request(req);
	switch (rc) {
	case SECSVC_OK:
		break;
	case SECSVC_COMPLETE:
		target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
		goto err_req;
	case SECSVC_DROP:
		goto err_req;
	}

	/*
	 * for null-flavored rpc, msg has been unpacked by sptlrpc, although
	 * redoing it wouldn't be harmful.
	 */
	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
		rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
		if (rc != 0) {
			CERROR("error unpacking request: ptl %d from %s x%llu\n",
			       svc->srv_req_portal, libcfs_id2str(req->rq_peer),
			       req->rq_xid);
			goto err_req;
		}
	}

	rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
	if (rc) {
		CERROR("error unpacking ptlrpc body: ptl %d from %s x%llu\n",
		       svc->srv_req_portal, libcfs_id2str(req->rq_peer),
		       req->rq_xid);
		goto err_req;
	}

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
		CERROR("drop incoming rpc opc %u, x%llu\n",
		       cfs_fail_val, req->rq_xid);
		goto err_req;
	}

	rc = -EINVAL;
	if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
		CERROR("wrong packet type received (type=%u) from %s\n",
		       lustre_msg_get_type(req->rq_reqmsg),
		       libcfs_id2str(req->rq_peer));
		goto err_req;
	}

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case MDS_WRITEPAGE:
	case OST_WRITE:
		req->rq_bulk_write = 1;
		break;
	case MDS_READPAGE:
	case OST_READ:
	case MGS_CONFIG_READ:
		req->rq_bulk_read = 1;
		break;
	}

	CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);

	req->rq_export = class_conn2export(
		lustre_msg_get_handle(req->rq_reqmsg));
	if (req->rq_export) {
		rc = ptlrpc_check_req(req);
		if (rc == 0) {
			rc = sptlrpc_target_export_check(req->rq_export, req);
			if (rc)
				DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
		}

		if (rc)
			goto err_req;
	}

	/* req_in handling should/must be fast */
	if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
		DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
			  (s64)(ktime_get_real_seconds() -
				req->rq_arrival_time.tv_sec));

	/* Set rpc server deadline and add it to the timed list */
	deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
		    MSGHDR_AT_SUPPORT) ?
		   /* The max time the client expects us to take */
		   lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
	req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
	if (unlikely(deadline == 0)) {
		DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
		goto err_req;
	}

	req->rq_svc_thread = thread;
	if (thread) {
		/* initialize request session, it is needed for request
		 * processing by target
		 */
		rc = lu_context_init(&req->rq_session,
				     LCT_SERVER_SESSION | LCT_NOREF);
		if (rc) {
			CERROR("%s: failure to initialize session: rc = %d\n",
			       thread->t_name, rc);
			goto err_req;
		}
		req->rq_session.lc_thread = thread;
		lu_context_enter(&req->rq_session);
		req->rq_svc_thread->t_env->le_ses = &req->rq_session;
	}

	ptlrpc_at_add_timed(req);

	/* Move it over to the request processing queue */
	rc = ptlrpc_server_request_add(svcpt, req);
	if (rc)
		goto err_req;

	wake_up(&svcpt->scp_waitq);
	return 1;

err_req:
	ptlrpc_server_finish_request(svcpt, req);

	return 1;
}

/**
 * Main incoming request handling logic.
 * Calls handler function from service to do actual processing.
 */
static int
ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
			     struct ptlrpc_thread *thread)
{
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct ptlrpc_request *request;
	struct timespec64 work_start;
	struct timespec64 work_end;
	struct timespec64 timediff;
	struct timespec64 arrived;
	unsigned long timediff_usecs;
	unsigned long arrived_usecs;
	int fail_opc = 0;

	request = ptlrpc_server_request_get(svcpt, false);
	if (!request)
		return 0;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
		fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
	else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
		fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;

	if (unlikely(fail_opc)) {
		if (request->rq_export && request->rq_ops)
			OBD_FAIL_TIMEOUT(fail_opc, 4);
	}

	ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
		libcfs_debug_dumplog();

	ktime_get_real_ts64(&work_start);
	timediff = timespec64_sub(work_start, request->rq_arrival_time);
	timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
			 timediff.tv_nsec / NSEC_PER_USEC;
	if (likely(svc->srv_stats)) {
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
				    timediff_usecs);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
				    svcpt->scp_nreqs_incoming);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
				    svcpt->scp_nreqs_active);
		lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
				    at_get(&svcpt->scp_at_estimate));
	}

	if (likely(request->rq_export)) {
		if (unlikely(ptlrpc_check_req(request)))
			goto put_conn;
	}

	/* Discard requests queued for longer than the deadline.
	 * The deadline is increased if we send an early reply.
	 */
	if (ktime_get_real_seconds() > request->rq_deadline) {
		DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
			  libcfs_id2str(request->rq_peer),
			  request->rq_deadline -
			  request->rq_arrival_time.tv_sec,
			  ktime_get_real_seconds() - request->rq_deadline);
		goto put_conn;
	}

	CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
	       current_comm(),
	       (request->rq_export ?
		(char *)request->rq_export->exp_client_uuid.uuid : "0"),
	       (request->rq_export ?
		atomic_read(&request->rq_export->exp_refcount) : -99),
	       lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
	       libcfs_id2str(request->rq_peer),
	       lustre_msg_get_opc(request->rq_reqmsg));

	if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
		CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);

	CDEBUG(D_NET, "got req %llu\n", request->rq_xid);

	/* re-assign request and session thread to the current one */
	request->rq_svc_thread = thread;
	if (thread) {
		LASSERT(request->rq_session.lc_thread);
		request->rq_session.lc_thread = thread;
		request->rq_session.lc_cookie = 0x55;
		thread->t_env->le_ses = &request->rq_session;
	}
	svc->srv_ops.so_req_handler(request);

	ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);

put_conn:
	if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
		DEBUG_REQ(D_WARNING, request,
			  "Request took longer than estimated (%lld:%llds); "
			  "client may timeout.",
			  (s64)request->rq_deadline -
			       request->rq_arrival_time.tv_sec,
			  (s64)ktime_get_real_seconds() - request->rq_deadline);
	}

	ktime_get_real_ts64(&work_end);
	timediff = timespec64_sub(work_end, work_start);
	timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
			 timediff.tv_nsec / NSEC_PER_USEC;
	arrived = timespec64_sub(work_end, request->rq_arrival_time);
	arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
			arrived.tv_nsec / NSEC_PER_USEC;
	CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
	       current_comm(),
	       (request->rq_export ?
		(char *)request->rq_export->exp_client_uuid.uuid : "0"),
	       (request->rq_export ?
		atomic_read(&request->rq_export->exp_refcount) : -99),
	       lustre_msg_get_status(request->rq_reqmsg),
	       request->rq_xid,
	       libcfs_id2str(request->rq_peer),
	       lustre_msg_get_opc(request->rq_reqmsg),
	       timediff_usecs,
	       arrived_usecs,
	       (request->rq_repmsg ?
		lustre_msg_get_transno(request->rq_repmsg) :
		request->rq_transno),
	       request->rq_status,
	       (request->rq_repmsg ?
		lustre_msg_get_status(request->rq_repmsg) : -999));
	if (likely(svc->srv_stats && request->rq_reqmsg)) {
		__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
		int opc = opcode_offset(op);

		if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
			LASSERT(opc < LUSTRE_MAX_OPCODES);
			lprocfs_counter_add(svc->srv_stats,
					    opc + EXTRA_MAX_OPCODES,
					    timediff_usecs);
		}
	}
	if (unlikely(request->rq_early_count)) {
		DEBUG_REQ(D_ADAPTTO, request,
			  "sent %d early replies before finishing in %llds",
			  request->rq_early_count,
			  (s64)work_end.tv_sec -
			  request->rq_arrival_time.tv_sec);
	}

	ptlrpc_server_finish_active_request(svcpt, request);

	return 1;
}

/**
 * An internal function to process a single reply state object.
 */
static void
ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
{
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
	struct obd_export *exp;
	int nlocks;
	int been_handled;

	exp = rs->rs_export;

	LASSERT(rs->rs_difficult);
	LASSERT(rs->rs_scheduled);
	LASSERT(list_empty(&rs->rs_list));

	spin_lock(&exp->exp_lock);
	/* Noop if removed already */
	list_del_init(&rs->rs_exp_list);
	spin_unlock(&exp->exp_lock);

	/* The disk commit callback holds exp_uncommitted_replies_lock while it
	 * iterates over newly committed replies, removing them from
	 * exp_uncommitted_replies. It then drops this lock and schedules the
	 * replies it found for handling here.
	 *
	 * We can avoid contention for exp_uncommitted_replies_lock between the
	 * HRT threads and further commit callbacks by checking rs_committed
	 * which is set in the commit callback while it holds both
	 * rs_lock and exp_uncommitted_replies.
	 *
	 * If we see rs_committed clear, the commit callback _may_ not have
	 * handled this reply yet and we race with it to grab
	 * exp_uncommitted_replies_lock before removing the reply from
	 * exp_uncommitted_replies. Note that if we lose the race and the
	 * reply has already been removed, list_del_init() is a noop.
	 *
	 * If we see rs_committed set, we know the commit callback is handling,
	 * or has handled this reply since store reordering might allow us to
	 * see rs_committed set out of sequence. But since this is done
	 * holding rs_lock, we can be sure it has all completed once we hold
	 * rs_lock, which we do right next.
	 */
	if (!rs->rs_committed) {
		spin_lock(&exp->exp_uncommitted_replies_lock);
		list_del_init(&rs->rs_obd_list);
		spin_unlock(&exp->exp_uncommitted_replies_lock);
	}

	spin_lock(&rs->rs_lock);

	been_handled = rs->rs_handled;
	rs->rs_handled = 1;

	nlocks = rs->rs_nlocks;	/* atomic "steal", but */
	rs->rs_nlocks = 0;	/* locks still on rs_locks! */

	if (nlocks == 0 && !been_handled) {
		/* If we see this, we should already have seen the warning
		 * in mds_steal_ack_locks()
		 */
		CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
		       rs,
		       rs->rs_xid, rs->rs_transno, rs->rs_opc,
		       libcfs_nid2str(exp->exp_connection->c_peer.nid));
	}

	if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
		spin_unlock(&rs->rs_lock);

		if (!been_handled && rs->rs_on_net) {
			LNetMDUnlink(rs->rs_md_h);
			/* Ignore return code; we're racing with completion */
		}

		while (nlocks-- > 0)
			ldlm_lock_decref(&rs->rs_locks[nlocks],
					 rs->rs_modes[nlocks]);

		spin_lock(&rs->rs_lock);
	}

	rs->rs_scheduled = 0;

	if (!rs->rs_on_net) {
		/* Off the net */
		spin_unlock(&rs->rs_lock);

		class_export_put(exp);
		rs->rs_export = NULL;
		ptlrpc_rs_decref(rs);
		if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
		    svc->srv_is_stopping)
			wake_up_all(&svcpt->scp_waitq);
		return;
	}

	/* still on the net; callback will schedule */
	spin_unlock(&rs->rs_lock);
}

static void
ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
{
	int avail = svcpt->scp_nrqbds_posted;
	int low_water = test_req_buffer_pressure ? 0 :
			svcpt->scp_service->srv_nbuf_per_group / 2;

	/* NB I'm not locking; just looking. */

	/* CAVEAT EMPTOR: We might be allocating buffers here because we've
	 * allowed the request history to grow out of control. We could put a
	 * sanity check on that here and cull some history if we need the
	 * space.
	 */

	if (avail <= low_water)
		ptlrpc_grow_req_bufs(svcpt, 1);

	if (svcpt->scp_service->srv_stats) {
		lprocfs_counter_add(svcpt->scp_service->srv_stats,
				    PTLRPC_REQBUF_AVAIL_CNTR, avail);
	}
}
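
/*
 * Illustrative example, not from the original source: assuming a
 * srv_nbuf_per_group of 64, the low-water mark above is 32, so a service
 * partition starts growing its buffer pool once fewer than half of the
 * group's request buffers remain posted; setting test_req_buffer_pressure
 * forces the mark to 0 to exercise the empty-pool paths.
 */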

static int
ptlrpc_retry_rqbds(void *arg)
{
	struct ptlrpc_service_part *svcpt = arg;

	svcpt->scp_rqbd_timeout = 0;
	return -ETIMEDOUT;
}

static inline int
ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_nreqs_active <
	       svcpt->scp_nthrs_running - 1 -
	       (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
}

/**
 * allowed to create more threads
 * user can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_lock to get reliable result
 */
static inline int
ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_nthrs_running +
	       svcpt->scp_nthrs_starting <
	       svcpt->scp_service->srv_nthrs_cpt_limit;
}

/**
 * too many requests and allowed to create more threads
 */
static inline int
ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
{
	return !ptlrpc_threads_enough(svcpt) &&
	       ptlrpc_threads_increasable(svcpt);
}

static inline int
ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
{
	return thread_is_stopping(thread) ||
	       thread->t_svcpt->scp_service->srv_is_stopping;
}

static inline int
ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
{
	return !list_empty(&svcpt->scp_rqbd_idle) &&
	       svcpt->scp_rqbd_timeout == 0;
}

static inline int
ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
{
	return svcpt->scp_at_check;
}

/**
 * requests wait on preprocessing
 * user can call it w/o any lock but need to hold
 * ptlrpc_service_part::scp_lock to get reliable result
 */
static inline int
ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
{
	return !list_empty(&svcpt->scp_req_incoming);
}

static __attribute__((__noinline__)) int
ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
		  struct ptlrpc_thread *thread)
{
	/* Don't exit while there are replies to be handled */
	struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
					     ptlrpc_retry_rqbds, svcpt);

	/* XXX: Add this back when libcfs watchdog is merged upstream
	lc_watchdog_disable(thread->t_watchdog);
	 */

	cond_resched();

	l_wait_event_exclusive_head(svcpt->scp_waitq,
				    ptlrpc_thread_stopping(thread) ||
				    ptlrpc_server_request_incoming(svcpt) ||
				    ptlrpc_server_request_pending(svcpt,
								  false) ||
				    ptlrpc_rqbd_pending(svcpt) ||
				    ptlrpc_at_check(svcpt), &lwi);

	if (ptlrpc_thread_stopping(thread))
		return -EINTR;

	/*
	lc_watchdog_touch(thread->t_watchdog,
			  ptlrpc_server_get_timeout(svcpt));
	 */
	return 0;
}
2004 * Main thread body for service threads.
2005 * Waits in a loop waiting for new requests to process to appear.
2006 * Every time an incoming requests is added to its queue, a waitq
2007 * is woken up and one of the threads will handle it.
2009 static int ptlrpc_main(void *arg
)
2011 struct ptlrpc_thread
*thread
= arg
;
2012 struct ptlrpc_service_part
*svcpt
= thread
->t_svcpt
;
2013 struct ptlrpc_service
*svc
= svcpt
->scp_service
;
2014 struct ptlrpc_reply_state
*rs
;
2015 struct group_info
*ginfo
= NULL
;
2017 int counter
= 0, rc
= 0;
2019 thread
->t_pid
= current_pid();
2020 unshare_fs_struct();
2022 /* NB: we will call cfs_cpt_bind() for all threads, because we
2023 * might want to run lustre server only on a subset of system CPUs,
2024 * in that case ->scp_cpt is CFS_CPT_ANY
2026 rc
= cfs_cpt_bind(svc
->srv_cptable
, svcpt
->scp_cpt
);
2028 CWARN("%s: failed to bind %s on CPT %d\n",
2029 svc
->srv_name
, thread
->t_name
, svcpt
->scp_cpt
);
2032 ginfo
= groups_alloc(0);
2038 set_current_groups(ginfo
);
2039 put_group_info(ginfo
);
2041 if (svc
->srv_ops
.so_thr_init
) {
2042 rc
= svc
->srv_ops
.so_thr_init(thread
);
2047 env
= kzalloc(sizeof(*env
), GFP_NOFS
);
2053 rc
= lu_context_init(&env
->le_ctx
,
2054 svc
->srv_ctx_tags
| LCT_REMEMBER
| LCT_NOREF
);
2058 thread
->t_env
= env
;
2059 env
->le_ctx
.lc_thread
= thread
;
2060 env
->le_ctx
.lc_cookie
= 0x6;
2062 while (!list_empty(&svcpt
->scp_rqbd_idle
)) {
2063 rc
= ptlrpc_server_post_idle_rqbds(svcpt
);
2067 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2068 svc
->srv_name
, svcpt
->scp_cpt
, rc
);
2072 /* Alloc reply state structure for this one */
2073 rs
= libcfs_kvzalloc(svc
->srv_max_reply_size
, GFP_NOFS
);
2079 spin_lock(&svcpt
->scp_lock
);
2081 LASSERT(thread_is_starting(thread
));
2082 thread_clear_flags(thread
, SVC_STARTING
);
2084 LASSERT(svcpt
->scp_nthrs_starting
== 1);
2085 svcpt
->scp_nthrs_starting
--;
2087 /* SVC_STOPPING may already be set here if someone else is trying
2088 * to stop the service while this new thread has been dynamically
2089 * forked. We still set SVC_RUNNING to let our creator know that
2090 * we are now running, however we will exit as soon as possible
2092 thread_add_flags(thread
, SVC_RUNNING
);
2093 svcpt
->scp_nthrs_running
++;
2094 spin_unlock(&svcpt
->scp_lock
);
2096 /* wake up our creator in case he's still waiting. */
2097 wake_up(&thread
->t_ctl_waitq
);
2100 thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2104 spin_lock(&svcpt
->scp_rep_lock
);
2105 list_add(&rs
->rs_list
, &svcpt
->scp_rep_idle
);
2106 wake_up(&svcpt
->scp_rep_waitq
);
2107 spin_unlock(&svcpt
->scp_rep_lock
);
2109 CDEBUG(D_NET
, "service thread %d (#%d) started\n", thread
->t_id
,
2110 svcpt
->scp_nthrs_running
);
	/* XXX maintain a list of all managed devices: insert here */
	while (!ptlrpc_thread_stopping(thread)) {
		if (ptlrpc_wait_event(svcpt, thread))
			break;

		ptlrpc_check_rqbd_pool(svcpt);

		if (ptlrpc_threads_need_create(svcpt)) {
			/* Ignore return code - we tried... */
			ptlrpc_start_thread(svcpt, 0);
		}

		/* Process all incoming reqs before handling any */
		if (ptlrpc_server_request_incoming(svcpt)) {
			lu_context_enter(&env->le_ctx);
			env->le_ses = NULL;
			ptlrpc_server_handle_req_in(svcpt, thread);
			lu_context_exit(&env->le_ctx);

			/* but limit ourselves in case of flood */
			if (counter++ < 100)
				continue;
			counter = 0;
		}

		if (ptlrpc_at_check(svcpt))
			ptlrpc_at_check_timed(svcpt);

		if (ptlrpc_server_request_pending(svcpt, false)) {
			lu_context_enter(&env->le_ctx);
			ptlrpc_server_handle_request(svcpt, thread);
			lu_context_exit(&env->le_ctx);
		}

		if (ptlrpc_rqbd_pending(svcpt) &&
		    ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
			/* I just failed to repost request buffers.
			 * Wait for a timeout (unless something else
			 * happens) before I try again
			 */
			svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
			CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
			       svcpt->scp_nrqbds_posted);
		}
	}
	/* XXX: Add this back when libcfs watchdog is merged upstream
	lc_watchdog_delete(thread->t_watchdog);
	thread->t_watchdog = NULL;
	 */
out_srv_fini:
	/*
	 * deconstruct service specific state created by ptlrpc_start_thread()
	 */
	if (svc->srv_ops.so_thr_done)
		svc->srv_ops.so_thr_done(thread);

	if (env) {
		lu_context_fini(&env->le_ctx);
		kfree(env);
	}
out:
	CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
	       thread, thread->t_pid, thread->t_id, rc);
	spin_lock(&svcpt->scp_lock);
	if (thread_test_and_clear_flags(thread, SVC_STARTING))
		svcpt->scp_nthrs_starting--;

	if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
		/* must know immediately */
		svcpt->scp_nthrs_running--;
	}

	thread->t_id = rc;
	thread_add_flags(thread, SVC_STOPPED);

	wake_up(&thread->t_ctl_waitq);
	spin_unlock(&svcpt->scp_lock);

	return rc;
}
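/**
 * Check whether a reply handler thread should wake up: take any reply
 * states queued on \a hrt onto the private list \a replies and return
 * non-zero when there is work to do or the handler is stopping.
 */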
static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
			  struct list_head *replies)
{
	int result;

	spin_lock(&hrt->hrt_lock);

	list_splice_init(&hrt->hrt_queue, replies);

	result = ptlrpc_hr.hr_stopping || !list_empty(replies);

	spin_unlock(&hrt->hrt_lock);

	return result;
}
/**
 * Main body of "handle reply" function.
 * It processes acked reply states.
 */
static int ptlrpc_hr_main(void *arg)
{
	struct ptlrpc_hr_thread *hrt = arg;
	struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
	LIST_HEAD(replies);
	char threadname[20];
	int rc;

	snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
		 hrp->hrp_cpt, hrt->hrt_id);
	unshare_fs_struct();

	rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
	if (rc != 0) {
		CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
		      threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
	}

	atomic_inc(&hrp->hrp_nstarted);
	wake_up(&ptlrpc_hr.hr_waitq);

	while (!ptlrpc_hr.hr_stopping) {
		l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));

		while (!list_empty(&replies)) {
			struct ptlrpc_reply_state *rs;

			rs = list_entry(replies.prev, struct ptlrpc_reply_state,
					rs_list);
			list_del_init(&rs->rs_list);
			ptlrpc_handle_rs(rs);
		}
	}

	atomic_inc(&hrp->hrp_nstopped);
	wake_up(&ptlrpc_hr.hr_waitq);

	return 0;
}
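/**
 * Ask all reply handler threads to stop, then wait until every
 * partition reports as many stopped threads as were started.
 */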
static void ptlrpc_stop_hr_threads(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;
	int j;

	ptlrpc_hr.hr_stopping = 1;

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		if (!hrp->hrp_thrs)
			continue; /* uninitialized */
		for (j = 0; j < hrp->hrp_nthrs; j++)
			wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
	}

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		if (!hrp->hrp_thrs)
			continue; /* uninitialized */
		wait_event(ptlrpc_hr.hr_waitq,
			   atomic_read(&hrp->hrp_nstopped) ==
			   atomic_read(&hrp->hrp_nstarted));
	}
}
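/**
 * Start every reply handler thread of every CPT partition; if any
 * thread fails to start, stop the ones already running.
 */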
static int ptlrpc_start_hr_threads(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;
	int j;

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		int rc = 0;

		for (j = 0; j < hrp->hrp_nthrs; j++) {
			struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
			struct task_struct *task;

			task = kthread_run(ptlrpc_hr_main,
					   &hrp->hrp_thrs[j],
					   "ptlrpc_hr%02d_%03d",
					   hrp->hrp_cpt, hrt->hrt_id);
			if (IS_ERR(task)) {
				rc = PTR_ERR(task);
				break;
			}
		}
		wait_event(ptlrpc_hr.hr_waitq,
			   atomic_read(&hrp->hrp_nstarted) == j);

		if (rc < 0) {
			CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
			       i, j, rc);
			ptlrpc_stop_hr_threads();
			return rc;
		}
	}
	return 0;
}
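/**
 * Stop all threads of one service partition: mark each thread with
 * SVC_STOPPING, wake them all, then reap every thread once it has
 * reported SVC_STOPPED.
 */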
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
	struct l_wait_info lwi = { 0 };
	struct ptlrpc_thread *thread;
	LIST_HEAD(zombie);

	CDEBUG(D_INFO, "Stopping threads for service %s\n",
	       svcpt->scp_service->srv_name);

	spin_lock(&svcpt->scp_lock);
	/* let the thread know that we would like it to stop asap */
	list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
		CDEBUG(D_INFO, "Stopping thread %s #%u\n",
		       svcpt->scp_service->srv_thread_name, thread->t_id);
		thread_add_flags(thread, SVC_STOPPING);
	}

	wake_up_all(&svcpt->scp_waitq);

	while (!list_empty(&svcpt->scp_threads)) {
		thread = list_entry(svcpt->scp_threads.next,
				    struct ptlrpc_thread, t_link);
		if (thread_is_stopped(thread)) {
			list_del(&thread->t_link);
			list_add(&thread->t_link, &zombie);
			continue;
		}
		spin_unlock(&svcpt->scp_lock);

		CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
		       svcpt->scp_service->srv_thread_name, thread->t_id);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopped(thread), &lwi);

		spin_lock(&svcpt->scp_lock);
	}

	spin_unlock(&svcpt->scp_lock);

	while (!list_empty(&zombie)) {
		thread = list_entry(zombie.next,
				    struct ptlrpc_thread, t_link);
		list_del(&thread->t_link);
		kfree(thread);
	}
}
/**
 * Stops all threads of a particular service \a svc
 */
static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (svcpt->scp_service)
			ptlrpc_svcpt_stop_threads(svcpt);
	}
}
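/**
 * Start the initial set of service threads, srv_nthrs_cpt_init per
 * partition, for service \a svc. -EMFILE from ptlrpc_start_thread()
 * only means the partition already has enough threads, so it is not
 * treated as an error.
 */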
int ptlrpc_start_threads(struct ptlrpc_service *svc)
{
	int rc = 0;
	int i;
	int j;

	/* We require 2 threads min, see note in ptlrpc_server_handle_request */
	LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);

	for (i = 0; i < svc->srv_ncpts; i++) {
		for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
			rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
			if (rc == 0)
				continue;

			if (rc != -EMFILE)
				goto failed;
			/* We have enough threads, don't start more. b=15759 */
			break;
		}
	}

	return 0;
failed:
	CERROR("cannot start %s thread #%d_%d: rc %d\n",
	       svc->srv_thread_name, i, j, rc);
	ptlrpc_stop_all_threads(svc);
	return rc;
}
EXPORT_SYMBOL(ptlrpc_start_threads);
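/**
 * Start one new thread for service partition \a svcpt. Thread starts
 * are serialized so that t_id values stay unique and contiguous; when
 * \a wait is non-zero, block until the new thread is running or has
 * stopped.
 */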
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
	struct l_wait_info lwi = { 0 };
	struct ptlrpc_thread *thread;
	struct ptlrpc_service *svc;
	struct task_struct *task;
	int rc;

	svc = svcpt->scp_service;

	CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
	       svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
	       svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);

again:
	if (unlikely(svc->srv_is_stopping))
		return -ESRCH;

	if (!ptlrpc_threads_increasable(svcpt) ||
	    (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
	     svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
		return -EMFILE;

	thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
			      cfs_cpt_spread_node(svc->srv_cptable,
						  svcpt->scp_cpt));
	if (!thread)
		return -ENOMEM;

	init_waitqueue_head(&thread->t_ctl_waitq);

	spin_lock(&svcpt->scp_lock);
	if (!ptlrpc_threads_increasable(svcpt)) {
		spin_unlock(&svcpt->scp_lock);
		kfree(thread);
		return -EMFILE;
	}

	if (svcpt->scp_nthrs_starting != 0) {
		/* serialize starting because some modules (obdfilter)
		 * might require unique and contiguous t_id
		 */
		LASSERT(svcpt->scp_nthrs_starting == 1);
		spin_unlock(&svcpt->scp_lock);
		kfree(thread);
		if (wait) {
			CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
			       svc->srv_thread_name, svcpt->scp_thr_nextid);
			schedule();
			goto again;
		}

		CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
		       svc->srv_thread_name, svcpt->scp_thr_nextid);
		return -EAGAIN;
	}
	svcpt->scp_nthrs_starting++;
	thread->t_id = svcpt->scp_thr_nextid++;
	thread_add_flags(thread, SVC_STARTING);
	thread->t_svcpt = svcpt;

	list_add(&thread->t_link, &svcpt->scp_threads);
	spin_unlock(&svcpt->scp_lock);

	if (svcpt->scp_cpt >= 0) {
		snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
			 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
	} else {
		snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
			 svc->srv_thread_name, thread->t_id);
	}

	CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
	task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("cannot start thread '%s': rc = %d\n",
		       thread->t_name, rc);
		spin_lock(&svcpt->scp_lock);
		--svcpt->scp_nthrs_starting;
		if (thread_is_stopping(thread)) {
			/* this ptlrpc_thread is being handled
			 * by ptlrpc_svcpt_stop_threads now
			 */
			thread_add_flags(thread, SVC_STOPPED);
			wake_up(&thread->t_ctl_waitq);
			spin_unlock(&svcpt->scp_lock);
		} else {
			list_del(&thread->t_link);
			spin_unlock(&svcpt->scp_lock);
			kfree(thread);
		}
		return rc;
	}

	if (!wait)
		return 0;

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);

	rc = thread_is_stopped(thread) ? thread->t_id : 0;
	return rc;
}
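/**
 * Set up the reply handler machinery: allocate one ptlrpc_hr_partition
 * per CPT, size each by the CPU weight of its CPT divided by the
 * number of hyperthread siblings, and start the handler threads.
 */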
int ptlrpc_hr_init(void)
{
	struct ptlrpc_hr_partition *hrp;
	struct ptlrpc_hr_thread *hrt;
	int rc;
	int i;
	int j;
	int weight;

	memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
	ptlrpc_hr.hr_cpt_table = cfs_cpt_table;

	ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
						   sizeof(*hrp));
	if (!ptlrpc_hr.hr_partitions)
		return -ENOMEM;

	init_waitqueue_head(&ptlrpc_hr.hr_waitq);

	weight = cpumask_weight(topology_sibling_cpumask(0));

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		hrp->hrp_cpt = i;

		atomic_set(&hrp->hrp_nstarted, 0);
		atomic_set(&hrp->hrp_nstopped, 0);

		hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
		hrp->hrp_nthrs /= weight;
		if (hrp->hrp_nthrs == 0)
			hrp->hrp_nthrs = 1;

		hrp->hrp_thrs =
			kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
				     cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
							 i));
		if (!hrp->hrp_thrs) {
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < hrp->hrp_nthrs; j++) {
			hrt = &hrp->hrp_thrs[j];

			hrt->hrt_id = j;
			hrt->hrt_partition = hrp;
			init_waitqueue_head(&hrt->hrt_waitq);
			spin_lock_init(&hrt->hrt_lock);
			INIT_LIST_HEAD(&hrt->hrt_queue);
		}
	}

	rc = ptlrpc_start_hr_threads();
out:
	if (rc != 0)
		ptlrpc_hr_fini();
	return rc;
}
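/**
 * Undo ptlrpc_hr_init(): stop all reply handler threads and free the
 * per-CPT partitions. Safe to call when initialization never ran.
 */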
void ptlrpc_hr_fini(void)
{
	struct ptlrpc_hr_partition *hrp;
	int i;

	if (!ptlrpc_hr.hr_partitions)
		return;

	ptlrpc_stop_hr_threads();

	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
		kfree(hrp->hrp_thrs);
	}

	cfs_percpt_free(ptlrpc_hr.hr_partitions);
	ptlrpc_hr.hr_partitions = NULL;
}
/**
 * Wait until all already scheduled replies are processed.
 */
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
	while (1) {
		int rc;
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
						     NULL, NULL);

		rc = l_wait_event(svcpt->scp_waitq,
				  atomic_read(&svcpt->scp_nreps_difficult) == 0,
				  &lwi);
		if (rc == 0)
			break;
		CWARN("Unexpectedly long timeout %s %p\n",
		      svcpt->scp_service->srv_name, svcpt->scp_service);
	}
}
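/**
 * Disarm the adaptive-timeout timer of every partition of \a svc.
 */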
static void
ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;

	/* early disarm AT timer... */
	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (svcpt->scp_service)
			del_timer(&svcpt->scp_at_timer);
	}
}
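/**
 * Unlink all request buffers that \a svc has posted to LNet and wait
 * until the network has released every buffer it is still filling.
 */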
static void
ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_request_buffer_desc *rqbd;
	struct l_wait_info lwi;
	int rc;
	int i;

	/* All history will be culled when the next request buffer is
	 * freed in ptlrpc_service_purge_all()
	 */
	svc->srv_hist_nrqbds_cpt_max = 0;

	rc = LNetClearLazyPortal(svc->srv_req_portal);
	LASSERT(rc == 0);

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* Unlink all the request buffers. This forces a 'final'
		 * event with its 'unlink' flag set for each posted rqbd
		 */
		list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
				    rqbd_list) {
			rc = LNetMDUnlink(rqbd->rqbd_md_h);
			LASSERT(rc == 0 || rc == -ENOENT);
		}
	}
	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* Wait for the network to release any buffers
		 * it's currently filling
		 */
		spin_lock(&svcpt->scp_lock);
		while (svcpt->scp_nrqbds_posted != 0) {
			spin_unlock(&svcpt->scp_lock);
			/* Network access will complete in finite time but
			 * the HUGE timeout lets us CWARN for visibility
			 * of sluggish LNDs
			 */
			lwi = LWI_TIMEOUT_INTERVAL(
					cfs_time_seconds(LONG_UNLINK),
					cfs_time_seconds(1), NULL, NULL);
			rc = l_wait_event(svcpt->scp_waitq,
					  svcpt->scp_nrqbds_posted == 0, &lwi);
			if (rc == -ETIMEDOUT) {
				CWARN("Service %s waiting for request buffers\n",
				      svcpt->scp_service->srv_name);
			}
			spin_lock(&svcpt->scp_lock);
		}
		spin_unlock(&svcpt->scp_lock);
	}
}
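/**
 * Discard everything still queued on \a svc: schedule any remaining
 * difficult replies, drop incoming and pending requests, then free
 * the idle request buffers and reply states once nothing references
 * them.
 */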
static void
ptlrpc_service_purge_all(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_request_buffer_desc *rqbd;
	struct ptlrpc_request *req;
	struct ptlrpc_reply_state *rs;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		spin_lock(&svcpt->scp_rep_lock);
		while (!list_empty(&svcpt->scp_rep_active)) {
			rs = list_entry(svcpt->scp_rep_active.next,
					struct ptlrpc_reply_state, rs_list);
			spin_lock(&rs->rs_lock);
			ptlrpc_schedule_difficult_reply(rs);
			spin_unlock(&rs->rs_lock);
		}
		spin_unlock(&svcpt->scp_rep_lock);

		/* purge the request queue. NB No new replies (rqbds
		 * all unlinked) and no service threads, so I'm the only
		 * thread noodling the request queue now
		 */
		while (!list_empty(&svcpt->scp_req_incoming)) {
			req = list_entry(svcpt->scp_req_incoming.next,
					 struct ptlrpc_request, rq_list);

			list_del(&req->rq_list);
			svcpt->scp_nreqs_incoming--;
			ptlrpc_server_finish_request(svcpt, req);
		}

		while (ptlrpc_server_request_pending(svcpt, true)) {
			req = ptlrpc_server_request_get(svcpt, true);
			ptlrpc_server_finish_active_request(svcpt, req);
		}
		LASSERT(list_empty(&svcpt->scp_rqbd_posted));
		LASSERT(svcpt->scp_nreqs_incoming == 0);
		LASSERT(svcpt->scp_nreqs_active == 0);
		/* history should have been culled by
		 * ptlrpc_server_finish_request
		 */
		LASSERT(svcpt->scp_hist_nrqbds == 0);

		/* Now free all the request buffers since nothing
		 * references them any more...
		 */
		while (!list_empty(&svcpt->scp_rqbd_idle)) {
			rqbd = list_entry(svcpt->scp_rqbd_idle.next,
					  struct ptlrpc_request_buffer_desc,
					  rqbd_list);
			ptlrpc_free_rqbd(rqbd);
		}
		ptlrpc_wait_replies(svcpt);

		while (!list_empty(&svcpt->scp_rep_idle)) {
			rs = list_entry(svcpt->scp_rep_idle.next,
					struct ptlrpc_reply_state,
					rs_list);
			list_del(&rs->rs_list);
			kvfree(rs);
		}
	}
}
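/**
 * Release the memory of \a svc itself: the adaptive-timeout arrays,
 * each per-CPT service part, the CPT expression list and finally the
 * service structure.
 */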
static void
ptlrpc_service_free(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	struct ptlrpc_at_array *array;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		if (!svcpt->scp_service)
			break;

		/* In case somebody rearmed this in the meantime */
		del_timer(&svcpt->scp_at_timer);
		array = &svcpt->scp_at_array;

		kfree(array->paa_reqs_array);
		array->paa_reqs_array = NULL;
		kfree(array->paa_reqs_count);
		array->paa_reqs_count = NULL;
	}

	ptlrpc_service_for_each_part(svcpt, i, svc)
		kfree(svcpt);

	if (svc->srv_cpts)
		cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);

	kfree(svc);
}
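/**
 * Tear down service \a service completely: remove it from the global
 * service list, stop its threads, unlink and purge its buffers, and
 * free all associated state. \a service must not be used afterwards.
 */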
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
	CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);

	service->srv_is_stopping = 1;

	mutex_lock(&ptlrpc_all_services_mutex);
	list_del_init(&service->srv_list);
	mutex_unlock(&ptlrpc_all_services_mutex);

	ptlrpc_service_del_atimer(service);
	ptlrpc_stop_all_threads(service);

	ptlrpc_service_unlink_rqbd(service);
	ptlrpc_service_purge_all(service);
	ptlrpc_service_nrs_cleanup(service);

	ptlrpc_lprocfs_unregister_service(service);
	ptlrpc_sysfs_unregister_service(service);

	ptlrpc_service_free(service);

	return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_service);