// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);

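/*
 * Note (descriptive, not from the original file): because the parameter
 * is registered with mode 0644, the pool mode can also be changed at
 * runtime through /sys/module/sunrpc/parameters/pool_mode, e.g.
 * "echo pernode > /sys/module/sunrpc/parameters/pool_mode".
 * param_set_pool_mode() above rejects the write with -EBUSY while any
 * pooled service holds a reference (svc_pool_map.count != 0).
 */
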
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
EXPORT_SYMBOL_GPL(svc_pool_map_get);

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
EXPORT_SYMBOL_GPL(svc_pool_map_put);

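/*
 * Note (descriptive comment, added for clarity): within this file the
 * get/put pair is taken by svc_create_pooled() and dropped by
 * svc_destroy(), so the map lives exactly as long as some pooled
 * service does.
 */
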
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

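/*
 * Usage sketch (illustrative; the real callers live in the transport
 * code, e.g. the enqueue path in svc_xprt.c): picking the pool for the
 * CPU that is handling the incoming data might look like
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(serv, smp_processor_id());
 */
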
int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

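#if 0
/*
 * Illustrative sketch (hypothetical caller; "example_program" and
 * "example_ops" are stand-ins, not part of this file): a pooled
 * service is typically created once and then sized with
 * svc_set_num_threads(), as nfsd does.
 */
static struct svc_serv *example_create(struct svc_program *example_program,
				       const struct svc_serv_ops *example_ops)
{
	struct svc_serv *serv;

	serv = svc_create_pooled(example_program, RPCSVC_MAXPAYLOAD,
				 example_ops);
	if (!serv)
		return ERR_PTR(-ENOMEM);
	return serv;	/* holds the initial sv_nrthreads == 1 reference */
}
#endif
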
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point. Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_pages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}

/* destroy old threads */
static int
svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		send_sig(SIGINT, task, 1);
		nrservs++;
	} while (nrservs < 0);

	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_signal_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

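#if 0
/*
 * Illustrative sketch (hypothetical caller): resizing a server to
 * "want" threads across all pools.  The -1 bookkeeping above assumes
 * the caller already holds its svc_get() reference.
 */
static int example_resize(struct svc_serv *serv, int want)
{
	return svc_set_num_threads(serv, NULL /* round-robin all pools */, want);
}
#endif
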
/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		kthread_stop(task);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

int
svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 */
void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	if (*rqstp->rq_next_page) {
		if (!pagevec_space(&rqstp->rq_pvec))
			__pagevec_release(&rqstp->rq_pvec);
		pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

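/*
 * Usage sketch (illustrative): a zero-copy read path can splice a
 * cache page into the response in place of the preallocated one:
 *
 *	svc_rqst_replace_page(rqstp, cache_page);
 *
 * The displaced page is parked in rqstp->rq_pvec and released in a
 * batch once the pagevec is full.
 */
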
/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

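#if 0
/*
 * Illustrative sketch (hypothetical svo_function; nfsd() and lockd()
 * are the real examples): a service thread runs until told to stop,
 * then calls svc_exit_thread() to drop its pool and server references.
 */
static int example_thread_fn(void *data)
{
	struct svc_rqst *rqstp = data;

	while (!kthread_should_stop() && !signalled()) {
		/* ... receive and process one request (see svc_process) ... */
	}
	svc_exit_thread(rqstp);
	return 0;
}
#endif
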
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	trace_svc_register(progname, version, protocol, port, family, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
				version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
					family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

static int
svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	const struct svc_procedure *procp = rqstp->rq_procinfo;

	/*
	 * Decode arguments
	 * XXX: why do we ignore the return value?
	 */
	if (procp->pc_decode &&
	    !procp->pc_decode(rqstp, argv->iov_base)) {
		*statp = rpc_garbage_args;
		return 1;
	}

	*statp = procp->pc_func(rqstp);

	if (*statp == rpc_drop_reply ||
	    test_bit(RQ_DROPME, &rqstp->rq_flags))
		return 0;

	if (rqstp->rq_auth_stat != rpc_auth_ok)
		return 1;

	if (*statp != rpc_success)
		return 1;

	/* Encode reply */
	if (procp->pc_encode &&
	    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
		dprintk("svc: failed to encode reply\n");
		/* serv->sv_stats->rpcsystemerr++; */
		*statp = rpc_system_err;
	}
	return 1;
}

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
		const struct svc_program *progp,
		struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	versp->vs_count[rqstp->rq_proc]++;

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program *progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	__be32 *statp;
	u32 prog, vers;
	__be32 rpc_stat;
	int auth_res;
	__be32 *reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	if (auth_res != SVC_OK)
		trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	rpc_stat = progp->pg_init_request(rqstp, progp, &process);
	switch (rpc_stat) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!process.dispatch) {
		if (!svc_generic_dispatch(rqstp, statp))
			goto release_dropit;
		if (*statp == rpc_garbage_args)
			goto err_garbage;
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!process.dispatch(rqstp, statp))
			goto release_dropit; /* Release reply info */
	}

	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_release_bad_auth;

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp)  - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

release_dropit:
	if (procp->pc_release)
		procp->pc_release(rqstp);
dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_close_xprt(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
			argv->iov_len);
	goto close_xprt;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_release_bad_auth:
	if (procp->pc_release)
		procp->pc_release(rqstp);
err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putu32(resv, rqstp->rq_auth_stat);	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, process.mismatch.lovers);
	svc_putnl(resv, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}

1505 svc_process(struct svc_rqst
*rqstp
)
1507 struct kvec
*argv
= &rqstp
->rq_arg
.head
[0];
1508 struct kvec
*resv
= &rqstp
->rq_res
.head
[0];
1509 struct svc_serv
*serv
= rqstp
->rq_server
;
1512 #if IS_ENABLED(CONFIG_FAIL_SUNRPC)
1513 if (!fail_sunrpc
.ignore_server_disconnect
&&
1514 should_fail(&fail_sunrpc
.attr
, 1))
1515 svc_xprt_deferred_close(rqstp
->rq_xprt
);
1519 * Setup response xdr_buf.
1520 * Initially it has just one page
1522 rqstp
->rq_next_page
= &rqstp
->rq_respages
[1];
1523 resv
->iov_base
= page_address(rqstp
->rq_respages
[0]);
1525 rqstp
->rq_res
.pages
= rqstp
->rq_respages
+ 1;
1526 rqstp
->rq_res
.len
= 0;
1527 rqstp
->rq_res
.page_base
= 0;
1528 rqstp
->rq_res
.page_len
= 0;
1529 rqstp
->rq_res
.buflen
= PAGE_SIZE
;
1530 rqstp
->rq_res
.tail
[0].iov_base
= NULL
;
1531 rqstp
->rq_res
.tail
[0].iov_len
= 0;
1533 dir
= svc_getnl(argv
);
1535 /* direction != CALL */
1536 svc_printk(rqstp
, "bad direction %d, dropping request\n", dir
);
1537 serv
->sv_stats
->rpcbadfmt
++;
1541 /* Returns 1 for send, 0 for drop */
1542 if (likely(svc_process_common(rqstp
, argv
, resv
)))
1543 return svc_send(rqstp
);
1549 EXPORT_SYMBOL_GPL(svc_process
);
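#if 0
/*
 * Illustrative sketch (hypothetical thread body, modelled loosely on
 * nfsd()): svc_process() is normally fed by svc_recv() from the
 * transport layer.
 */
static void example_loop(struct svc_rqst *rqstp)
{
	int err;

	while (!signalled()) {
		err = svc_recv(rqstp, 60 * 60 * HZ);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
}
#endif
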
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp, argv, resv);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @payload: xdr_buf containing only the write data payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct xdr_buf *payload)
{
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

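#if 0
/*
 * Illustrative sketch (hypothetical caller, modelled loosely on the
 * nfsd write path): the returned element count feeds an iov_iter.
 */
static ssize_t example_write(struct svc_rqst *rqstp, struct file *file,
			     loff_t pos, struct xdr_buf *payload)
{
	struct iov_iter iter;
	unsigned int nvecs;

	nvecs = svc_fill_write_vector(rqstp, payload);
	iov_iter_kvec(&iter, WRITE, rqstp->rq_vec, nvecs, payload->len);
	return vfs_iter_write(file, &iter, &pos, 0);
}
#endif
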
/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
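
#if 0
/*
 * Illustrative sketch (hypothetical caller, modelled loosely on the
 * NFSv3 SYMLINK path): the returned string must be freed by the caller.
 */
static int example_symlink_arg(struct svc_rqst *rqstp, struct kvec *first,
			       void *rest, size_t total)
{
	char *path;

	path = svc_fill_symlink_pathname(rqstp, first, rest, total);
	if (IS_ERR(path))
		return PTR_ERR(path);
	/* ... hand path to vfs_symlink() ... */
	kfree(path);
	return 0;
}
#endif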