kernel/kmod.c
/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au> Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES
/*
 * Assuming:
 *
 *	threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
 *		(u64) THREAD_SIZE * 8UL);
 *
 * needing fewer than 50 threads would mean we're dealing with systems
 * smaller than 3200 pages. That assumes roughly 13M of memory, and even
 * that would only be an upper limit, beyond which the OOM killer takes
 * effect. Systems like these are very unlikely if modules are enabled.
 */
#define MAX_KMOD_CONCURRENT	50
static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
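
/*
 * Worked example (editorial note; the THREAD_SIZE value is an assumed
 * config, not taken from this file): with PAGE_SIZE = 4 KiB, 3200 pages
 * is 3200 * 4096 = 13107200 bytes, i.e. the ~13M mentioned above.
 * Plugging that into the formula with, say, THREAD_SIZE = 32 KiB gives
 *
 *	threads = 13107200 / (32768 * 8) = 50
 *
 * so the MAX_KMOD_CONCURRENT throttle only binds before the thread limit
 * on machines too small to plausibly run a modular kernel.
 */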

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}
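
/*
 * Net effect, for illustration: a request to load module "foo" execs
 *
 *	/sbin/modprobe -q -- foo
 *
 * with the minimal environment defined in envp above; free_modprobe_argv()
 * then releases the duplicated module name and the argv array.
 */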

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not blindly
 * invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	int ret;

	/*
	 * We don't allow synchronous module loading from async. Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
		pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
				    atomic_read(&kmod_concurrent_max),
				    MAX_KMOD_CONCURRENT, module_name);
		wait_event_interruptible(kmod_wq,
					 atomic_dec_if_positive(&kmod_concurrent_max) >= 0);
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_inc(&kmod_concurrent_max);
	wake_up(&kmod_wq);

	return ret;
}
EXPORT_SYMBOL(__request_module);
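
/*
 * Example usage (editorial sketch; the function and module name are
 * hypothetical): most callers go through the request_module() macro from
 * <linux/kmod.h>, which expands to __request_module(true, ...). Per the
 * kernel-doc above, verify that the requested service actually appeared
 * rather than trusting the return value alone.
 */
static int example_need_foo(void)
{
	int ret;

	ret = request_module("foo-proto");	/* synchronous: UMH_WAIT_PROC */
	if (ret)
		return ret;
	/* ... now look up the "foo-proto" service and fail if it is absent ... */
	return 0;
}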

#endif /* CONFIG_MODULES */

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}

/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored, sys_wait4 won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones), so that
 * the helpers inherit the widest possible affinity, irrespective of
 * call_usermodehelper() callers with possibly reduced affinity (eg: per-cpu
 * workqueues). We don't want usermodehelper targets to contend a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
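
/*
 * Illustrative usage of the read-lock API (editorial sketch; the function
 * name is hypothetical): subsystems such as the firmware loader bracket
 * helper invocations with the read lock so they back off cleanly while
 * helpers are disabled for suspend/hibernation.
 */
static int example_run_helper_locked(struct subprocess_info *info, int wait)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret) {
		/* -EAGAIN: helpers currently disabled; we still own info */
		call_usermodehelper_freeinfo(info);
		return ret;
	}
	ret = call_usermodehelper_exec(info, wait);
	usermodehelper_read_unlock();
	return ret;
}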

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
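
/*
 * Illustrative caller pattern (editorial sketch): a suspend-style path blocks
 * new helpers, does its work with helpers quiescent, then re-enables them.
 * On -EAGAIN the running helpers never drained within the timeout, and the
 * disable has already been rolled back by the function above.
 *
 *	if (__usermodehelper_disable(UMH_DISABLED))
 *		return -EBUSY;
 *	... work with helpers guaranteed quiescent ...
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);
 */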

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed. This can be used for freeing the argv and envp. The function
 * must be runnable in either a process context or the context in which
 * call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
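
/*
 * Example of the two-step API (editorial sketch; helper path, arguments and
 * function names are hypothetical): mirror call_modprobe() above by attaching
 * a cleanup callback that owns the argv allocation.
 */
static void example_free_argv(struct subprocess_info *info)
{
	kfree(info->argv);
}

static int example_two_step(void)
{
	static char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	struct subprocess_info *info;
	char **argv;

	argv = kmalloc(sizeof(char *[3]), GFP_KERNEL);
	if (!argv)
		return -ENOMEM;
	argv[0] = "/sbin/example-helper";
	argv[1] = "--verbose";
	argv[2] = NULL;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
					 NULL, example_free_argv, NULL);
	if (!info) {
		kfree(argv);
		return -ENOMEM;
	}
	/* exec owns info from here; the cleanup runs even on failure paths */
	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}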

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
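
/*
 * One-shot example (editorial sketch; the helper path is hypothetical):
 * launch a helper and wait for its exit status in a single call. The
 * stack-allocated argv is safe because UMH_WAIT_PROC blocks until the
 * helper has exited.
 */
static int example_one_shot(void)
{
	char *argv[] = { "/sbin/example-notify", "--event", "done", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}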

static int proc_cap_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace. Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
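
/*
 * Usage note (editorial; the mount point is an assumption based on where
 * kernel/sysctl.c conventionally hooks this table): these entries surface as
 * /proc/sys/kernel/usermodehelper/bset and .../inheritable, each holding
 * _KERNEL_CAPABILITY_U32S ulongs, least significant 32 bits first. Because
 * proc_cap_handler() only ever intersects, a write can drop capabilities
 * from future helpers but never restore them; e.g. writing "0 0" to both
 * files strips all capabilities from usermode helpers until reboot.
 */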