/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>

#define YAMA_SCOPE_DISABLED	0
#define YAMA_SCOPE_RELATIONAL	1
#define YAMA_SCOPE_CAPABILITY	2
#define YAMA_SCOPE_NO_ATTACH	3

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
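
/*
 * Summary of the policy levels enforced by yama_ptrace_access_check()
 * and yama_ptrace_traceme() below:
 *
 *   YAMA_SCOPE_DISABLED   - only the standard capability checks apply.
 *   YAMA_SCOPE_RELATIONAL - PTRACE_ATTACH is allowed only when the tracer
 *                           is an ancestor of the tracee, is covered by an
 *                           exception the tracee granted via
 *                           prctl(PR_SET_PTRACER), or holds CAP_SYS_PTRACE
 *                           in the tracee's user namespace.
 *   YAMA_SCOPE_CAPABILITY - only tracers with CAP_SYS_PTRACE may attach.
 *   YAMA_SCOPE_NO_ATTACH  - no process may attach; the sysctl handler locks
 *                           the setting once this maximum has been written.
 */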

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	struct ptrace_relation *relation, *added;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	added->tracee = tracee;
	added->tracer = tracer;

	spin_lock_bh(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->tracee == tracee) {
			list_replace_rcu(&relation->node, &added->node);
			kfree_rcu(relation, rcu);
			goto out;
		}
	}

	list_add_rcu(&added->node, &ptracer_relations);

out:
	rcu_read_unlock();
	spin_unlock_bh(&ptracer_relations_lock);
	return 0;
}
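
/*
 * Write-side pattern used by yama_ptracer_add() and yama_ptracer_del():
 * updates to ptracer_relations are serialized by ptracer_relations_lock
 * and published with list_add_rcu()/list_replace_rcu()/list_del_rcu(),
 * while lookups (see ptracer_exception_found()) walk the list under
 * rcu_read_lock() only.  Replaced or deleted entries are released with
 * kfree_rcu(), so they are not freed until concurrent readers are done
 * with them.
 */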

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation;

	spin_lock_bh(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			list_del_rcu(&relation->node);
			kfree_rcu(relation, rcu);
		}
	}
	rcu_read_unlock();
	spin_unlock_bh(&ptracer_relations_lock);
}

/**
 * yama_task_free - remove any ptrace exceptions held for the exiting task
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
		    unsigned long arg4, unsigned long arg5)
{
	int rc;
	struct task_struct *myself = current;

	rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
	if (rc != -ENOSYS)
		return rc;

	switch (option) {
	case PR_SET_PTRACER:
		/* Since a thread can call prctl(), find the group leader
		 * before calling _add() or _del() on it, since we want
		 * process-level granularity of control. The tracer group
		 * leader checking is handled later when walking the ancestry
		 * at the time of PTRACE_ATTACH check.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			rcu_read_lock();
			tracer = find_task_by_vpid(arg2);
			if (tracer)
				get_task_struct(tracer);
			else
				rc = -EINVAL;
			rcu_read_unlock();

			if (tracer) {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}
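
/*
 * Illustrative userspace use of the exception interface handled above
 * (a minimal sketch; "helper_pid" is a hypothetical tracer pid of the
 * caller's choosing and error handling is omitted):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_PTRACER, helper_pid, 0, 0, 0);          // allow one specific tracer
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);  // allow any tracer
 *	prctl(PR_SET_PTRACER, 0, 0, 0, 0);                   // clear the exception
 */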

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if the tracee has a registered exception covering this tracer
 * (PR_SET_PTRACER_ANY, the tracer itself, or an ancestor of the tracer),
 * 0 otherwise.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	rcu_read_lock();
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry_rcu(relation, &ptracer_relations, node)
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}

	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;
	rcu_read_unlock();

	return rc;
}

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_access_check(struct task_struct *child,
			     unsigned int mode)
{
	int rc;

	/* If standard caps disallows it, so does Yama. We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_access_check(child, mode);
	if (rc)
		return rc;

	/* require ptrace target be a child of ptracer on attach */
	if (mode == PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			if (!task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_CAPABILITY:
			if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptrace of pid %d was attempted by: %s (pid %d)\n",
			child->pid, current->comm, current->pid);
	}

	return rc;
}

/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
	int rc;

	/* If standard caps disallows it, so does Yama. We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_traceme(parent);
	if (rc)
		return rc;

	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
	switch (ptrace_scope) {
	case YAMA_SCOPE_CAPABILITY:
		if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
			rc = -EPERM;
		break;
	case YAMA_SCOPE_NO_ATTACH:
		rc = -EPERM;
		break;
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
			current->pid, parent->comm, parent->pid);
	}

	return rc;
}
#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
	.name =			"yama",

	.ptrace_access_check =	yama_ptrace_access_check,
	.ptrace_traceme =	yama_ptrace_traceme,
	.task_prctl =		yama_task_prctl,
	.task_free =		yama_task_free,
};
#endif

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (rc)
		return rc;

	/* Lock the max value if it ever gets set. */
	if (write && *(int *)table->data == *(int *)table->extra2)
		table->extra1 = table->extra2;

	return rc;
}
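
/*
 * At runtime the mode is selected through the sysctl registered below,
 * e.g. "sysctl -w kernel.yama.ptrace_scope=1" or
 * "echo 1 > /proc/sys/kernel/yama/ptrace_scope".  Writes require
 * CAP_SYS_PTRACE, and once the maximum (YAMA_SCOPE_NO_ATTACH) has been
 * written the handler above pins the lower bound so the setting cannot
 * be lowered again.
 */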

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname       = "ptrace_scope",
		.data           = &ptrace_scope,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = yama_dointvec_minmax,
		.extra1         = &zero,
		.extra2         = &max_scope,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (!security_module_enable(&yama_ops))
		return 0;
#endif

	printk(KERN_INFO "Yama: becoming mindful.\n");

#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (register_security(&yama_ops))
		panic("Yama: kernel registration failed.\n");
#endif

#ifdef CONFIG_SYSCTL
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
#endif

	return 0;
}

security_initcall(yama_init);