// SPDX-License-Identifier: GPL-2.0-only
/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 */

#include <linux/lsm_hooks.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/string_helpers.h>
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define YAMA_SCOPE_DISABLED	0
#define YAMA_SCOPE_RELATIONAL	1
#define YAMA_SCOPE_CAPABILITY	2
#define YAMA_SCOPE_NO_ATTACH	3

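/*
 * ptrace_scope levels, set via the kernel.yama.ptrace_scope sysctl:
 *   0 (DISABLED)   - only classic ptrace permission checks apply
 *   1 (RELATIONAL) - attaching requires a predefined relationship: the
 *                    tracer must be an ancestor of the tracee, be declared
 *                    via PR_SET_PTRACER, or hold CAP_SYS_PTRACE
 *   2 (CAPABILITY) - attaching requires CAP_SYS_PTRACE
 *   3 (NO_ATTACH)  - neither PTRACE_ATTACH nor PTRACE_TRACEME is allowed
 */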
static int ptrace_scope = YAMA_SCOPE_RELATIONAL;

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	bool invalid;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

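/*
 * Stale relations are never unlinked on the delete path itself: they are
 * only marked ->invalid under RCU, and the work item below later walks the
 * list with ptracer_relations_lock held to actually free them.
 */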
static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);

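/*
 * A deferred access report: built while a spinlock is held, then run as
 * task_work on the reporting task, since fetching a cmdline can sleep.
 */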
struct access_report_info {
	struct callback_head work;
	const char *access;
	struct task_struct *target;
	struct task_struct *agent;
};

static void __report_access(struct callback_head *work)
{
	struct access_report_info *info =
		container_of(work, struct access_report_info, work);
	char *target_cmd, *agent_cmd;

	target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
	agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);

	pr_notice_ratelimited(
		"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
		info->access, target_cmd, info->target->pid, agent_cmd,
		info->agent->pid);

	kfree(agent_cmd);
	kfree(target_cmd);

	put_task_struct(info->agent);
	put_task_struct(info->target);
	kfree(info);
}

/* defers execution because cmdline access can sleep */
static void report_access(const char *access, struct task_struct *target,
			  struct task_struct *agent)
{
	struct access_report_info *info;
	char agent_comm[sizeof(agent->comm)];

	assert_spin_locked(&target->alloc_lock); /* for target->comm */

	if (current->flags & PF_KTHREAD) {
		/* I don't think kthreads call task_work_run() before exiting.
		 * Imagine angry ranting about procfs here.
		 */
		pr_notice_ratelimited(
		    "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
		    access, target->comm, target->pid,
		    get_task_comm(agent_comm, agent), agent->pid);
		return;
	}

	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (!info)
		return;
	init_task_work(&info->work, __report_access);
	get_task_struct(target);
	get_task_struct(agent);
	info->access = access;
	info->target = target;
	info->agent = agent;
	if (task_work_add(current, &info->work, TWA_RESUME) == 0)
		return; /* success */

	WARN(1, "report_access called from exiting task");
	put_task_struct(target);
	put_task_struct(agent);
	kfree(info);
}

/**
 * yama_relation_cleanup - remove invalid entries from the relation list
 *
 */
static void yama_relation_cleanup(struct work_struct *work)
{
	struct ptrace_relation *relation;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid) {
			list_del_rcu(&relation->node);
			kfree_rcu(relation, rcu);
		}
	}
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
}

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	struct ptrace_relation *relation, *added;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	added->tracee = tracee;
	added->tracer = tracer;
	added->invalid = false;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			list_replace_rcu(&relation->node, &added->node);
			kfree_rcu(relation, rcu);
			goto out;
		}
	}

	list_add_rcu(&added->node, &ptracer_relations);

out:
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
	return 0;
}

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation;
	bool marked = false;

	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			relation->invalid = true;
			marked = true;
		}
	}
	rcu_read_unlock();

	if (marked)
		schedule_work(&yama_relation_work);
}

/**
 * yama_task_free - check for task_pid to remove from exception list
 * @task: task being removed
 */
static void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}

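/*
 * Userspace side of the exception list, as a rough sketch: a process that
 * expects to be traced (e.g. by a crash handler) can whitelist its tracer
 * with prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0), allow any tracer with
 * prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0), or clear the
 * exception again with prctl(PR_SET_PTRACER, 0, 0, 0, 0).
 */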
/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
 * does not handle the given option.
 */
static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, unsigned long arg5)
{
	int rc = -ENOSYS;
	struct task_struct *myself = current;

	switch (option) {
	case PR_SET_PTRACER:
		/* Since a thread can call prctl(), find the group leader
		 * before calling _add() or _del() on it, since we want
		 * process-level granularity of control. The tracer group
		 * leader checking is handled later when walking the ancestry
		 * at the time of PTRACE_ATTACH check.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			tracer = find_get_task_by_vpid(arg2);
			if (!tracer) {
				rc = -EINVAL;
			} else {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer has a ptracer exception ancestor for tracee.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	rcu_read_lock();

	/*
	 * If there's already an active tracing relationship, then make an
	 * exception for the sake of other accesses, like process_vm_rw().
	 */
	parent = ptrace_parent(tracee);
	if (parent != NULL && same_thread_group(parent, tracer)) {
		rc = 1;
		goto unlock;
	}

	/* Look for a PR_SET_PTRACER relationship. */
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}
	}

	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;

unlock:
	rcu_read_unlock();

	return rc;
}

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
static int yama_ptrace_access_check(struct task_struct *child,
				    unsigned int mode)
{
	int rc = 0;

	/* require ptrace target be a child of ptracer on attach */
	if (mode & PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			rcu_read_lock();
			if (!pid_alive(child))
				rc = -EPERM;
			if (!rc && !task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_CAPABILITY:
			rcu_read_lock();
			if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0)
		report_access("attach", child, current);

	return rc;
}

/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
static int yama_ptrace_traceme(struct task_struct *parent)
{
	int rc = 0;

	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
	switch (ptrace_scope) {
	case YAMA_SCOPE_CAPABILITY:
		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
			rc = -EPERM;
		break;
	case YAMA_SCOPE_NO_ATTACH:
		rc = -EPERM;
		break;
	}

	if (rc) {
		task_lock(current);
		report_access("traceme", current, parent);
		task_unlock(current);
	}

	return rc;
}

static struct lsm_id yama_lsmid __lsm_ro_after_init = {
	.lsm = "yama",
	.slot = LSMBLOB_NOT_NEEDED
};

static struct security_hook_list yama_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
	LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
	LSM_HOOK_INIT(task_prctl, yama_task_prctl),
	LSM_HOOK_INIT(task_free, yama_task_free),
};

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table table_copy;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	/* Lock the max value if it ever gets set. */
	table_copy = *table;
	if (*(int *)table_copy.data == *(int *)table_copy.extra2)
		table_copy.extra1 = table_copy.extra2;

	return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}

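/*
 * max_scope is the sysctl's upper bound (extra2). Because of the check in
 * yama_dointvec_minmax() above, once ptrace_scope has been raised to
 * YAMA_SCOPE_NO_ATTACH it can no longer be lowered for the lifetime of the
 * system.
 */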
static int max_scope = YAMA_SCOPE_NO_ATTACH;

static struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname       = "ptrace_scope",
		.data           = &ptrace_scope,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = yama_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = &max_scope,
	},
	{ }
};
static void __init yama_init_sysctl(void)
{
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
}
#else
static inline void yama_init_sysctl(void) { }
#endif /* CONFIG_SYSCTL */

static int __init yama_init(void)
{
	pr_info("Yama: becoming mindful.\n");
	security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), &yama_lsmid);
	yama_init_sysctl();
	return 0;
}

DEFINE_LSM(yama) = {
	.name = "yama",
	.init = yama_init,
};