/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>

#define YAMA_SCOPE_DISABLED     0
#define YAMA_SCOPE_RELATIONAL   1
#define YAMA_SCOPE_CAPABILITY   2
#define YAMA_SCOPE_NO_ATTACH    3

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
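
/*
 * Added note on the scope levels (derived from the checks below; the value
 * is exposed via the kernel.yama.ptrace_scope sysctl registered at the
 * bottom of this file):
 *   0 - classic ptrace permissions only,
 *   1 - attach is limited to descendants, registered exceptions, or
 *       CAP_SYS_PTRACE holders,
 *   2 - attach requires CAP_SYS_PTRACE,
 *   3 - attach is disallowed entirely (and the setting locks once written).
 */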

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
        struct task_struct *tracer;
        struct task_struct *tracee;
        struct list_head node;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
                            struct task_struct *tracee)
{
        int rc = 0;
        struct ptrace_relation *added;
        struct ptrace_relation *entry, *relation = NULL;

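        /*
         * Added note: the new entry is allocated with GFP_KERNEL before
         * ptracer_relations_lock is taken, since a sleeping allocation is
         * not allowed while the spinlock is held; if an existing relation
         * is updated instead, the spare entry is freed after unlocking.
         */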
        added = kmalloc(sizeof(*added), GFP_KERNEL);
        if (!added)
                return -ENOMEM;

        spin_lock_bh(&ptracer_relations_lock);
        list_for_each_entry(entry, &ptracer_relations, node)
                if (entry->tracee == tracee) {
                        relation = entry;
                        break;
                }
        if (!relation) {
                relation = added;
                relation->tracee = tracee;
                list_add(&relation->node, &ptracer_relations);
        }
        relation->tracer = tracer;

        spin_unlock_bh(&ptracer_relations_lock);
        if (added != relation)
                kfree(added);

        return rc;
}

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
                             struct task_struct *tracee)
{
        struct ptrace_relation *relation, *safe;

        spin_lock_bh(&ptracer_relations_lock);
        list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
                if (relation->tracee == tracee ||
                    (tracer && relation->tracer == tracer)) {
                        list_del(&relation->node);
                        kfree(relation);
                }
        spin_unlock_bh(&ptracer_relations_lock);
}

/**
 * yama_task_free - drop any ptracer relations involving the freed task
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
        yama_ptracer_del(task, task);
}

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                    unsigned long arg4, unsigned long arg5)
{
        int rc;
        struct task_struct *myself = current;

        rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
        if (rc != -ENOSYS)
                return rc;

        switch (option) {
        case PR_SET_PTRACER:
                /* Since a thread can call prctl(), find the group leader
                 * before calling _add() or _del() on it, since we want
                 * process-level granularity of control. The tracer group
                 * leader checking is handled later when walking the ancestry
                 * at the time of PTRACE_ATTACH check.
                 */
                rcu_read_lock();
                if (!thread_group_leader(myself))
                        myself = rcu_dereference(myself->group_leader);
                get_task_struct(myself);
                rcu_read_unlock();

                if (arg2 == 0) {
                        yama_ptracer_del(NULL, myself);
                        rc = 0;
                } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
                        rc = yama_ptracer_add(NULL, myself);
                } else {
                        struct task_struct *tracer;

                        rcu_read_lock();
                        tracer = find_task_by_vpid(arg2);
                        if (tracer)
                                get_task_struct(tracer);
                        else
                                rc = -EINVAL;
                        rcu_read_unlock();

                        if (tracer) {
                                rc = yama_ptracer_add(tracer, myself);
                                put_task_struct(tracer);
                        }
                }

                put_task_struct(myself);
                break;
        }

        return rc;
}
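
/*
 * Illustrative userspace usage for the PR_SET_PTRACER handling above
 * (added sketch, not part of the original file; debugger_pid is a
 * hypothetical variable):
 *
 *   prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0);        allow one tracer
 *   prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);  allow any tracer
 *   prctl(PR_SET_PTRACER, 0, 0, 0, 0);                   clear the exception
 */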

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
                              struct task_struct *child)
{
        int rc = 0;
        struct task_struct *walker = child;

        if (!parent || !child)
                return 0;

        rcu_read_lock();
        if (!thread_group_leader(parent))
                parent = rcu_dereference(parent->group_leader);
        while (walker->pid > 0) {
                if (!thread_group_leader(walker))
                        walker = rcu_dereference(walker->group_leader);
                if (walker == parent) {
                        rc = 1;
                        break;
                }
                walker = rcu_dereference(walker->real_parent);
        }
        rcu_read_unlock();

        return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer is allowed by a ptracer exception registered for tracee.
 */
static int ptracer_exception_found(struct task_struct *tracer,
                                   struct task_struct *tracee)
{
        int rc = 0;
        struct ptrace_relation *relation;
        struct task_struct *parent = NULL;
        bool found = false;

        spin_lock_bh(&ptracer_relations_lock);
        rcu_read_lock();
        if (!thread_group_leader(tracee))
                tracee = rcu_dereference(tracee->group_leader);
        list_for_each_entry(relation, &ptracer_relations, node)
                if (relation->tracee == tracee) {
                        parent = relation->tracer;
                        found = true;
                        break;
                }

        if (found && (parent == NULL || task_is_descendant(parent, tracer)))
                rc = 1;
        rcu_read_unlock();
        spin_unlock_bh(&ptracer_relations_lock);

        return rc;
}
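
/*
 * Added note on the check above: a relation stored with a NULL tracer is
 * the PR_SET_PTRACER_ANY case and matches any tracer; otherwise the
 * attaching task must be the registered tracer or one of its descendants
 * (per task_is_descendant()).
 */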

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_access_check(struct task_struct *child,
                             unsigned int mode)
{
        int rc;

        /* If standard caps disallows it, so does Yama.  We should
         * only tighten restrictions further.
         */
        rc = cap_ptrace_access_check(child, mode);
        if (rc)
                return rc;

        /* require ptrace target be a child of ptracer on attach */
        if (mode == PTRACE_MODE_ATTACH) {
                switch (ptrace_scope) {
                case YAMA_SCOPE_DISABLED:
                        /* No additional restrictions. */
                        break;
                case YAMA_SCOPE_RELATIONAL:
                        if (!task_is_descendant(current, child) &&
                            !ptracer_exception_found(current, child) &&
                            !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
                                rc = -EPERM;
                        break;
                case YAMA_SCOPE_CAPABILITY:
                        if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
                                rc = -EPERM;
                        break;
                case YAMA_SCOPE_NO_ATTACH:
                default:
                        rc = -EPERM;
                        break;
                }
        }

        if (rc) {
                printk_ratelimited(KERN_NOTICE
                        "ptrace of pid %d was attempted by: %s (pid %d)\n",
                        child->pid, current->comm, current->pid);
        }

        return rc;
}
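
/*
 * Added note: the scope checks above apply only to PTRACE_MODE_ATTACH
 * requests; other access modes (e.g. PTRACE_MODE_READ) pass through with
 * just the capability check done by cap_ptrace_access_check().
 */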

/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
        int rc;

        /* If standard caps disallows it, so does Yama.  We should
         * only tighten restrictions further.
         */
        rc = cap_ptrace_traceme(parent);
        if (rc)
                return rc;

        /* Only disallow PTRACE_TRACEME on more aggressive settings. */
        switch (ptrace_scope) {
        case YAMA_SCOPE_CAPABILITY:
                if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
                        rc = -EPERM;
                break;
        case YAMA_SCOPE_NO_ATTACH:
                rc = -EPERM;
                break;
        }

        if (rc) {
                printk_ratelimited(KERN_NOTICE
                        "ptraceme of pid %d was attempted by: %s (pid %d)\n",
                        current->pid, parent->comm, parent->pid);
        }

        return rc;
}

#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
        .name =                 "yama",

        .ptrace_access_check =  yama_ptrace_access_check,
        .ptrace_traceme =       yama_ptrace_traceme,
        .task_prctl =           yama_task_prctl,
        .task_free =            yama_task_free,
};
#endif

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int rc;

        if (write && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (rc)
                return rc;

        /* Lock the max value if it ever gets set. */
        if (write && *(int *)table->data == *(int *)table->extra2)
                table->extra1 = table->extra2;

        return rc;
}

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
};

static struct ctl_table yama_sysctl_table[] = {
        {
                .procname       = "ptrace_scope",
                .data           = &ptrace_scope,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = yama_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &max_scope,
        },
        { }
};
#endif /* CONFIG_SYSCTL */
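
/*
 * Illustrative administration of the sysctl above (added sketch; the shell
 * commands assume a standard procfs mount):
 *
 *   sysctl -w kernel.yama.ptrace_scope=1
 *   echo 2 > /proc/sys/kernel/yama/ptrace_scope
 *
 * Writing requires CAP_SYS_PTRACE, and once the value reaches
 * YAMA_SCOPE_NO_ATTACH (3) the handler above locks it there.
 */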

static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
        if (!security_module_enable(&yama_ops))
                return 0;
#endif

        printk(KERN_INFO "Yama: becoming mindful.\n");

#ifndef CONFIG_SECURITY_YAMA_STACKED
        if (register_security(&yama_ops))
                panic("Yama: kernel registration failed.\n");
#endif

#ifdef CONFIG_SYSCTL
        if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
                panic("Yama: sysctl registration failed.\n");
#endif

        return 0;
}

security_initcall(yama_init);