/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_list));
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	REMOVE_LINKS(child);
	child->parent = new_parent;
	SET_LINKS(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	if (child->signal->flags & SIGNAL_GROUP_EXIT) {
		sigaddset(&child->pending.signal, SIGKILL);
		signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		REMOVE_LINKS(child);
		child->parent = child->real_parent;
		SET_LINKS(child);
	}

	ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the task: it must be our
 * ptraced child and, unless "kill" is set, it must be in the proper
 * stopped state.  A TASK_STOPPED child is flipped to TASK_TRACED, and
 * on success we wait until the child is really off the runqueue.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill) {
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return ret;
}

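/*
 * Permission check for attaching to @task.  The target must have a
 * user address space (no kernel threads), the tracer's uid and gid
 * must match all of the target's real, effective and saved ids unless
 * the tracer has CAP_SYS_PTRACE, the target's mm must be dumpable,
 * and the security module must agree.  Called with task_lock(task)
 * held by both callers.
 */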
static int may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

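/*
 * Locked wrapper around may_attach(): returns non-zero if the current
 * task is allowed to trace @task.
 */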
int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = may_attach(task);
	task_unlock(task);
	return !err;
}

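/*
 * Attach to @task as its tracer.  Attaching to init, to a thread of
 * the caller's own thread group, or to a task that is already traced
 * is refused.  On success the task is marked PT_PTRACED, re-parented
 * to the current task via __ptrace_link(), and sent a SIGSTOP so that
 * it stops for the debugger.
 */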
int ptrace_attach(struct task_struct *task)
{
	int retval;
	task_lock(task);
	retval = -EPERM;
	if (task->pid <= 1)
		goto bad;
	if (task->tgid == current->tgid)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;
	task_unlock(task);

	write_lock_irq(&tasklist_lock);
	__ptrace_link(task, current);
	write_unlock_irq(&tasklist_lock);

	force_sig_specific(SIGSTOP, task);
	return 0;

bad:
	task_unlock(task);
	return retval;
}

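/*
 * Detach from @child.  @data becomes the exit code the child reports
 * when it resumes, so it must be zero or a valid signal number.  Any
 * architecture-specific single-step state is cleared, the child is
 * re-parented back to its real parent, and it is woken up so it can
 * continue (acting on @data as a pending signal if it is non-zero).
 */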
int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	/* .. set the exit code the child will report .. */
	child->exit_code = data;

	/* .. re-parent .. */
	write_lock_irq(&tasklist_lock);
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */

int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			if (!PageCompound(page))
				set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

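/*
 * Read @len bytes from the traced task's address space at @src into
 * the userspace buffer @dst, bouncing through a 128-byte kernel
 * buffer.  Returns the number of bytes copied, -EIO if nothing at all
 * could be read, or -EFAULT if @dst is not writable.
 */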
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

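/*
 * Write @len bytes from the userspace buffer @src into the traced
 * task's address space at @dst, bouncing through a 128-byte kernel
 * buffer.  Returns the number of bytes copied, -EIO if nothing at all
 * could be written, or -EFAULT if @src is not readable.
 */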
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

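/*
 * Translate the PTRACE_O_* bits in @data into PT_* flags on the child,
 * replacing whatever trace options were set before.  A debugger
 * typically requests these from userspace with something like
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEFORK);
 *
 * Note that unknown option bits fail with -EINVAL, but only after the
 * recognized bits have already been applied.
 */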
static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

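/*
 * Copy the siginfo of the signal the child is currently stopped for
 * (child->last_siginfo) out to the userspace buffer @data.  Fails with
 * -ESRCH if the child's signal handlers are already gone and -EINVAL
 * if the child is not stopped inside signal delivery.
 */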
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user *data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

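/*
 * The converse of ptrace_getsiginfo(): overwrite child->last_siginfo
 * with the siginfo supplied in @data, letting the debugger modify the
 * signal the child will see when it resumes.
 */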
static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user *data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof(siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

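/*
 * Handle the ptrace requests that are the same on every architecture.
 * arch_ptrace() falls back to this for requests it does not implement
 * itself; anything not recognized here fails with -EIO.
 */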
int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}

#ifndef __ARCH_SYS_PTRACE
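/*
 * Resolve @pid to a task_struct with an extra reference held; the
 * caller must drop it with put_task_struct().  PTRACE_TRACEME is
 * handled entirely here (it acts on the current task), in which case
 * *childp is left NULL and 0 is returned.
 */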
static int ptrace_get_task_struct(long request, long pid,
				  struct task_struct **childp)
{
	struct task_struct *child;
	int ret;

	/*
	 * Callers use child == NULL as an indication to exit early even
	 * when the return value is 0, so make sure it is non-NULL here.
	 */
	*childp = NULL;

	if (request == PTRACE_TRACEME) {
		/*
		 * Are we already being traced?
		 */
		if (current->ptrace & PT_PTRACED)
			return -EPERM;
		ret = security_ptrace(current->parent, current);
		if (ret)
			return -EPERM;
		/*
		 * Set the ptrace bit in the process ptrace flags.
		 */
		current->ptrace |= PT_PTRACED;
		return 0;
	}

	/*
	 * You may not mess with init
	 */
	if (pid == 1)
		return -EPERM;

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return -ESRCH;

	*childp = child;
	return 0;
}

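/*
 * Common entry point for the ptrace() system call, used by every
 * architecture that does not define __ARCH_SYS_PTRACE: resolve the
 * target (or handle PTRACE_TRACEME), perform the attach or verify an
 * existing one, and hand the real work to arch_ptrace().
 */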
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	ret = ptrace_get_task_struct(request, pid, &child);
	if (!child)
		goto out;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif /* __ARCH_SYS_PTRACE */