Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * linux/kernel/ptrace.c
4 : *
5 : * (C) Copyright 1999 Linus Torvalds
6 : *
7 : * Common interfaces for "ptrace()" which we do not want
8 : * to continually duplicate across every architecture.
9 : */
10 :
11 : #include <linux/capability.h>
12 : #include <linux/export.h>
13 : #include <linux/sched.h>
14 : #include <linux/sched/mm.h>
15 : #include <linux/sched/coredump.h>
16 : #include <linux/sched/task.h>
17 : #include <linux/errno.h>
18 : #include <linux/mm.h>
19 : #include <linux/highmem.h>
20 : #include <linux/pagemap.h>
21 : #include <linux/ptrace.h>
22 : #include <linux/security.h>
23 : #include <linux/signal.h>
24 : #include <linux/uio.h>
25 : #include <linux/audit.h>
26 : #include <linux/pid_namespace.h>
27 : #include <linux/syscalls.h>
28 : #include <linux/uaccess.h>
29 : #include <linux/regset.h>
30 : #include <linux/hw_breakpoint.h>
31 : #include <linux/cn_proc.h>
32 : #include <linux/compat.h>
33 : #include <linux/sched/signal.h>
34 : #include <linux/minmax.h>
35 :
36 : #include <asm/syscall.h> /* for syscall_get_* */
37 :
38 : /*
39 : * Access another process' address space via ptrace.
40 : * Source/target buffer must be kernel space,
41 : * Do not walk the page table directly, use get_user_pages
42 : */
43 0 : int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
44 : void *buf, int len, unsigned int gup_flags)
45 : {
46 : struct mm_struct *mm;
47 : int ret;
48 :
49 0 : mm = get_task_mm(tsk);
50 0 : if (!mm)
51 : return 0;
52 :
53 0 : if (!tsk->ptrace ||
54 0 : (current != tsk->parent) ||
55 0 : ((get_dumpable(mm) != SUID_DUMP_USER) &&
56 0 : !ptracer_capable(tsk, mm->user_ns))) {
57 0 : mmput(mm);
58 0 : return 0;
59 : }
60 :
61 0 : ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
62 0 : mmput(mm);
63 :
64 0 : return ret;
65 : }
66 :
67 :
68 0 : void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
69 : const struct cred *ptracer_cred)
70 : {
71 0 : BUG_ON(!list_empty(&child->ptrace_entry));
72 0 : list_add(&child->ptrace_entry, &new_parent->ptraced);
73 0 : child->parent = new_parent;
74 0 : child->ptracer_cred = get_cred(ptracer_cred);
75 0 : }
76 :
77 : /*
78 : * ptrace a task: make the debugger its new parent and
79 : * move it to the ptrace list.
80 : *
81 : * Must be called with the tasklist lock write-held.
82 : */
83 : static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
84 : {
85 0 : __ptrace_link(child, new_parent, current_cred());
86 : }
87 :
88 : /**
89 : * __ptrace_unlink - unlink ptracee and restore its execution state
90 : * @child: ptracee to be unlinked
91 : *
92 : * Remove @child from the ptrace list, move it back to the original parent,
93 : * and restore the execution state so that it conforms to the group stop
94 : * state.
95 : *
96 : * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
97 : * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
98 : * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
99 : * If the ptracer is exiting, the ptracee can be in any state.
100 : *
101 : * After detach, the ptracee should be in a state which conforms to the
102 : * group stop. If the group is stopped or in the process of stopping, the
103 : * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
104 : * up from TASK_TRACED.
105 : *
106 : * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
107 : * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
108 : * to but in the opposite direction of what happens while attaching to a
109 : * stopped task. However, in this direction, the intermediate RUNNING
110 : * state is not hidden even from the current ptracer and if it immediately
111 : * re-attaches and performs a WNOHANG wait(2), it may fail.
112 : *
113 : * CONTEXT:
114 : * write_lock_irq(tasklist_lock)
115 : */
116 0 : void __ptrace_unlink(struct task_struct *child)
117 : {
118 : const struct cred *old_cred;
119 0 : BUG_ON(!child->ptrace);
120 :
121 0 : clear_task_syscall_work(child, SYSCALL_TRACE);
122 : #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
123 : clear_task_syscall_work(child, SYSCALL_EMU);
124 : #endif
125 :
126 0 : child->parent = child->real_parent;
127 0 : list_del_init(&child->ptrace_entry);
128 0 : old_cred = child->ptracer_cred;
129 0 : child->ptracer_cred = NULL;
130 0 : put_cred(old_cred);
131 :
132 0 : spin_lock(&child->sighand->siglock);
133 0 : child->ptrace = 0;
134 : /*
135 : * Clear all pending traps and TRAPPING. TRAPPING should be
136 : * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
137 : */
138 0 : task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
139 0 : task_clear_jobctl_trapping(child);
140 :
141 : /*
142 : * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
143 : * @child isn't dead.
144 : */
145 0 : if (!(child->flags & PF_EXITING) &&
146 0 : (child->signal->flags & SIGNAL_STOP_STOPPED ||
147 : child->signal->group_stop_count)) {
148 0 : child->jobctl |= JOBCTL_STOP_PENDING;
149 :
150 : /*
151 : * This is only possible if this thread was cloned by the
152 : * traced task running in the stopped group, set the signal
153 : * for the future reports.
154 : * FIXME: we should change ptrace_init_task() to handle this
155 : * case.
156 : */
157 0 : if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
158 0 : child->jobctl |= SIGSTOP;
159 : }
160 :
161 : /*
162 : * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
163 : * @child in the butt. Note that @resume should be used iff @child
164 : * is in TASK_TRACED; otherwise, we might unduly disrupt
165 : * TASK_KILLABLE sleeps.
166 : */
167 0 : if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
168 : ptrace_signal_wake_up(child, true);
169 :
170 0 : spin_unlock(&child->sighand->siglock);
171 0 : }
172 :
173 : static bool looks_like_a_spurious_pid(struct task_struct *task)
174 : {
175 0 : if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
176 : return false;
177 :
178 0 : if (task_pid_vnr(task) == task->ptrace_message)
179 : return false;
180 : /*
181 : * The tracee changed its pid but the PTRACE_EVENT_EXEC event
182 : * was not wait()'ed, most probably debugger targets the old
183 : * leader which was destroyed in de_thread().
184 : */
185 : return true;
186 : }
187 :
188 : /* Ensure that nothing can wake it up, even SIGKILL */
189 0 : static bool ptrace_freeze_traced(struct task_struct *task)
190 : {
191 0 : bool ret = false;
192 :
193 : /* Lockless, nobody but us can set this flag */
194 0 : if (task->jobctl & JOBCTL_LISTENING)
195 : return ret;
196 :
197 0 : spin_lock_irq(&task->sighand->siglock);
198 0 : if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
199 0 : !__fatal_signal_pending(task)) {
200 0 : WRITE_ONCE(task->__state, __TASK_TRACED);
201 0 : ret = true;
202 : }
203 0 : spin_unlock_irq(&task->sighand->siglock);
204 :
205 0 : return ret;
206 : }
207 :
208 0 : static void ptrace_unfreeze_traced(struct task_struct *task)
209 : {
210 0 : if (READ_ONCE(task->__state) != __TASK_TRACED)
211 : return;
212 :
213 0 : WARN_ON(!task->ptrace || task->parent != current);
214 :
215 : /*
216 : * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
217 : * Recheck state under the lock to close this race.
218 : */
219 0 : spin_lock_irq(&task->sighand->siglock);
220 0 : if (READ_ONCE(task->__state) == __TASK_TRACED) {
221 0 : if (__fatal_signal_pending(task))
222 0 : wake_up_state(task, __TASK_TRACED);
223 : else
224 0 : WRITE_ONCE(task->__state, TASK_TRACED);
225 : }
226 0 : spin_unlock_irq(&task->sighand->siglock);
227 : }
228 :
229 : /**
230 : * ptrace_check_attach - check whether ptracee is ready for ptrace operation
231 : * @child: ptracee to check for
232 : * @ignore_state: don't check whether @child is currently %TASK_TRACED
233 : *
234 : * Check whether @child is being ptraced by %current and ready for further
235 : * ptrace operations. If @ignore_state is %false, @child also should be in
236 : * %TASK_TRACED state and on return the child is guaranteed to be traced
237 : * and not executing. If @ignore_state is %true, @child can be in any
238 : * state.
239 : *
240 : * CONTEXT:
241 : * Grabs and releases tasklist_lock and @child->sighand->siglock.
242 : *
243 : * RETURNS:
244 : * 0 on success, -ESRCH if %child is not ready.
245 : */
246 0 : static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
247 : {
248 0 : int ret = -ESRCH;
249 :
250 : /*
251 : * We take the read lock around doing both checks to close a
252 : * possible race where someone else was tracing our child and
253 : * detached between these two checks. After this locked check,
254 : * we are sure that this is our traced child and that can only
255 : * be changed by us so it's not changing right after this.
256 : */
257 0 : read_lock(&tasklist_lock);
258 0 : if (child->ptrace && child->parent == current) {
259 0 : WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
260 : /*
261 : * child->sighand can't be NULL, release_task()
262 : * does ptrace_unlink() before __exit_signal().
263 : */
264 0 : if (ignore_state || ptrace_freeze_traced(child))
265 : ret = 0;
266 : }
267 0 : read_unlock(&tasklist_lock);
268 :
269 : if (!ret && !ignore_state) {
270 : if (!wait_task_inactive(child, __TASK_TRACED)) {
271 : /*
272 : * This can only happen if may_ptrace_stop() fails and
273 : * ptrace_stop() changes ->state back to TASK_RUNNING,
274 : * so we should not worry about leaking __TASK_TRACED.
275 : */
276 : WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
277 : ret = -ESRCH;
278 : }
279 : }
280 :
281 0 : return ret;
282 : }
283 :
284 0 : static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
285 : {
286 0 : if (mode & PTRACE_MODE_NOAUDIT)
287 0 : return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
288 0 : return ns_capable(ns, CAP_SYS_PTRACE);
289 : }
290 :
291 : /* Returns 0 on success, -errno on denial. */
292 0 : static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
293 : {
294 0 : const struct cred *cred = current_cred(), *tcred;
295 : struct mm_struct *mm;
296 : kuid_t caller_uid;
297 : kgid_t caller_gid;
298 :
299 0 : if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
300 0 : WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
301 0 : return -EPERM;
302 : }
303 :
304 : /* May we inspect the given task?
305 : * This check is used both for attaching with ptrace
306 : * and for allowing access to sensitive information in /proc.
307 : *
308 : * ptrace_attach denies several cases that /proc allows
309 : * because setting up the necessary parent/child relationship
310 : * or halting the specified task is impossible.
311 : */
312 :
313 : /* Don't let security modules deny introspection */
314 0 : if (same_thread_group(task, current))
315 : return 0;
316 : rcu_read_lock();
317 0 : if (mode & PTRACE_MODE_FSCREDS) {
318 0 : caller_uid = cred->fsuid;
319 0 : caller_gid = cred->fsgid;
320 : } else {
321 : /*
322 : * Using the euid would make more sense here, but something
323 : * in userland might rely on the old behavior, and this
324 : * shouldn't be a security problem since
325 : * PTRACE_MODE_REALCREDS implies that the caller explicitly
326 : * used a syscall that requests access to another process
327 : * (and not a filesystem syscall to procfs).
328 : */
329 0 : caller_uid = cred->uid;
330 0 : caller_gid = cred->gid;
331 : }
332 0 : tcred = __task_cred(task);
333 0 : if (uid_eq(caller_uid, tcred->euid) &&
334 0 : uid_eq(caller_uid, tcred->suid) &&
335 0 : uid_eq(caller_uid, tcred->uid) &&
336 0 : gid_eq(caller_gid, tcred->egid) &&
337 0 : gid_eq(caller_gid, tcred->sgid) &&
338 0 : gid_eq(caller_gid, tcred->gid))
339 : goto ok;
340 0 : if (ptrace_has_cap(tcred->user_ns, mode))
341 : goto ok;
342 : rcu_read_unlock();
343 0 : return -EPERM;
344 : ok:
345 : rcu_read_unlock();
346 : /*
347 : * If a task drops privileges and becomes nondumpable (through a syscall
348 : * like setresuid()) while we are trying to access it, we must ensure
349 : * that the dumpability is read after the credentials; otherwise,
350 : * we may be able to attach to a task that we shouldn't be able to
351 : * attach to (as if the task had dropped privileges without becoming
352 : * nondumpable).
353 : * Pairs with a write barrier in commit_creds().
354 : */
355 0 : smp_rmb();
356 0 : mm = task->mm;
357 0 : if (mm &&
358 0 : ((get_dumpable(mm) != SUID_DUMP_USER) &&
359 0 : !ptrace_has_cap(mm->user_ns, mode)))
360 : return -EPERM;
361 :
362 0 : return security_ptrace_access_check(task, mode);
363 : }
364 :
365 0 : bool ptrace_may_access(struct task_struct *task, unsigned int mode)
366 : {
367 : int err;
368 0 : task_lock(task);
369 0 : err = __ptrace_may_access(task, mode);
370 0 : task_unlock(task);
371 0 : return !err;
372 : }
373 :
374 : static int check_ptrace_options(unsigned long data)
375 : {
376 0 : if (data & ~(unsigned long)PTRACE_O_MASK)
377 : return -EINVAL;
378 :
379 0 : if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
380 : if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
381 : !IS_ENABLED(CONFIG_SECCOMP))
382 : return -EINVAL;
383 :
384 : if (!capable(CAP_SYS_ADMIN))
385 : return -EPERM;
386 :
387 : if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
388 : current->ptrace & PT_SUSPEND_SECCOMP)
389 : return -EPERM;
390 : }
391 : return 0;
392 : }
393 :
394 0 : static int ptrace_attach(struct task_struct *task, long request,
395 : unsigned long addr,
396 : unsigned long flags)
397 : {
398 0 : bool seize = (request == PTRACE_SEIZE);
399 : int retval;
400 :
401 0 : retval = -EIO;
402 0 : if (seize) {
403 0 : if (addr != 0)
404 : goto out;
405 : /*
406 : * This duplicates the check in check_ptrace_options() because
407 : * ptrace_attach() and ptrace_setoptions() have historically
408 : * used different error codes for unknown ptrace options.
409 : */
410 0 : if (flags & ~(unsigned long)PTRACE_O_MASK)
411 : goto out;
412 0 : retval = check_ptrace_options(flags);
413 0 : if (retval)
414 : return retval;
415 0 : flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
416 : } else {
417 : flags = PT_PTRACED;
418 : }
419 :
420 0 : audit_ptrace(task);
421 :
422 0 : retval = -EPERM;
423 0 : if (unlikely(task->flags & PF_KTHREAD))
424 : goto out;
425 0 : if (same_thread_group(task, current))
426 : goto out;
427 :
428 : /*
429 : * Protect exec's credential calculations against our interference;
430 : * SUID, SGID and LSM creds get determined differently
431 : * under ptrace.
432 : */
433 0 : retval = -ERESTARTNOINTR;
434 0 : if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
435 : goto out;
436 :
437 0 : task_lock(task);
438 0 : retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
439 0 : task_unlock(task);
440 0 : if (retval)
441 : goto unlock_creds;
442 :
443 0 : write_lock_irq(&tasklist_lock);
444 0 : retval = -EPERM;
445 0 : if (unlikely(task->exit_state))
446 : goto unlock_tasklist;
447 0 : if (task->ptrace)
448 : goto unlock_tasklist;
449 :
450 0 : task->ptrace = flags;
451 :
452 0 : ptrace_link(task, current);
453 :
454 : /* SEIZE doesn't trap tracee on attach */
455 0 : if (!seize)
456 0 : send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
457 :
458 0 : spin_lock(&task->sighand->siglock);
459 :
460 : /*
461 : * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
462 : * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
463 : * will be cleared if the child completes the transition or any
464 : * event which clears the group stop states happens. We'll wait
465 : * for the transition to complete before returning from this
466 : * function.
467 : *
468 : * This hides STOPPED -> RUNNING -> TRACED transition from the
469 : * attaching thread but a different thread in the same group can
470 : * still observe the transient RUNNING state. IOW, if another
471 : * thread's WNOHANG wait(2) on the stopped tracee races against
472 : * ATTACH, the wait(2) may fail due to the transient RUNNING.
473 : *
474 : * The following task_is_stopped() test is safe as both transitions
475 : * in and out of STOPPED are protected by siglock.
476 : */
477 0 : if (task_is_stopped(task) &&
478 0 : task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
479 0 : signal_wake_up_state(task, __TASK_STOPPED);
480 :
481 0 : spin_unlock(&task->sighand->siglock);
482 :
483 0 : retval = 0;
484 : unlock_tasklist:
485 0 : write_unlock_irq(&tasklist_lock);
486 : unlock_creds:
487 0 : mutex_unlock(&task->signal->cred_guard_mutex);
488 : out:
489 0 : if (!retval) {
490 : /*
491 : * We do not bother to change retval or clear JOBCTL_TRAPPING
492 : * if wait_on_bit() was interrupted by SIGKILL. The tracer will
493 : * not return to user-mode, it will exit and clear this bit in
494 : * __ptrace_unlink() if it wasn't already cleared by the tracee;
495 : * and until then nobody can ptrace this task.
496 : */
497 0 : wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
498 0 : proc_ptrace_connector(task, PTRACE_ATTACH);
499 : }
500 :
501 : return retval;
502 : }
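/*
 * Illustrative userspace sketch (not part of this file): how a tracer
 * exercises the two attach paths handled by ptrace_attach() above, using
 * the standard ptrace(2)/waitpid(2) wrappers. PTRACE_ATTACH delivers
 * SIGSTOP, so the tracer waits for the initial stop before issuing further
 * requests; PTRACE_SEIZE (addr must be 0) attaches without trapping the
 * tracee and takes the PTRACE_O_* option flags in 'data'.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int attach_example(pid_t pid, int use_seize)
{
	if (use_seize)
		return ptrace(PTRACE_SEIZE, pid, 0, PTRACE_O_TRACEEXIT) ? -1 : 0;

	if (ptrace(PTRACE_ATTACH, pid, 0, 0))
		return -1;
	/* wait for the SIGSTOP-induced ptrace stop */
	return waitpid(pid, NULL, 0) == pid ? 0 : -1;
}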
503 :
504 : /**
505 : * ptrace_traceme -- helper for PTRACE_TRACEME
506 : *
507 : * Performs checks and sets PT_PTRACED.
508 : * Should be used by all ptrace implementations for PTRACE_TRACEME.
509 : */
510 0 : static int ptrace_traceme(void)
511 : {
512 0 : int ret = -EPERM;
513 :
514 0 : write_lock_irq(&tasklist_lock);
515 : /* Are we already being traced? */
516 0 : if (!current->ptrace) {
517 0 : ret = security_ptrace_traceme(current->parent);
518 : /*
519 : * Check PF_EXITING to ensure ->real_parent has not passed
520 : * exit_ptrace(). Otherwise we don't report the error but
521 : * pretend ->real_parent untraces us right after return.
522 : */
523 0 : if (!ret && !(current->real_parent->flags & PF_EXITING)) {
524 0 : current->ptrace = PT_PTRACED;
525 0 : ptrace_link(current, current->real_parent);
526 : }
527 : }
528 0 : write_unlock_irq(&tasklist_lock);
529 :
530 0 : return ret;
531 : }
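/*
 * Illustrative userspace sketch (not part of this file): the classic way a
 * debugger starts a tracee, pairing PTRACE_TRACEME in the child (serviced
 * by ptrace_traceme() above) with a waitpid() in the parent for the
 * SIGTRAP stop raised at execvp().
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_tracee(char **argv)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execvp(argv[0], argv);
		_exit(127);			/* exec failed */
	}
	if (pid > 0)
		waitpid(pid, NULL, 0);		/* tracee stops at exec */
	return pid;
}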
532 :
533 : /*
534 : * Called with irqs disabled, returns true if children should reap themselves.
535 : */
536 : static int ignoring_children(struct sighand_struct *sigh)
537 : {
538 : int ret;
539 0 : spin_lock(&sigh->siglock);
540 0 : ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
541 0 : (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
542 0 : spin_unlock(&sigh->siglock);
543 : return ret;
544 : }
545 :
546 : /*
547 : * Called with tasklist_lock held for writing.
548 : * Unlink a traced task, and clean it up if it was a traced zombie.
549 : * Return true if it needs to be reaped with release_task().
550 : * (We can't call release_task() here because we already hold tasklist_lock.)
551 : *
552 : * If it's a zombie, our attachedness prevented normal parent notification
553 : * or self-reaping. Do notification now if it would have happened earlier.
554 : * If it should reap itself, return true.
555 : *
556 : * If it's our own child, there is no notification to do. But if our normal
557 : * children self-reap, then this child was prevented by ptrace and we must
558 : * reap it now, in that case we must also wake up sub-threads sleeping in
559 : * do_wait().
560 : */
561 0 : static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
562 : {
563 : bool dead;
564 :
565 0 : __ptrace_unlink(p);
566 :
567 0 : if (p->exit_state != EXIT_ZOMBIE)
568 : return false;
569 :
570 0 : dead = !thread_group_leader(p);
571 :
572 0 : if (!dead && thread_group_empty(p)) {
573 0 : if (!same_thread_group(p->real_parent, tracer))
574 0 : dead = do_notify_parent(p, p->exit_signal);
575 0 : else if (ignoring_children(tracer->sighand)) {
576 0 : __wake_up_parent(p, tracer);
577 0 : dead = true;
578 : }
579 : }
580 : /* Mark it as in the process of being reaped. */
581 0 : if (dead)
582 0 : p->exit_state = EXIT_DEAD;
583 : return dead;
584 : }
585 :
586 0 : static int ptrace_detach(struct task_struct *child, unsigned int data)
587 : {
588 0 : if (!valid_signal(data))
589 : return -EIO;
590 :
591 : /* Architecture-specific hardware disable .. */
592 0 : ptrace_disable(child);
593 :
594 0 : write_lock_irq(&tasklist_lock);
595 : /*
596 : * We rely on ptrace_freeze_traced(). It can't be killed and
597 : * untraced by another thread, it can't be a zombie.
598 : */
599 0 : WARN_ON(!child->ptrace || child->exit_state);
600 : /*
601 : * tasklist_lock avoids the race with wait_task_stopped(), see
602 : * the comment in ptrace_resume().
603 : */
604 0 : child->exit_code = data;
605 0 : __ptrace_detach(current, child);
606 0 : write_unlock_irq(&tasklist_lock);
607 :
608 0 : proc_ptrace_connector(child, PTRACE_DETACH);
609 :
610 0 : return 0;
611 : }
612 :
613 : /*
614 : * Detach all tasks we were using ptrace on. Called with tasklist held
615 : * for writing.
616 : */
617 0 : void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
618 : {
619 : struct task_struct *p, *n;
620 :
621 0 : list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
622 0 : if (unlikely(p->ptrace & PT_EXITKILL))
623 0 : send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
624 :
625 0 : if (__ptrace_detach(tracer, p))
626 0 : list_add(&p->ptrace_entry, dead);
627 : }
628 0 : }
629 :
630 0 : int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
631 : {
632 0 : int copied = 0;
633 :
634 0 : while (len > 0) {
635 : char buf[128];
636 : int this_len, retval;
637 :
638 0 : this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
639 0 : retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
640 :
641 0 : if (!retval) {
642 0 : if (copied)
643 : break;
644 0 : return -EIO;
645 : }
646 0 : if (copy_to_user(dst, buf, retval))
647 : return -EFAULT;
648 0 : copied += retval;
649 0 : src += retval;
650 0 : dst += retval;
651 0 : len -= retval;
652 : }
653 : return copied;
654 : }
655 :
656 0 : int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
657 : {
658 0 : int copied = 0;
659 :
660 0 : while (len > 0) {
661 : char buf[128];
662 : int this_len, retval;
663 :
664 0 : this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
665 0 : if (copy_from_user(buf, src, this_len))
666 0 : return -EFAULT;
667 0 : retval = ptrace_access_vm(tsk, dst, buf, this_len,
668 : FOLL_FORCE | FOLL_WRITE);
669 0 : if (!retval) {
670 0 : if (copied)
671 : break;
672 : return -EIO;
673 : }
674 0 : copied += retval;
675 0 : src += retval;
676 0 : dst += retval;
677 0 : len -= retval;
678 : }
679 : return copied;
680 : }
681 :
682 : static int ptrace_setoptions(struct task_struct *child, unsigned long data)
683 : {
684 : unsigned flags;
685 : int ret;
686 :
687 0 : ret = check_ptrace_options(data);
688 0 : if (ret)
689 : return ret;
690 :
691 : /* Avoid intermediate state when all opts are cleared */
692 0 : flags = child->ptrace;
693 0 : flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
694 0 : flags |= (data << PT_OPT_FLAG_SHIFT);
695 0 : child->ptrace = flags;
696 :
697 : return 0;
698 : }
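/*
 * Illustrative userspace sketch (not part of this file): a tracer setting
 * the option bits that ptrace_setoptions() above packs into ->ptrace. The
 * tracee must be in a ptrace stop; the PTRACE_O_* constants come from
 * <sys/ptrace.h>.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static long set_trace_options(pid_t pid)
{
	return ptrace(PTRACE_SETOPTIONS, pid, 0,
		      PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC |
		      PTRACE_O_TRACEEXIT);
}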
699 :
700 0 : static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
701 : {
702 : unsigned long flags;
703 0 : int error = -ESRCH;
704 :
705 0 : if (lock_task_sighand(child, &flags)) {
706 0 : error = -EINVAL;
707 0 : if (likely(child->last_siginfo != NULL)) {
708 0 : copy_siginfo(info, child->last_siginfo);
709 0 : error = 0;
710 : }
711 0 : unlock_task_sighand(child, &flags);
712 : }
713 0 : return error;
714 : }
715 :
716 0 : static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
717 : {
718 : unsigned long flags;
719 0 : int error = -ESRCH;
720 :
721 0 : if (lock_task_sighand(child, &flags)) {
722 0 : error = -EINVAL;
723 0 : if (likely(child->last_siginfo != NULL)) {
724 0 : copy_siginfo(child->last_siginfo, info);
725 0 : error = 0;
726 : }
727 0 : unlock_task_sighand(child, &flags);
728 : }
729 0 : return error;
730 : }
731 :
732 0 : static int ptrace_peek_siginfo(struct task_struct *child,
733 : unsigned long addr,
734 : unsigned long data)
735 : {
736 : struct ptrace_peeksiginfo_args arg;
737 : struct sigpending *pending;
738 : struct sigqueue *q;
739 : int ret, i;
740 :
741 0 : ret = copy_from_user(&arg, (void __user *) addr,
742 : sizeof(struct ptrace_peeksiginfo_args));
743 0 : if (ret)
744 : return -EFAULT;
745 :
746 0 : if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
747 : return -EINVAL; /* unknown flags */
748 :
749 0 : if (arg.nr < 0)
750 : return -EINVAL;
751 :
752 : /* Ensure arg.off fits in an unsigned long */
753 : if (arg.off > ULONG_MAX)
754 : return 0;
755 :
756 0 : if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
757 0 : pending = &child->signal->shared_pending;
758 : else
759 0 : pending = &child->pending;
760 :
761 0 : for (i = 0; i < arg.nr; ) {
762 : kernel_siginfo_t info;
763 0 : unsigned long off = arg.off + i;
764 0 : bool found = false;
765 :
766 0 : spin_lock_irq(&child->sighand->siglock);
767 0 : list_for_each_entry(q, &pending->list, list) {
768 0 : if (!off--) {
769 0 : found = true;
770 0 : copy_siginfo(&info, &q->info);
771 : break;
772 : }
773 : }
774 0 : spin_unlock_irq(&child->sighand->siglock);
775 :
776 0 : if (!found) /* beyond the end of the list */
777 : break;
778 :
779 : #ifdef CONFIG_COMPAT
780 : if (unlikely(in_compat_syscall())) {
781 : compat_siginfo_t __user *uinfo = compat_ptr(data);
782 :
783 : if (copy_siginfo_to_user32(uinfo, &info)) {
784 : ret = -EFAULT;
785 : break;
786 : }
787 :
788 : } else
789 : #endif
790 : {
791 0 : siginfo_t __user *uinfo = (siginfo_t __user *) data;
792 :
793 0 : if (copy_siginfo_to_user(uinfo, &info)) {
794 : ret = -EFAULT;
795 : break;
796 : }
797 : }
798 :
799 0 : data += sizeof(siginfo_t);
800 0 : i++;
801 :
802 0 : if (signal_pending(current))
803 : break;
804 :
805 0 : cond_resched();
806 : }
807 :
808 0 : if (i > 0)
809 : return i;
810 :
811 0 : return ret;
812 : }
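/*
 * Illustrative userspace sketch (not part of this file): reading pending
 * signals of a stopped tracee via the PTRACE_PEEKSIGINFO path above.
 * 'addr' points to the argument block and 'data' to an array of siginfo_t;
 * the return value is the number of entries copied. This assumes a glibc
 * that exposes struct __ptrace_peeksiginfo_args in <sys/ptrace.h>; other
 * libcs use the equivalent UAPI struct ptrace_peeksiginfo_args from
 * <linux/ptrace.h>.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static long peek_pending_signals(pid_t pid, siginfo_t *buf, int nr)
{
	struct __ptrace_peeksiginfo_args args = {
		.off = 0,	/* start at the head of the queue */
		.flags = 0,	/* 0: per-thread queue; PTRACE_PEEKSIGINFO_SHARED: process-wide */
		.nr = nr,
	};

	return ptrace(PTRACE_PEEKSIGINFO, pid, &args, buf);
}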
813 :
814 : #ifdef CONFIG_RSEQ
815 : static long ptrace_get_rseq_configuration(struct task_struct *task,
816 : unsigned long size, void __user *data)
817 : {
818 : struct ptrace_rseq_configuration conf = {
819 : .rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
820 : .rseq_abi_size = sizeof(*task->rseq),
821 : .signature = task->rseq_sig,
822 : .flags = 0,
823 : };
824 :
825 : size = min_t(unsigned long, size, sizeof(conf));
826 : if (copy_to_user(data, &conf, size))
827 : return -EFAULT;
828 : return sizeof(conf);
829 : }
830 : #endif
831 :
832 : #ifdef PTRACE_SINGLESTEP
833 : #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
834 : #else
835 : #define is_singlestep(request) 0
836 : #endif
837 :
838 : #ifdef PTRACE_SINGLEBLOCK
839 : #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
840 : #else
841 : #define is_singleblock(request) 0
842 : #endif
843 :
844 : #ifdef PTRACE_SYSEMU
845 : #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP)
846 : #else
847 : #define is_sysemu_singlestep(request) 0
848 : #endif
849 :
850 0 : static int ptrace_resume(struct task_struct *child, long request,
851 : unsigned long data)
852 : {
853 : bool need_siglock;
854 :
855 0 : if (!valid_signal(data))
856 : return -EIO;
857 :
858 0 : if (request == PTRACE_SYSCALL)
859 0 : set_task_syscall_work(child, SYSCALL_TRACE);
860 : else
861 0 : clear_task_syscall_work(child, SYSCALL_TRACE);
862 :
863 : #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
864 : if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
865 : set_task_syscall_work(child, SYSCALL_EMU);
866 : else
867 : clear_task_syscall_work(child, SYSCALL_EMU);
868 : #endif
869 :
870 : if (is_singleblock(request)) {
871 : if (unlikely(!arch_has_block_step()))
872 : return -EIO;
873 : user_enable_block_step(child);
874 0 : } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
875 : if (unlikely(!arch_has_single_step()))
876 : return -EIO;
877 0 : user_enable_single_step(child);
878 : } else {
879 0 : user_disable_single_step(child);
880 : }
881 :
882 : /*
883 : * Change ->exit_code and ->state under siglock to avoid the race
884 : * with wait_task_stopped() in between; a non-zero ->exit_code will
885 : * wrongly look like another report from tracee.
886 : *
887 : * Note that we need siglock even if ->exit_code == data and/or this
888 : * status was not reported yet, the new status must not be cleared by
889 : * wait_task_stopped() after resume.
890 : *
891 : * If data == 0 we do not care if wait_task_stopped() reports the old
892 : * status and clears the code too; this can't race with the tracee, it
893 : * takes siglock after resume.
894 : */
895 0 : need_siglock = data && !thread_group_empty(current);
896 0 : if (need_siglock)
897 0 : spin_lock_irq(&child->sighand->siglock);
898 0 : child->exit_code = data;
899 0 : wake_up_state(child, __TASK_TRACED);
900 0 : if (need_siglock)
901 0 : spin_unlock_irq(&child->sighand->siglock);
902 :
903 : return 0;
904 : }
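/*
 * Illustrative userspace sketch (not part of this file): the resume
 * requests handled by ptrace_resume() above map to simple tracer-side
 * calls where 'data' carries the signal to deliver on resume (0 for none).
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static long resume_tracee(pid_t pid, int deliver_sig, int trace_syscalls)
{
	/* the tracee must currently be in a ptrace stop */
	return ptrace(trace_syscalls ? PTRACE_SYSCALL : PTRACE_CONT,
		      pid, 0, deliver_sig);
}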
905 :
906 : #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
907 :
908 : static const struct user_regset *
909 : find_regset(const struct user_regset_view *view, unsigned int type)
910 : {
911 : const struct user_regset *regset;
912 : int n;
913 :
914 : for (n = 0; n < view->n; ++n) {
915 : regset = view->regsets + n;
916 : if (regset->core_note_type == type)
917 : return regset;
918 : }
919 :
920 : return NULL;
921 : }
922 :
923 : static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
924 : struct iovec *kiov)
925 : {
926 : const struct user_regset_view *view = task_user_regset_view(task);
927 : const struct user_regset *regset = find_regset(view, type);
928 : int regset_no;
929 :
930 : if (!regset || (kiov->iov_len % regset->size) != 0)
931 : return -EINVAL;
932 :
933 : regset_no = regset - view->regsets;
934 : kiov->iov_len = min(kiov->iov_len,
935 : (__kernel_size_t) (regset->n * regset->size));
936 :
937 : if (req == PTRACE_GETREGSET)
938 : return copy_regset_to_user(task, view, regset_no, 0,
939 : kiov->iov_len, kiov->iov_base);
940 : else
941 : return copy_regset_from_user(task, view, regset_no, 0,
942 : kiov->iov_len, kiov->iov_base);
943 : }
944 :
945 : /*
946 : * This is declared in linux/regset.h and defined in machine-dependent
947 : * code. We put the export here, near the primary machine-neutral use,
948 : * to ensure no machine forgets it.
949 : */
950 : EXPORT_SYMBOL_GPL(task_user_regset_view);
951 :
952 : static unsigned long
953 : ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
954 : struct ptrace_syscall_info *info)
955 : {
956 : unsigned long args[ARRAY_SIZE(info->entry.args)];
957 : int i;
958 :
959 : info->op = PTRACE_SYSCALL_INFO_ENTRY;
960 : info->entry.nr = syscall_get_nr(child, regs);
961 : syscall_get_arguments(child, regs, args);
962 : for (i = 0; i < ARRAY_SIZE(args); i++)
963 : info->entry.args[i] = args[i];
964 :
965 : /* args is the last field in struct ptrace_syscall_info.entry */
966 : return offsetofend(struct ptrace_syscall_info, entry.args);
967 : }
968 :
969 : static unsigned long
970 : ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
971 : struct ptrace_syscall_info *info)
972 : {
973 : /*
974 : * As struct ptrace_syscall_info.entry is currently a subset
975 : * of struct ptrace_syscall_info.seccomp, it makes sense to
976 : * initialize that subset using ptrace_get_syscall_info_entry().
977 : * This can be reconsidered in the future if these structures
978 : * diverge significantly enough.
979 : */
980 : ptrace_get_syscall_info_entry(child, regs, info);
981 : info->op = PTRACE_SYSCALL_INFO_SECCOMP;
982 : info->seccomp.ret_data = child->ptrace_message;
983 :
984 : /* ret_data is the last field in struct ptrace_syscall_info.seccomp */
985 : return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
986 : }
987 :
988 : static unsigned long
989 : ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
990 : struct ptrace_syscall_info *info)
991 : {
992 : info->op = PTRACE_SYSCALL_INFO_EXIT;
993 : info->exit.rval = syscall_get_error(child, regs);
994 : info->exit.is_error = !!info->exit.rval;
995 : if (!info->exit.is_error)
996 : info->exit.rval = syscall_get_return_value(child, regs);
997 :
998 : /* is_error is the last field in struct ptrace_syscall_info.exit */
999 : return offsetofend(struct ptrace_syscall_info, exit.is_error);
1000 : }
1001 :
1002 : static int
1003 : ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
1004 : void __user *datavp)
1005 : {
1006 : struct pt_regs *regs = task_pt_regs(child);
1007 : struct ptrace_syscall_info info = {
1008 : .op = PTRACE_SYSCALL_INFO_NONE,
1009 : .arch = syscall_get_arch(child),
1010 : .instruction_pointer = instruction_pointer(regs),
1011 : .stack_pointer = user_stack_pointer(regs),
1012 : };
1013 : unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
1014 : unsigned long write_size;
1015 :
1016 : /*
1017 : * This does not need lock_task_sighand() to access
1018 : * child->last_siginfo because ptrace_freeze_traced()
1019 : * called earlier by ptrace_check_attach() ensures that
1020 : * the tracee cannot go away and clear its last_siginfo.
1021 : */
1022 : switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
1023 : case SIGTRAP | 0x80:
1024 : switch (child->ptrace_message) {
1025 : case PTRACE_EVENTMSG_SYSCALL_ENTRY:
1026 : actual_size = ptrace_get_syscall_info_entry(child, regs,
1027 : &info);
1028 : break;
1029 : case PTRACE_EVENTMSG_SYSCALL_EXIT:
1030 : actual_size = ptrace_get_syscall_info_exit(child, regs,
1031 : &info);
1032 : break;
1033 : }
1034 : break;
1035 : case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
1036 : actual_size = ptrace_get_syscall_info_seccomp(child, regs,
1037 : &info);
1038 : break;
1039 : }
1040 :
1041 : write_size = min(actual_size, user_size);
1042 : return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
1043 : }
1044 : #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
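/*
 * Illustrative userspace sketch (not part of this file): querying a
 * syscall or seccomp stop via ptrace_get_syscall_info() above. 'addr' is
 * the size of the caller's buffer (a struct ptrace_syscall_info) and the
 * return value is the size the kernel wanted to write, so truncation can
 * be detected. PTRACE_GET_SYSCALL_INFO in <sys/ptrace.h> is assumed to be
 * available (glibc >= 2.30); otherwise the constant comes from
 * <linux/ptrace.h>.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static long get_syscall_info(pid_t pid, void *buf, size_t size)
{
	return ptrace(PTRACE_GET_SYSCALL_INFO, pid, (void *)size, buf);
}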
1045 :
1046 0 : int ptrace_request(struct task_struct *child, long request,
1047 : unsigned long addr, unsigned long data)
1048 : {
1049 0 : bool seized = child->ptrace & PT_SEIZED;
1050 0 : int ret = -EIO;
1051 : kernel_siginfo_t siginfo, *si;
1052 0 : void __user *datavp = (void __user *) data;
1053 0 : unsigned long __user *datalp = datavp;
1054 : unsigned long flags;
1055 :
1056 0 : switch (request) {
1057 : case PTRACE_PEEKTEXT:
1058 : case PTRACE_PEEKDATA:
1059 0 : return generic_ptrace_peekdata(child, addr, data);
1060 : case PTRACE_POKETEXT:
1061 : case PTRACE_POKEDATA:
1062 : return generic_ptrace_pokedata(child, addr, data);
1063 :
1064 : #ifdef PTRACE_OLDSETOPTIONS
1065 : case PTRACE_OLDSETOPTIONS:
1066 : #endif
1067 : case PTRACE_SETOPTIONS:
1068 0 : ret = ptrace_setoptions(child, data);
1069 : break;
1070 : case PTRACE_GETEVENTMSG:
1071 0 : ret = put_user(child->ptrace_message, datalp);
1072 : break;
1073 :
1074 : case PTRACE_PEEKSIGINFO:
1075 0 : ret = ptrace_peek_siginfo(child, addr, data);
1076 0 : break;
1077 :
1078 : case PTRACE_GETSIGINFO:
1079 0 : ret = ptrace_getsiginfo(child, &siginfo);
1080 0 : if (!ret)
1081 0 : ret = copy_siginfo_to_user(datavp, &siginfo);
1082 : break;
1083 :
1084 : case PTRACE_SETSIGINFO:
1085 0 : ret = copy_siginfo_from_user(&siginfo, datavp);
1086 0 : if (!ret)
1087 0 : ret = ptrace_setsiginfo(child, &siginfo);
1088 : break;
1089 :
1090 : case PTRACE_GETSIGMASK: {
1091 : sigset_t *mask;
1092 :
1093 0 : if (addr != sizeof(sigset_t)) {
1094 : ret = -EINVAL;
1095 : break;
1096 : }
1097 :
1098 0 : if (test_tsk_restore_sigmask(child))
1099 0 : mask = &child->saved_sigmask;
1100 : else
1101 0 : mask = &child->blocked;
1102 :
1103 0 : if (copy_to_user(datavp, mask, sizeof(sigset_t)))
1104 : ret = -EFAULT;
1105 : else
1106 0 : ret = 0;
1107 :
1108 : break;
1109 : }
1110 :
1111 : case PTRACE_SETSIGMASK: {
1112 : sigset_t new_set;
1113 :
1114 0 : if (addr != sizeof(sigset_t)) {
1115 : ret = -EINVAL;
1116 : break;
1117 : }
1118 :
1119 0 : if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
1120 : ret = -EFAULT;
1121 : break;
1122 : }
1123 :
1124 0 : sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1125 :
1126 : /*
1127 : * Every thread does recalc_sigpending() after resume, so
1128 : * retarget_shared_pending() and recalc_sigpending() are not
1129 : * called here.
1130 : */
1131 0 : spin_lock_irq(&child->sighand->siglock);
1132 0 : child->blocked = new_set;
1133 0 : spin_unlock_irq(&child->sighand->siglock);
1134 :
1135 0 : clear_tsk_restore_sigmask(child);
1136 :
1137 0 : ret = 0;
1138 0 : break;
1139 : }
1140 :
1141 : case PTRACE_INTERRUPT:
1142 : /*
1143 : * Stop tracee without any side-effect on signal or job
1144 : * control. At least one trap is guaranteed to happen
1145 : * after this request. If @child is already trapped, the
1146 : * current trap is not disturbed and another trap will
1147 : * happen after the current trap is ended with PTRACE_CONT.
1148 : *
1149 : * The actual trap might not be PTRACE_EVENT_STOP trap but
1150 : * the pending condition is cleared regardless.
1151 : */
1152 0 : if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1153 : break;
1154 :
1155 : /*
1156 : * INTERRUPT doesn't disturb existing trap sans one
1157 : * exception. If ptracer issued LISTEN for the current
1158 : * STOP, this INTERRUPT should clear LISTEN and re-trap
1159 : * tracee into STOP.
1160 : */
1161 0 : if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1162 0 : ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1163 :
1164 0 : unlock_task_sighand(child, &flags);
1165 0 : ret = 0;
1166 0 : break;
1167 :
1168 : case PTRACE_LISTEN:
1169 : /*
1170 : * Listen for events. Tracee must be in STOP. It's not
1171 : * resumed per-se but is not considered to be in TRACED by
1172 : * wait(2) or ptrace(2). If an async event (e.g. group
1173 : * stop state change) happens, tracee will enter STOP trap
1174 : * again. Alternatively, ptracer can issue INTERRUPT to
1175 : * finish listening and re-trap tracee into STOP.
1176 : */
1177 0 : if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1178 : break;
1179 :
1180 0 : si = child->last_siginfo;
1181 0 : if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1182 0 : child->jobctl |= JOBCTL_LISTENING;
1183 : /*
1184 : * If NOTIFY is set, it means event happened between
1185 : * start of this trap and now. Trigger re-trap.
1186 : */
1187 0 : if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1188 : ptrace_signal_wake_up(child, true);
1189 : ret = 0;
1190 : }
1191 0 : unlock_task_sighand(child, &flags);
1192 : break;
1193 :
1194 : case PTRACE_DETACH: /* detach a process that was attached. */
1195 0 : ret = ptrace_detach(child, data);
1196 0 : break;
1197 :
1198 : #ifdef CONFIG_BINFMT_ELF_FDPIC
1199 : case PTRACE_GETFDPIC: {
1200 : struct mm_struct *mm = get_task_mm(child);
1201 : unsigned long tmp = 0;
1202 :
1203 : ret = -ESRCH;
1204 : if (!mm)
1205 : break;
1206 :
1207 : switch (addr) {
1208 : case PTRACE_GETFDPIC_EXEC:
1209 : tmp = mm->context.exec_fdpic_loadmap;
1210 : break;
1211 : case PTRACE_GETFDPIC_INTERP:
1212 : tmp = mm->context.interp_fdpic_loadmap;
1213 : break;
1214 : default:
1215 : break;
1216 : }
1217 : mmput(mm);
1218 :
1219 : ret = put_user(tmp, datalp);
1220 : break;
1221 : }
1222 : #endif
1223 :
1224 : #ifdef PTRACE_SINGLESTEP
1225 : case PTRACE_SINGLESTEP:
1226 : #endif
1227 : #ifdef PTRACE_SINGLEBLOCK
1228 : case PTRACE_SINGLEBLOCK:
1229 : #endif
1230 : #ifdef PTRACE_SYSEMU
1231 : case PTRACE_SYSEMU:
1232 : case PTRACE_SYSEMU_SINGLESTEP:
1233 : #endif
1234 : case PTRACE_SYSCALL:
1235 : case PTRACE_CONT:
1236 0 : return ptrace_resume(child, request, data);
1237 :
1238 : case PTRACE_KILL:
1239 0 : if (child->exit_state) /* already dead */
1240 : return 0;
1241 0 : return ptrace_resume(child, request, SIGKILL);
1242 :
1243 : #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1244 : case PTRACE_GETREGSET:
1245 : case PTRACE_SETREGSET: {
1246 : struct iovec kiov;
1247 : struct iovec __user *uiov = datavp;
1248 :
1249 : if (!access_ok(uiov, sizeof(*uiov)))
1250 : return -EFAULT;
1251 :
1252 : if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1253 : __get_user(kiov.iov_len, &uiov->iov_len))
1254 : return -EFAULT;
1255 :
1256 : ret = ptrace_regset(child, request, addr, &kiov);
1257 : if (!ret)
1258 : ret = __put_user(kiov.iov_len, &uiov->iov_len);
1259 : break;
1260 : }
1261 :
1262 : case PTRACE_GET_SYSCALL_INFO:
1263 : ret = ptrace_get_syscall_info(child, addr, datavp);
1264 : break;
1265 : #endif
1266 :
1267 : case PTRACE_SECCOMP_GET_FILTER:
1268 0 : ret = seccomp_get_filter(child, addr, datavp);
1269 0 : break;
1270 :
1271 : case PTRACE_SECCOMP_GET_METADATA:
1272 0 : ret = seccomp_get_metadata(child, addr, datavp);
1273 0 : break;
1274 :
1275 : #ifdef CONFIG_RSEQ
1276 : case PTRACE_GET_RSEQ_CONFIGURATION:
1277 : ret = ptrace_get_rseq_configuration(child, addr, datavp);
1278 : break;
1279 : #endif
1280 :
1281 : default:
1282 : break;
1283 : }
1284 :
1285 : return ret;
1286 : }
1287 :
1288 : #ifndef arch_ptrace_attach
1289 : #define arch_ptrace_attach(child) do { } while (0)
1290 : #endif
1291 :
1292 0 : SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1293 : unsigned long, data)
1294 : {
1295 : struct task_struct *child;
1296 : long ret;
1297 :
1298 0 : if (request == PTRACE_TRACEME) {
1299 0 : ret = ptrace_traceme();
1300 : if (!ret)
1301 : arch_ptrace_attach(current);
1302 0 : goto out;
1303 : }
1304 :
1305 0 : child = find_get_task_by_vpid(pid);
1306 0 : if (!child) {
1307 : ret = -ESRCH;
1308 : goto out;
1309 : }
1310 :
1311 0 : if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1312 0 : ret = ptrace_attach(child, request, addr, data);
1313 : /*
1314 : * Some architectures need to do book-keeping after
1315 : * a ptrace attach.
1316 : */
1317 : if (!ret)
1318 : arch_ptrace_attach(child);
1319 0 : goto out_put_task_struct;
1320 : }
1321 :
1322 0 : ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1323 0 : request == PTRACE_INTERRUPT);
1324 0 : if (ret < 0)
1325 : goto out_put_task_struct;
1326 :
1327 0 : ret = arch_ptrace(child, request, addr, data);
1328 0 : if (ret || request != PTRACE_DETACH)
1329 0 : ptrace_unfreeze_traced(child);
1330 :
1331 : out_put_task_struct:
1332 0 : put_task_struct(child);
1333 : out:
1334 0 : return ret;
1335 : }
1336 :
1337 0 : int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1338 : unsigned long data)
1339 : {
1340 : unsigned long tmp;
1341 : int copied;
1342 :
1343 0 : copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1344 0 : if (copied != sizeof(tmp))
1345 : return -EIO;
1346 0 : return put_user(tmp, (unsigned long __user *)data);
1347 : }
1348 :
1349 0 : int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1350 : unsigned long data)
1351 : {
1352 : int copied;
1353 :
1354 0 : copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1355 : FOLL_FORCE | FOLL_WRITE);
1356 0 : return (copied == sizeof(data)) ? 0 : -EIO;
1357 : }
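/*
 * Illustrative userspace sketch (not part of this file): the PEEKDATA /
 * POKEDATA requests serviced by the two helpers above. PEEKDATA returns
 * the word itself, so errno must be cleared first to tell a stored value
 * of -1 apart from an error.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>

static int peek_poke_word(pid_t pid, void *addr, long *out)
{
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
	if (word == -1 && errno)
		return -1;
	*out = word;

	/* write the same word back */
	return ptrace(PTRACE_POKEDATA, pid, addr, (void *)word) ? -1 : 0;
}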
1358 :
1359 : #if defined CONFIG_COMPAT
1360 :
1361 : int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1362 : compat_ulong_t addr, compat_ulong_t data)
1363 : {
1364 : compat_ulong_t __user *datap = compat_ptr(data);
1365 : compat_ulong_t word;
1366 : kernel_siginfo_t siginfo;
1367 : int ret;
1368 :
1369 : switch (request) {
1370 : case PTRACE_PEEKTEXT:
1371 : case PTRACE_PEEKDATA:
1372 : ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1373 : FOLL_FORCE);
1374 : if (ret != sizeof(word))
1375 : ret = -EIO;
1376 : else
1377 : ret = put_user(word, datap);
1378 : break;
1379 :
1380 : case PTRACE_POKETEXT:
1381 : case PTRACE_POKEDATA:
1382 : ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1383 : FOLL_FORCE | FOLL_WRITE);
1384 : ret = (ret != sizeof(data) ? -EIO : 0);
1385 : break;
1386 :
1387 : case PTRACE_GETEVENTMSG:
1388 : ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1389 : break;
1390 :
1391 : case PTRACE_GETSIGINFO:
1392 : ret = ptrace_getsiginfo(child, &siginfo);
1393 : if (!ret)
1394 : ret = copy_siginfo_to_user32(
1395 : (struct compat_siginfo __user *) datap,
1396 : &siginfo);
1397 : break;
1398 :
1399 : case PTRACE_SETSIGINFO:
1400 : ret = copy_siginfo_from_user32(
1401 : &siginfo, (struct compat_siginfo __user *) datap);
1402 : if (!ret)
1403 : ret = ptrace_setsiginfo(child, &siginfo);
1404 : break;
1405 : #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1406 : case PTRACE_GETREGSET:
1407 : case PTRACE_SETREGSET:
1408 : {
1409 : struct iovec kiov;
1410 : struct compat_iovec __user *uiov =
1411 : (struct compat_iovec __user *) datap;
1412 : compat_uptr_t ptr;
1413 : compat_size_t len;
1414 :
1415 : if (!access_ok(uiov, sizeof(*uiov)))
1416 : return -EFAULT;
1417 :
1418 : if (__get_user(ptr, &uiov->iov_base) ||
1419 : __get_user(len, &uiov->iov_len))
1420 : return -EFAULT;
1421 :
1422 : kiov.iov_base = compat_ptr(ptr);
1423 : kiov.iov_len = len;
1424 :
1425 : ret = ptrace_regset(child, request, addr, &kiov);
1426 : if (!ret)
1427 : ret = __put_user(kiov.iov_len, &uiov->iov_len);
1428 : break;
1429 : }
1430 : #endif
1431 :
1432 : default:
1433 : ret = ptrace_request(child, request, addr, data);
1434 : }
1435 :
1436 : return ret;
1437 : }
1438 :
1439 : COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1440 : compat_long_t, addr, compat_long_t, data)
1441 : {
1442 : struct task_struct *child;
1443 : long ret;
1444 :
1445 : if (request == PTRACE_TRACEME) {
1446 : ret = ptrace_traceme();
1447 : goto out;
1448 : }
1449 :
1450 : child = find_get_task_by_vpid(pid);
1451 : if (!child) {
1452 : ret = -ESRCH;
1453 : goto out;
1454 : }
1455 :
1456 : if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1457 : ret = ptrace_attach(child, request, addr, data);
1458 : /*
1459 : * Some architectures need to do book-keeping after
1460 : * a ptrace attach.
1461 : */
1462 : if (!ret)
1463 : arch_ptrace_attach(child);
1464 : goto out_put_task_struct;
1465 : }
1466 :
1467 : ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1468 : request == PTRACE_INTERRUPT);
1469 : if (!ret) {
1470 : ret = compat_arch_ptrace(child, request, addr, data);
1471 : if (ret || request != PTRACE_DETACH)
1472 : ptrace_unfreeze_traced(child);
1473 : }
1474 :
1475 : out_put_task_struct:
1476 : put_task_struct(child);
1477 : out:
1478 : return ret;
1479 : }
1480 : #endif /* CONFIG_COMPAT */
|