Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4 : * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5 : * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6 : * Copyright 2003 PathScale, Inc.
7 : */
8 :
9 : #include <linux/stddef.h>
10 : #include <linux/err.h>
11 : #include <linux/hardirq.h>
12 : #include <linux/mm.h>
13 : #include <linux/module.h>
14 : #include <linux/personality.h>
15 : #include <linux/proc_fs.h>
16 : #include <linux/ptrace.h>
17 : #include <linux/random.h>
18 : #include <linux/slab.h>
19 : #include <linux/sched.h>
20 : #include <linux/sched/debug.h>
21 : #include <linux/sched/task.h>
22 : #include <linux/sched/task_stack.h>
23 : #include <linux/seq_file.h>
24 : #include <linux/tick.h>
25 : #include <linux/threads.h>
26 : #include <linux/resume_user_mode.h>
27 : #include <asm/current.h>
28 : #include <asm/mmu_context.h>
29 : #include <linux/uaccess.h>
30 : #include <as-layout.h>
31 : #include <kern_util.h>
32 : #include <os.h>
33 : #include <skas.h>
34 : #include <registers.h>
35 : #include <linux/time-internal.h>
36 :
37 : /*
38 : * This is a per-cpu array. A processor only modifies its entry and it only
39 : * cares about its entry, so it's OK if another processor is modifying its
40 : * entry.
41 : */
42 : struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
43 :
44 : static inline int external_pid(void)
45 : {
46 : /* FIXME: Need to look up userspace_pid by cpu */
47 618 : return userspace_pid[0];
48 : }
49 :
50 0 : int pid_to_processor_id(int pid)
51 : {
52 : int i;
53 :
54 0 : for (i = 0; i < ncpus; i++) {
55 0 : if (cpu_tasks[i].pid == pid)
56 : return i;
57 : }
58 : return -1;
59 : }
60 :
61 0 : void free_stack(unsigned long stack, int order)
62 : {
63 0 : free_pages(stack, order);
64 0 : }
65 :
66 0 : unsigned long alloc_stack(int order, int atomic)
67 : {
68 : unsigned long page;
69 0 : gfp_t flags = GFP_KERNEL;
70 :
71 0 : if (atomic)
72 0 : flags = GFP_ATOMIC;
73 0 : page = __get_free_pages(flags, order);
74 :
75 0 : return page;
76 : }
77 :
78 : static inline void set_current(struct task_struct *task)
79 : {
80 1236 : cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
81 : { external_pid(), task });
82 : }
83 :
84 : extern void arch_switch_to(struct task_struct *to);
85 :
86 618 : void *__switch_to(struct task_struct *from, struct task_struct *to)
87 : {
88 618 : to->thread.prev_sched = from;
89 618 : set_current(to);
90 :
91 618 : switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
92 511 : arch_switch_to(current);
93 :
94 511 : return current->thread.prev_sched;
95 : }
96 :
97 0 : void interrupt_end(void)
98 : {
99 0 : struct pt_regs *regs = &current->thread.regs;
100 :
101 0 : if (need_resched())
102 0 : schedule();
103 0 : if (test_thread_flag(TIF_SIGPENDING) ||
104 0 : test_thread_flag(TIF_NOTIFY_SIGNAL))
105 0 : do_signal(regs);
106 0 : if (test_thread_flag(TIF_NOTIFY_RESUME))
107 0 : resume_user_mode_work(regs);
108 0 : }
109 :
110 0 : int get_current_pid(void)
111 : {
112 0 : return task_pid_nr(current);
113 : }
114 :
115 : /*
116 : * This is called magically, by its address being stuffed in a jmp_buf
117 : * and being longjmp-d to.
118 : */
119 108 : void new_thread_handler(void)
120 : {
121 : int (*fn)(void *), n;
122 : void *arg;
123 :
124 108 : if (current->thread.prev_sched != NULL)
125 107 : schedule_tail(current->thread.prev_sched);
126 108 : current->thread.prev_sched = NULL;
127 :
128 108 : fn = current->thread.request.u.thread.proc;
129 108 : arg = current->thread.request.u.thread.arg;
130 :
131 : /*
132 : * callback returns only if the kernel thread execs a process
133 : */
134 108 : n = fn(arg);
135 0 : userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
136 0 : }
137 :
138 : /* Called magically, see new_thread_handler above */
139 0 : void fork_handler(void)
140 : {
141 0 : force_flush_all();
142 :
143 0 : schedule_tail(current->thread.prev_sched);
144 :
145 : /*
146 : * XXX: if interrupt_end() calls schedule, this call to
147 : * arch_switch_to isn't needed. We may want to apply this to
148 : * improve performance. -bb
149 : */
150 0 : arch_switch_to(current);
151 :
152 0 : current->thread.prev_sched = NULL;
153 :
154 0 : userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
155 0 : }
156 :
157 107 : int copy_thread(unsigned long clone_flags, unsigned long sp,
158 : unsigned long arg, struct task_struct * p, unsigned long tls)
159 : {
160 : void (*handler)(void);
161 107 : int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
162 107 : int ret = 0;
163 :
164 107 : p->thread = (struct thread_struct) INIT_THREAD;
165 :
166 107 : if (!kthread) {
167 0 : memcpy(&p->thread.regs.regs, current_pt_regs(),
168 : sizeof(p->thread.regs.regs));
169 0 : PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
170 0 : if (sp != 0)
171 0 : REGS_SP(p->thread.regs.regs.gp) = sp;
172 :
173 0 : handler = fork_handler;
174 :
175 0 : arch_copy_thread(&current->thread.arch, &p->thread.arch);
176 : } else {
177 107 : get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
178 107 : p->thread.request.u.thread.proc = (int (*)(void *))sp;
179 107 : p->thread.request.u.thread.arg = (void *)arg;
180 107 : handler = new_thread_handler;
181 : }
182 :
183 107 : new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
184 :
185 107 : if (!kthread) {
186 0 : clear_flushed_tls(p);
187 :
188 : /*
189 : * Set a new TLS for the child thread?
190 : */
191 0 : if (clone_flags & CLONE_SETTLS)
192 0 : ret = arch_set_tls(p, tls);
193 : }
194 :
195 107 : return ret;
196 : }
197 :
198 1 : void initial_thread_cb(void (*proc)(void *), void *arg)
199 : {
200 1 : int save_kmalloc_ok = kmalloc_ok;
201 :
202 1 : kmalloc_ok = 0;
203 1 : initial_thread_cb_skas(proc, arg);
204 1 : kmalloc_ok = save_kmalloc_ok;
205 1 : }
206 :
207 0 : void um_idle_sleep(void)
208 : {
209 : if (time_travel_mode != TT_MODE_OFF)
210 : time_travel_sleep();
211 : else
212 0 : os_idle_sleep();
213 0 : }
214 :
215 0 : void arch_cpu_idle(void)
216 : {
217 0 : cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
218 : um_idle_sleep();
219 : raw_local_irq_enable();
220 0 : }
221 :
222 0 : int __cant_sleep(void) {
223 0 : return in_atomic() || irqs_disabled() || in_interrupt();
224 : /* Is in_interrupt() really needed? */
225 : }
226 :
227 0 : int user_context(unsigned long sp)
228 : {
229 : unsigned long stack;
230 :
231 0 : stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
232 0 : return stack != (unsigned long) current_thread_info();
233 : }
234 :
235 : extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
236 :
237 1 : void do_uml_exitcalls(void)
238 : {
239 : exitcall_t *call;
240 :
241 1 : call = &__uml_exitcall_end;
242 7 : while (--call >= &__uml_exitcall_begin)
243 5 : (*call)();
244 1 : }
245 :
246 0 : char *uml_strdup(const char *string)
247 : {
248 0 : return kstrdup(string, GFP_KERNEL);
249 : }
250 : EXPORT_SYMBOL(uml_strdup);
251 :
252 0 : int copy_to_user_proc(void __user *to, void *from, int size)
253 : {
254 0 : return copy_to_user(to, from, size);
255 : }
256 :
257 0 : int copy_from_user_proc(void *to, void __user *from, int size)
258 : {
259 0 : return copy_from_user(to, from, size);
260 : }
261 :
262 0 : int clear_user_proc(void __user *buf, int size)
263 : {
264 0 : return clear_user(buf, size);
265 : }
266 :
267 : static atomic_t using_sysemu = ATOMIC_INIT(0);
268 : int sysemu_supported;
269 :
270 5 : void set_using_sysemu(int value)
271 : {
272 5 : if (value > sysemu_supported)
273 : return;
274 : atomic_set(&using_sysemu, value);
275 : }
276 :
277 0 : int get_using_sysemu(void)
278 : {
279 0 : return atomic_read(&using_sysemu);
280 : }
281 :
282 0 : static int sysemu_proc_show(struct seq_file *m, void *v)
283 : {
284 0 : seq_printf(m, "%d\n", get_using_sysemu());
285 0 : return 0;
286 : }
287 :
288 0 : static int sysemu_proc_open(struct inode *inode, struct file *file)
289 : {
290 0 : return single_open(file, sysemu_proc_show, NULL);
291 : }
292 :
293 0 : static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
294 : size_t count, loff_t *pos)
295 : {
296 : char tmp[2];
297 :
298 0 : if (copy_from_user(tmp, buf, 1))
299 : return -EFAULT;
300 :
301 0 : if (tmp[0] >= '0' && tmp[0] <= '2')
302 0 : set_using_sysemu(tmp[0] - '0');
303 : /* We use the first char, but pretend to write everything */
304 0 : return count;
305 : }
306 :
307 : static const struct proc_ops sysemu_proc_ops = {
308 : .proc_open = sysemu_proc_open,
309 : .proc_read = seq_read,
310 : .proc_lseek = seq_lseek,
311 : .proc_release = single_release,
312 : .proc_write = sysemu_proc_write,
313 : };
314 :
315 1 : int __init make_proc_sysemu(void)
316 : {
317 : struct proc_dir_entry *ent;
318 1 : if (!sysemu_supported)
319 : return 0;
320 :
321 1 : ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
322 :
323 1 : if (ent == NULL)
324 : {
325 0 : printk(KERN_WARNING "Failed to register /proc/sysemu\n");
326 0 : return 0;
327 : }
328 :
329 : return 0;
330 : }
331 :
332 : late_initcall(make_proc_sysemu);
333 :
334 0 : int singlestepping(void * t)
335 : {
336 0 : struct task_struct *task = t ? t : current;
337 :
338 0 : if (!(task->ptrace & PT_DTRACE))
339 : return 0;
340 :
341 0 : if (task->thread.singlestep_syscall)
342 : return 1;
343 :
344 0 : return 2;
345 : }
346 :
347 : /*
348 : * Only x86 and x86_64 have an arch_align_stack().
349 : * All other arches have "#define arch_align_stack(x) (x)"
350 : * in their asm/exec.h
351 : * As this is included in UML from asm-um/system-generic.h,
352 : * we can use it to behave as the subarch does.
353 : */
354 : #ifndef arch_align_stack
355 0 : unsigned long arch_align_stack(unsigned long sp)
356 : {
357 0 : if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
358 0 : sp -= get_random_int() % 8192;
359 0 : return sp & ~0xf;
360 : }
361 : #endif
362 :
363 0 : unsigned long __get_wchan(struct task_struct *p)
364 : {
365 : unsigned long stack_page, sp, ip;
366 0 : bool seen_sched = 0;
367 :
368 0 : stack_page = (unsigned long) task_stack_page(p);
369 : /* Bail if the process has no kernel stack for some reason */
370 0 : if (stack_page == 0)
371 : return 0;
372 :
373 0 : sp = p->thread.switch_buf->JB_SP;
374 : /*
375 : * Bail if the stack pointer is below the bottom of the kernel
376 : * stack for some reason
377 : */
378 0 : if (sp < stack_page)
379 : return 0;
380 :
381 0 : while (sp < stack_page + THREAD_SIZE) {
382 0 : ip = *((unsigned long *) sp);
383 0 : if (in_sched_functions(ip))
384 : /* Ignore everything until we're above the scheduler */
385 : seen_sched = 1;
386 0 : else if (kernel_text_address(ip) && seen_sched)
387 : return ip;
388 :
389 0 : sp += sizeof(unsigned long);
390 : }
391 :
392 : return 0;
393 : }
394 :
395 0 : int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
396 : {
397 0 : int cpu = current_thread_info()->cpu;
398 :
399 0 : return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
400 : }
401 :
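A note on the "called magically" comment above new_thread_handler() (source lines 115-118): UML starts and switches kernel threads by saving and restoring setjmp-style register buffers, and it "creates" a new context by writing the handler address and a fresh stack pointer directly into a jmp_buf. The sketch below is a minimal user-space illustration of that hand-off pattern under stated assumptions, not UML's actual implementation; the names switch_threads_demo, idle_loop, kernel_ctx and idle_ctx are made up for the example, and the real new_thread()/switch_threads() helpers (on the os-Linux side of the UML tree) poke JB_IP/JB_SP fields that plain ISO C cannot express, so the demo enters the second context with an ordinary call instead.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf kernel_ctx, idle_ctx;	/* illustrative names, not UML's */

/* Stand-in for UML's switch_threads(): save "me", then resume "you". */
static void switch_threads_demo(jmp_buf *me, jmp_buf *you)
{
	if (setjmp(*me) == 0)
		longjmp(*you, 1);
	/* A non-zero return from setjmp() means another context
	 * longjmp'd back into this one. */
}

static void idle_loop(void)
{
	printf("second context running\n");
	switch_threads_demo(&idle_ctx, &kernel_ctx);	/* yield back */
	/* Not reached in this demo: nothing ever resumes idle_ctx. */
}

int main(void)
{
	/*
	 * Real UML creates a context by stuffing the handler address and a
	 * fresh stack pointer into a jmp_buf (new_thread()); portably we can
	 * only enter the second context with a direct call.
	 */
	if (setjmp(kernel_ctx) == 0)
		idle_loop();
	printf("back in the first context\n");
	return 0;
}

The same "save one buffer, longjmp to the other" shape is what switch_threads() performs inside __switch_to() at source line 91.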