/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

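/*
 * Per-CPU copy of this processor's number; on 32-bit SMP,
 * raw_smp_processor_id() reads it through the per-cpu segment.
 */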
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
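	/*
	 * thread.sp points at the frame that switch_to() saved on the
	 * kernel stack; slot 3 is where the saved program counter ends
	 * up with the current switch_to() stack layout.
	 */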
	return ((unsigned long *)tsk->thread.sp)[3];
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>

static void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	numa_remove_cpu(cpu);
}

/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
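	/* bit 1 of EFLAGS is reserved and always reads as one, hence the 0x2 */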
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
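	/* The child returns 0 from fork() and starts on its own stack. */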
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	__asm__("movl %0, %%gs" :: "r"(0));
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
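	/*
	 * Setting CR4.TSD makes RDTSC a privileged instruction, so a
	 * user-mode RDTSC faults (the task sees SIGSEGV).
	 */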
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		update_debugctlmsr(0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task; we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and let the task get a GPF in case an I/O instruction
	 * is performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This saves us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now.
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

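	/*
	 * The clone() arguments arrive in registers: flags in %ebx, the new
	 * stack pointer in %ecx, and the parent/child TID pointers in
	 * %edx/%edi.
	 */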
	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
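	/*
	 * Walk the saved-%ebp frame chain until we find a return address
	 * outside the scheduler, giving up after a bounded number of frames.
	 */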
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
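	/* Randomize the initial stack by up to 8KB, then keep it 16-byte aligned. */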
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}