/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In future when interrupts are split into per CPU domains this could be
 * fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	(-1ULL)
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG(), but until someone can quote me the line
	 * from the Intel manual that guarantees an IPI to multiple CPUs
	 * is retried _only_ on the erroring CPUs, it's staying as a
	 * return.
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
}

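/*
 * Sender side of the flush protocol: publish (mm, va, cpumask) in this
 * CPU's flush_state slot, kick the target CPUs with the per-sender
 * vector, and spin until every target has cleared itself from
 * flush_cpumask in smp_invalidate_interrupt() above.
 */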
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Send the IPI only to the CPUs that are affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

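/*
 * Boot-time initialization of the per-CPU flush slots; all this needs
 * to do is make each slot's spinlock usable before the first cross-CPU
 * flush can happen.
 */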
int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);

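/*
 * Flush all TLB entries for the current task's mm, locally and on every
 * other CPU that currently has the mm loaded.
 */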
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);

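/*
 * Flush all TLB entries for mm. If this CPU is only holding mm as a
 * lazy (borrowed) mm, drop it via leave_mm() instead of flushing, so
 * we stop receiving flush IPIs for it.
 */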
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

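/*
 * Flush the TLB entry covering a single user address va in vma's mm,
 * on this CPU and on all other CPUs using that mm.
 */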
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

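/*
 * Flush everything on every CPU. The helper runs on each CPU in turn;
 * a CPU that was only holding its mm lazily also drops it, since its
 * TLB no longer caches anything worth keeping.
 */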
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

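/*
 * Exported grab/release of call_lock, so that code outside this file
 * (CPU bringup, for instance) can keep 'generic call function' IPIs
 * from being sent while it is in a critical section.
 */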
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			   int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

/*
 * smp_call_function_single - Run a function on another CPU
 * @cpu: The target CPU.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	if (cpu == me) {
		put_cpu();
		return 0;
	}
	spin_lock_bh(&call_lock);
	__smp_call_function_single(cpu, func, info, nonatomic, wait);
	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
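
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	static void remote_hello(void *info)
 *	{
 *		printk(KERN_INFO "hello from CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(1, remote_hello, NULL, 0, 1);
 *
 * The callback runs from the IPI handler on the target CPU, so it must
 * be fast, must not sleep, and must not take locks that could already
 * be held with interrupts disabled.
 */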

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
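
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	static atomic_t hits;
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	smp_call_function(bump_counter, &hits, 0, 1);
 *
 * bump_counter() runs once on every other online CPU; with wait=1 the
 * caller spins until all of them have finished.
 */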

void smp_stop_cpu(void)
{
	unsigned long flags;

	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		halt();
}

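/*
 * Used at panic/reboot time: ask every other CPU to take itself offline
 * and halt, then shut down the local APIC. Best effort by design: if
 * call_lock is already held (e.g. we panicked inside smp_call_function())
 * the IPI is sent anyway without taking the lock.
 */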
void smp_send_stop(void)
{
	int nolock = 0;

	if (reboot_force)
		return;
	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}

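/*
 * Receiver side of the 'generic call function' IPI: snapshot call_data,
 * ack the interrupt, signal 'started' so the sender may release the
 * data, run the callback, and signal 'finished' if the sender waits.
 */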
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}