/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

extern void calibrate_delay(void);

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __devinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
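
/* The two flags in go[] are spaced a cache line apart (go[MASTER] at
 * offset 0, go[SLAVE] one SMP_CACHE_BYTES further on) so that the
 * master's and slave's polling do not bounce a single cache line
 * between the two cpus.
 */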

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
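
	/* The midpoint of the fastest observed round trip is our best
	 * estimate of the local tick value at the instant the master
	 * sampled its own tick, assuming the request and reply flight
	 * times are roughly symmetric.  The difference from the
	 * master's timestamp is therefore the offset to correct by.
	 */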
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	hdesc->maps[0].vaddr = tte_vaddr;
	hdesc->maps[0].tte = tte_data;
	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
		hdesc->maps[1].vaddr = tte_vaddr;
		hdesc->maps[1].tte = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
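		/* (Starfire interleaves the ID bits: logical bits
		 * [5:2] shift up to [6:3], bit 6 drops down to bit 2,
		 * and the low two bits pass through unchanged.)
		 */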
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
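	/* The target's module ID (the upaid above) lands in the upper
	 * bits of the dispatch address, hence the shift by 14; the low
	 * offset 0x70 selects the interrupt vector dispatch register.
	 */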
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that. However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.  Does not return
 * until remote CPUs are nearly ready to execute <<func>> or have
 * executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();
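	/* The barrier above orders the call_data store ahead of the
	 * cross call below, so targets entering
	 * smp_call_function_client() observe the new descriptor.
	 */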

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
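			/* Bit 32 of data0 flags a mapped page for the
			 * spitfire xcall handler, presumably so it
			 * also flushes the I-cache side as
			 * __local_flush_dcache_page() does above.
			 */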
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
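		/* Single user: per rule (2) above we can shrink
		 * cpu_vm_mask to just this cpu and flush locally; any
		 * other cpu that later runs this mm must flush its own
		 * tlb when it notices its bit is clear.
		 */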
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

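/* smp_capture() parks every other online cpu in
 * smp_penguin_jailcell() (entered via xcall_capture) until a matching
 * smp_release(), so the PROM can be entered with the rest of the
 * machine quiesced.  The depth counter makes nested capture/release
 * pairs safe.
 */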
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

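		/* Unconfigure all four queues (a zero entry count
		 * disables a queue per the sun4v API) so the
		 * hypervisor stops delivering mondos here.
		 */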
		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	spin_lock(&call_lock);
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;
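
	/* The per-cpu region is rounded up to a power-of-two size so
	 * that a cpu's offset can be formed with a simple shift by
	 * __per_cpu_shift in __per_cpu_offset().
	 */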

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}