/*
 * $Id: smp.c,v 1.62 1999/09/05 11:56:34 paulus Exp $
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Support for PReP (Motorola MTX/MVME) SMP by Troy Benjegerdes
 * (troy@microux.com, hozer@drgw.net)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/openpic.h>
#include <linux/spinlock.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/io.h>		/* out_be32 */
#include <asm/prom.h>		/* find_type_devices, get_property, call_rtas */

int first_cpu_booted = 0;
int smp_threads_ready = 0;
volatile int smp_commenced = 0;
int smp_num_cpus = 1;
struct cpuinfo_PPC cpu_data[NR_CPUS];
struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
volatile unsigned char active_kernel_processor = NO_PROC_ID;	/* Processor holding kernel spinlock */
volatile unsigned long ipi_count;
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time;

/* all cpu mappings are 1-1 -- Cort */
int cpu_number_map[NR_CPUS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
volatile unsigned long cpu_callin_map[NR_CPUS] = { 0, };

int start_secondary(void *);
extern int cpu_idle(void *unused);
u_int openpic_read(volatile u_int *addr);

/* register for interrupting the secondary processor on the powersurge */
#define PSURGE_INTR	((volatile unsigned *)0xf80000c0)
void smp_local_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	extern void update_one_process(struct task_struct *, unsigned long,
				       unsigned long, unsigned long, int);

	if (!--prof_counter[cpu]) {
		int user = 0, system = 0;
		struct task_struct *p = current;

		/*
		 * After doing the above, we need to make like
		 * a normal interrupt - otherwise timer interrupts
		 * ignore the global interrupt lock, which is the
		 * WrongThing (tm) to do.
		 */

		if (user_mode(regs))
			user = 1;
		else
			system = 1;

		if (p->pid) {
			update_one_process(p, 1, user, system, cpu);

			p->counter -= 1;
			if (p->counter < 0) {
				p->counter = 0;
				current->need_resched = 1;
			}
			if (p->priority < DEF_PRIORITY) {
				kstat.cpu_nice += user;
				kstat.per_cpu_nice[cpu] += user;
			} else {
				kstat.cpu_user += user;
				kstat.per_cpu_user[cpu] += user;
			}

			kstat.cpu_system += system;
			kstat.per_cpu_system[cpu] += system;
		}
		prof_counter[cpu] = prof_multiplier[cpu];
	}
}

/*
 * Dirty hack to get smp message passing working.
 *
 * As it is now, if we're sending two messages at the same time
 * we have race conditions.  The PowerSurge doesn't easily
 * allow us to send IPI messages so we put the messages in
 * smp_message[].
 *
 * This is because we don't have several IPI's on the PowerSurge even though
 * we do on the chrp.  It would be nice to use the actual IPI's on the chrp
 * rather than this, but having two methods of doing IPI isn't a good idea.
 *  -- Cort
 */
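/*
 * In outline (a rough sketch of the scheme, as used below): the sender
 * drops the message number into the target's smp_message[] slot and
 * then raises an interrupt -- by pulsing PSURGE_INTR on the powersurge,
 * or with openpic_cause_IPI() on chrp.  The receiver's
 * smp_message_recv() reads its own slot, treats -1 as "nothing
 * pending", and writes -1 back once the message has been handled.
 * A typical sender looks like smp_send_stop() below:
 *	smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0, 0);
 */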
int smp_message[NR_CPUS];

void smp_message_recv(void)
{
	int msg = smp_message[smp_processor_id()];

	if ( _machine == _MACH_Pmac )
	{
		/* clear interrupt */
		out_be32(PSURGE_INTR, ~0);
	}

	/* make sure msg is for us */
	if ( msg == -1 ) return;

	ipi_count++;

	switch( msg )
	{
	case MSG_STOP_CPU:
		__cli();
		while (1)
			;
		break;
	case MSG_RESCHEDULE:
		current->need_resched = 1;
		break;
	case 0xf0f0: /* syncing time bases - just return */
		break;
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
	/* reset message */
	smp_message[smp_processor_id()] = -1;
}

void smp_send_reschedule(int cpu)
{
	/* This is only used if `cpu' is running an idle task,
	   so it will reschedule itself anyway... */
	/*smp_message_pass(cpu, MSG_RESCHEDULE, 0, 0);*/
}

void smp_send_stop(void)
{
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0, 0);
}

spinlock_t mesg_pass_lock = SPIN_LOCK_UNLOCKED;
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	int i;

	if ( !(_machine & (_MACH_Pmac|_MACH_chrp)) )
		return;

	spin_lock(&mesg_pass_lock);

	/*
	 * We assume here that the msg is not -1.  If it is,
	 * the recipient won't know the message was destined
	 * for it.
	 */

	switch( target )
	{
	case MSG_ALL:
		smp_message[smp_processor_id()] = msg;
		/* fall through */
	case MSG_ALL_BUT_SELF:
		for ( i = 0 ; i < smp_num_cpus ; i++ )
			if ( i != smp_processor_id() )
				smp_message[i] = msg;
		break;
	default:
		smp_message[target] = msg;
		break;
	}

	if ( _machine == _MACH_Pmac )
	{
		/* interrupt secondary processor */
		out_be32(PSURGE_INTR, ~0);
		out_be32(PSURGE_INTR, 0);
		/*
		 * Assume for now that the secondary doesn't send
		 * IPI's back to the primary.
		 */
		/* interrupt primary */
		/**(volatile unsigned long *)(0xf3019000);*/
	}

	if ( _machine == _MACH_chrp )
	{
		/*
		 * There has to be some way of doing this better -
		 * perhaps a send-to-all or send-to-all-but-self
		 * in the openpic.  This gets us going for now, though.
		 */
		switch ( target )
		{
		case MSG_ALL:
			for ( i = 0 ; i < smp_num_cpus ; i++ )
				openpic_cause_IPI(i, 0, 0xffffffff);
			break;
		case MSG_ALL_BUT_SELF:
			for ( i = 0 ; i < smp_num_cpus ; i++ )
				if ( i != smp_processor_id() )
					openpic_cause_IPI(i, 0,
						0xffffffff & ~(1 << smp_processor_id()));
			break;
		default:
			openpic_cause_IPI(target, 0, 1U << target);
			break;
		}
	}

	spin_unlock(&mesg_pass_lock);
}
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	extern unsigned long smp_chrp_cpu_nr;
	extern void __secondary_start_psurge(void);
	extern void __secondary_start_chrp(void);
	int i, cpu_nr;
	struct task_struct *p;
	unsigned long a;

	printk("Entering SMP Mode...\n");
	/* let other processors know to not do certain initialization */
	first_cpu_booted = 1;

	smp_store_cpu_info(0);

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	active_kernel_processor = 0;
	current->processor = 0;

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;
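	/*
	 * That is, roughly: a 32kB cache at 32 bytes per line is 1024
	 * lines, and 20 bus cycles per line with the timebase ticking
	 * every 4 bus cycles is about 5 timebase ticks per line,
	 * hence 5*1024.
	 */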
	if ( !(_machine & (_MACH_Pmac|_MACH_chrp)) )
	{
		printk("SMP not supported on this machine.\n");
		return;
	}

	switch ( _machine )
	{
	case _MACH_Pmac:
		/* assume powersurge board - 2 processors -- Cort */
		cpu_nr = 2;
		break;
	case _MACH_chrp:
		/* openpic doesn't report # of cpus, just # possible -- Cort */
		cpu_nr = ((openpic_read(&OpenPIC->Global.Feature_Reporting0)
			   & OPENPIC_FEATURE_LAST_PROCESSOR_MASK) >>
			  OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
		cpu_nr = smp_chrp_cpu_nr;
		break;
	}

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	for ( i = 1 ; i < cpu_nr ; i++ )
	{
		int c;
		struct pt_regs regs;
		struct task_struct *idle;
		struct device_node *device;

		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if ( !p )
			panic("No idle task for CPU %d", i);
		del_from_runqueue(p);

		p->processor = i;
		p->has_cpu = 1;
		current_set[i] = p;

		/* need to flush here since secondary bats aren't setup */
		for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
			asm volatile("dcbf 0,%0" : : "r" (a) : "memory");

		/* wake up the secondary processor */
		switch ( _machine )
		{
		case _MACH_Pmac:
			/* setup entry point of secondary processor */
			*(volatile unsigned long *)(0xf2800000) =
				(unsigned long)__secondary_start_psurge - KERNELBASE;
			/* interrupt secondary to begin executing code */
			out_be32(PSURGE_INTR, ~0);
			out_be32(PSURGE_INTR, 0);
			break;
		case _MACH_chrp:
			*(unsigned long *)KERNELBASE = i;
			asm volatile("dcbf 0,%0" : : "r" (KERNELBASE) : "memory");

			device = find_type_devices("cpu");
			/* assume cpu device list is in order, find the ith cpu */
			for ( a = i; device && a; device = device->next, a-- )
				;
			if ( !device )
				break;
			printk("Starting %s (%lu): ", device->full_name,
			       *(ulong *)get_property(device, "reg", NULL));
			call_rtas("start-cpu", 3, 1, NULL,
				  *(ulong *)get_property(device, "reg", NULL),
				  __pa(__secondary_start_chrp), i);
			break;
		}

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 */
		for ( c = 1000; c && !cpu_callin_map[i] ; c-- )
			udelay(100);

		if ( cpu_callin_map[i] )
		{
			printk("Processor %d found.\n", i);
			/* this sync's the decr's -- Cort */
			if ( _machine == _MACH_Pmac )
				set_dec(decrementer_count);
			smp_num_cpus++;
		} else {
			printk("Processor %d is stuck.\n", i);
		}
	}

	if ( _machine == _MACH_Pmac )
	{
		/* reset the entry point so if we get another intr we won't
		 * try to startup again */
		*(volatile unsigned long *)(0xf2800000) = 0x100;
		/* send interrupt to other processors to start decr's on all cpus */
		smp_message_pass(1, 0xf0f0, 0, 0);
	}
}
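/*
 * Rough shape of the bring-up handshake above and below (only a
 * summary of this file): smp_boot_cpus() points the secondary at
 * __secondary_start_psurge/__secondary_start_chrp, kicks it with a
 * PSURGE_INTR pulse or the RTAS "start-cpu" call, and briefly polls
 * cpu_callin_map[i].  The secondary comes up through
 * start_secondary() -> smp_callin(), marks cpu_callin_map[] and then
 * spins until smp_commence() sets smp_commenced.
 */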
void __init smp_commence(void)
{
	/*
	 *	Lets the callin's below out of their loop.
	 */
	smp_commenced = 1;
}

/* intel needs this */
void __init initialize_secondary(void)
{
}

/* Activate a secondary processor. */
int __init start_secondary(void *unused)
{
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	smp_callin();
	return cpu_idle(NULL);
}
void __init smp_callin(void)
{
	smp_store_cpu_info(current->processor);
	set_dec(decrementer_count);

	current->mm->mmap->vm_page_prot = PAGE_SHARED;
	current->mm->mmap->vm_start = PAGE_OFFSET;
	current->mm->mmap->vm_end = init_mm.mmap->vm_end;

	cpu_callin_map[current->processor] = 1;
	while (!smp_commenced)
		barrier();
	__sti();
}
void __init smp_setup(char *str, int *ints)
{
}

int __init setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_PPC *c = &cpu_data[id];

	/* assume bogomips are same for everything */
	c->loops_per_sec = loops_per_sec;
}