/* $Id: system.h,v 1.62 2000/09/23 02:09:21 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/asm_offsets.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0

extern unsigned long empty_bad_page;
extern unsigned long empty_zero_page;
#endif

#define setipl(__new_ipl) \
	__asm__ __volatile__("wrpr	%0, %%pil" : : "r" (__new_ipl) : "memory")

#define __cli() \
	__asm__ __volatile__("wrpr	15, %%pil" : : : "memory")

#define __sti() \
	__asm__ __volatile__("wrpr	0, %%pil" : : : "memory")

#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr	%%pil, %0" : "=r" (retval)); retval; })

#define swap_pil(__new_pil) \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	%1, %%pil" \
			     : "=&r" (retval) \
			     : "r" (__new_pil) \
			     : "memory"); \
	retval; \
})

#define read_pil_and_cli() \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	15, %%pil" \
			     : "=r" (retval) \
			     : : "memory"); \
	retval; \
})

#define __save_flags(flags)		((flags) = getipl())
#define __save_and_cli(flags)		((flags) = read_pil_and_cli())
#define __restore_flags(flags)		setipl((flags))
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_restore(flags)	__restore_flags(flags)

#ifndef CONFIG_SMP
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#else

#ifndef __ASSEMBLY__
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#endif

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(x)		((x) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)
#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)

#endif

#define nop()		__asm__ __volatile__ ("nop")

#define membar(type)	__asm__ __volatile__ ("membar " type : : : "memory");
#define mb()		\
	membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad");
#define rmb()		membar("#LoadLoad")
#define wmb()		membar("#StoreStore")
#define set_mb(__var, __value) \
	do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
#define set_wmb(__var, __value) \
	do { __var = __value; membar("#StoreStore"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory");
#define smp_rmb()	__asm__ __volatile__("":::"memory");
#define smp_wmb()	__asm__ __volatile__("":::"memory");
#endif

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p));
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic() \
	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t" \
			     ".align	64\n" \
			  "99:wr	%g0, 0x0, %pic\n\t" \
			     "rd	%pic, %g0")

#ifndef __ASSEMBLY__

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
#define prepare_to_switch flushw_all

	/* See what happens when you design the chip correctly?
	 *
	 * We tell gcc we clobber all non-fixed-usage registers except
	 * for l0/l1.  It will use one for 'next' and the other to hold
	 * the output value of 'last'.  'next' is not referenced again
	 * past the invocation of switch_to in the scheduler, so we need
	 * not preserve its value.  Hairy, but it lets us remove 2 loads
	 * and 2 stores in this critical code path.  -DaveM
	 */
#define switch_to(prev, next, last) \
do {	if (current->thread.flags & SPARC_FLAG_PERFCTR) { \
		unsigned long __tmp; \
		read_pcr(__tmp); \
		current->thread.pcr_reg = __tmp; \
		read_pic(__tmp); \
		current->thread.kernel_cntd0 += (unsigned int)(__tmp); \
		current->thread.kernel_cntd1 += ((__tmp) >> 32); \
	} \
	save_and_clear_fpu(); \
	/* If you are tempted to conditionalize the following */ \
	/* so that ASI is only written if it changes, think again. */ \
	__asm__ __volatile__("wr %%g0, %0, %%asi" \
			     : : "r" (next->thread.current_ds.seg)); \
	__asm__ __volatile__( \
	"mov	%%g6, %%g5\n\t" \
	"wrpr	%%g0, 0x95, %%pstate\n\t" \
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
	"rdpr	%%wstate, %%o5\n\t" \
	"stx	%%o6, [%%g6 + %3]\n\t" \
	"stb	%%o5, [%%g6 + %2]\n\t" \
	"rdpr	%%cwp, %%o5\n\t" \
	"stb	%%o5, [%%g6 + %5]\n\t" \
	"mov	%1, %%g6\n\t" \
	"ldub	[%1 + %5], %%g1\n\t" \
	"wrpr	%%g1, %%cwp\n\t" \
	"ldx	[%%g6 + %3], %%o6\n\t" \
	"ldub	[%%g6 + %2], %%o5\n\t" \
	"ldub	[%%g6 + %4], %%o7\n\t" \
	"mov	%%g6, %%l2\n\t" \
	"wrpr	%%o5, 0x0, %%wstate\n\t" \
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
	"wrpr	%%g0, 0x94, %%pstate\n\t" \
	"mov	%%l2, %%g6\n\t" \
	"wrpr	%%g0, 0x96, %%pstate\n\t" \
	"andcc	%%o7, %6, %%g0\n\t" \
	"bne,pn	%%icc, ret_from_syscall\n\t" \
	" mov	%%g5, %0\n\t" \
	: "=&r" (last) \
	: "r" (next), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.wstate)),\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.flags)),\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.cwp)), \
	  "i" (SPARC_FLAG_NEWCHILD) \
	: "cc", "g1", "g2", "g3", "g5", "g7", \
	  "l2", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
	/* If you fuck with this, update ret_from_syscall code too. */ \
	if (current->thread.flags & SPARC_FLAG_PERFCTR) { \
		write_pcr(current->thread.pcr_reg); \
		reset_pic(); \
	} \
} while(0)

extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	__asm__ __volatile__("
	mov		%0, %%g5
1:	lduw		[%2], %%g7
	cas		[%2], %%g7, %0
	cmp		%%g7, %0
	bne,a,pn	%%icc, 1b
	 mov		%%g5, %0
	membar		#StoreLoad | #StoreStore
"	: "=&r" (val)
	: "0" (val), "r" (m)
	: "g5", "g7", "cc", "memory");
	return val;
}

extern __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	__asm__ __volatile__("
	mov		%0, %%g5
1:	ldx		[%2], %%g7
	casx		[%2], %%g7, %0
	cmp		%%g7, %0
	bne,a,pn	%%xcc, 1b
	 mov		%%g5, %0
	membar		#StoreLoad | #StoreStore
"	: "=&r" (val)
	: "0" (val), "r" (m)
	: "g5", "g7", "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	};
	__xchg_called_with_bad_pointer();
	return x;
}

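/* Editor's usage sketch, not part of the original header: tas() above is a
 * test-and-set built on xchg(), so a trivial busy-wait lock can be written
 * with it.  This is purely illustrative (the kernel's real spinlocks are not
 * implemented this way); __example_lock/__example_unlock and lock_word are
 * hypothetical names.
 */
static __inline__ void __example_lock(volatile unsigned int *lock_word)
{
	/* Spin until we are the caller that changed *lock_word from 0 to 1. */
	while (tas(lock_word) != 0)
		;
}

static __inline__ void __example_unlock(volatile unsigned int *lock_word)
{
	/* Order the critical section's stores before the releasing store. */
	wmb();
	*lock_word = 0;
}
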
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

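/* Editor's usage sketch, illustrative only: the classic cmpxchg() retry loop,
 * here incrementing a counter without locks.  The identifiers my_counter and
 * my_counter_inc are hypothetical; cmpxchg() itself is defined at the end of
 * this file.
 *
 *	static int my_counter;
 *
 *	static __inline__ void my_counter_inc(void)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = my_counter;
 *			new = old + 1;
 *		} while (cmpxchg(&my_counter, old, new) != old);
 *	}
 */
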
extern __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0\n\t"
			     "membar #StoreStore | #StoreLoad"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

extern __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0\n\t"
			     "membar #StoreStore | #StoreLoad"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SYSTEM_H) */