#ifndef __ALPHA_COMPILER_H
#define __ALPHA_COMPILER_H

/*
 * Herein are macros we use when describing various patterns we want GCC
 * to see.  In all cases we can get better schedules out of the compiler
 * if we hide as little as possible inside inline assembly.  However, we
 * want to be able to know what we'll get out before giving up the inline
 * assembly.  Thus these tests and macros.
 */

#if __GNUC__ == 2 && __GNUC_MINOR__ >= 91 || __GNUC__ > 2
#define __kernel_insbl(val, shift) \
  (((unsigned long)(val) & 0xfful) << ((shift) * 8))
#define __kernel_inswl(val, shift) \
  (((unsigned long)(val) & 0xfffful) << ((shift) * 8))
#define __kernel_insql(val, shift) \
  ((unsigned long)(val) << ((shift) * 8))
#else
#define __kernel_insbl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
#define __kernel_inswl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
#define __kernel_insql(val, shift) \
  ({ unsigned long __kir; \
     __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
#endif

#if 0 && (__GNUC__ > 2 || __GNUC_MINOR__ >= 92)
#define __kernel_extbl(val, shift)  (((val) >> (((shift) & 7) * 8)) & 0xfful)
#define __kernel_extwl(val, shift)  (((val) >> (((shift) & 7) * 8)) & 0xfffful)
#else
#define __kernel_extbl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
#define __kernel_extwl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
#endif

/*
 * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX
 * extension is enabled.  Previous versions did not define anything
 * we could test during compilation -- too bad, so sad.
 */

#if defined(__alpha_bwx__)
#define __kernel_ldbu(mem)	(mem)
#define __kernel_ldwu(mem)	(mem)
#define __kernel_stb(val,mem)	((mem) = (val))
#define __kernel_stw(val,mem)	((mem) = (val))
#else
#define __kernel_ldbu(mem) \
  ({ unsigned char __kir; \
     __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
     __kir; })
#define __kernel_ldwu(mem) \
  ({ unsigned short __kir; \
     __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
     __kir; })
#define __kernel_stb(val,mem) \
  __asm__("stb %1,%0" : "=m"(mem) : "r"(val))
#define __kernel_stw(val,mem) \
  __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
#endif

/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately.  Define __builtin_expect
   to nothing for earlier compilers.  */

#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#endif /* __ALPHA_COMPILER_H */
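
/*
 * Illustrative sketch only (compiled out with #if 0): one way the
 * insert/extract macros and the BWX load macro above can be combined
 * for byte access.  The helper names below are hypothetical and exist
 * purely for this example.
 */
#if 0
static inline unsigned char __example_readb(const void *addr)
{
	/* Classic pre-BWX sequence: load the aligned quadword that
	   contains the byte, then pull the byte out with extbl.  This
	   works on every Alpha, with or without BWX.  */
	unsigned long quad = *(const unsigned long *)((unsigned long)addr & ~7UL);
	return __kernel_extbl(quad, (unsigned long)addr & 7);
}

static inline void __example_writeb(unsigned char val, void *addr)
{
	/* Read-modify-write of the containing quadword: clear the target
	   byte lane, then merge in the new byte with insbl.  */
	unsigned long *quadp = (unsigned long *)((unsigned long)addr & ~7UL);
	unsigned long shift = (unsigned long)addr & 7;

	*quadp = (*quadp & ~__kernel_insbl(0xff, shift))
		 | __kernel_insbl(val, shift);
}

static inline unsigned char __example_bwx_readb(const void *addr)
{
	/* For code paths that only ever run on BWX-capable CPUs:
	   __kernel_ldbu produces a real ldbu even when GCC is targeting
	   a non-BWX baseline and would not emit one from plain C.  */
	return __kernel_ldbu(*(const volatile unsigned char *)addr);
}
#endif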
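
/*
 * Illustrative sketch only (compiled out with #if 0): annotating an
 * unlikely error path with __builtin_expect.  With GCC older than 2.96
 * the fallback definition above simply discards the hint.  The function
 * below is hypothetical and exists purely for this example.
 */
#if 0
static inline int __example_check(void *p)
{
	/* Tell the compiler the NULL case is the cold path so the common
	   path is laid out as the straight-line fallthrough.  */
	if (__builtin_expect(p == ((void *)0), 0))
		return -1;
	return 0;
}
#endif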