@@ -1092,7+1092,7 @@ sys_call_table: .quad sys_munlockall
.quad sys_sysinfo
.quad sys_sysctl
- .quad sys_idle /* 320 */
+ .quad sys_ni_syscall /* 320 */
.quad sys_oldumount
.quad sys_swapon
.quad sys_times
@@ -74,9+74,8 @@ sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2, return 0;
}
-#ifdef __SMP__
-int
-cpu_idle(void *unused)
+void
+cpu_idle(void)
{
/* An endless idle loop with no priority at all. */
current->priority = 0;
@@ -94,27+93,6 @@ cpu_idle(void *unused) }
}
}
-#endif
-
-asmlinkage int
-sys_idle(void)
-{
- if (current->pid != 0)
- return -EPERM;
-
- /* An endless idle loop with no priority at all. */
- current->priority = 0;
- current->counter = -100;
- init_idle();
-
- while (1) {
- /* FIXME -- EV6 and LCA45 know how to power down
- the CPU. */
-
- schedule();
- check_pgt_cache();
- }
-}
void
generic_kill_arch (int mode, char *restart_cmd)
@@ -194,7+194,7 @@ extern void entDbg(void);
/* process.c */
extern void generic_kill_arch (int mode, char *reboot_cmd);
-extern int cpu_idle(void *) __attribute__((noreturn));
+extern void cpu_idle(void) __attribute__((noreturn));
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
@@ -153,7+153,7 @@ smp_callin(void) cpuid, current));
/* Do nothing. */
- cpu_idle(NULL);
+ cpu_idle();
}
@@ -221,7+221,7 @@ ruffian_init_arch(unsigned long *mem_start, unsigned long *mem_end) /* FIXME: What do we do with ruffian_get_bank_size above? */
#if 1
- pyxis_init_arch();
+ pyxis_init_arch(mem_start, mem_end);
#else
pyxis_enable_errors();
if (!pyxis_srm_window_setup()) {
@@ -2,11+2,11 @@ OUTPUT_FORMAT("elf64-alpha") ENTRY(__start)
SECTIONS
{
- . = 0xfffffc0000310000;
- _text = .;
- .text : { *(.text) }
- .text2 : { *(.text2) }
- _etext = .;
+ . = 0xfffffc0000310000;
+ _text = .;
+ .text : { *(.text) }
+ .text2 : { *(.text2) }
+ _etext = .;
/* Exception table */
. = ALIGN(16);
@@ -26,6+26,17 @@ SECTIONS __init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
+
+ . = ALIGN(16);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) }
+ __setup_end = .;
+
+ . = ALIGN(8);
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ __initcall_end = .;
+
. = ALIGN(2*8192); /* Align double page for init_task_union */
__init_end = .;
-/* $Id: pcic.c,v 1.6 1999/06/03 15:02:18 davem Exp $
+/* $Id: pcic.c,v 1.7 1999/07/23 01:56:07 davem Exp $
* pcic.c: Sparc/PCI controller support
*
* Copyright (C) 1998 V. Roganov and G. Raiko
-/* $Id: process.c,v 1.137 1999/05/08 03:00:10 davem Exp $
+/* $Id: process.c,v 1.138 1999/07/23 01:56:10 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: setup.c,v 1.107 1999/06/03 15:02:20 davem Exp $
+/* $Id: setup.c,v 1.108 1999/07/30 09:35:03 davem Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: signal.c,v 1.92 1999/06/14 05:23:53 davem Exp $
+/* $Id: signal.c,v 1.94 1999/07/30 09:35:04 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -659,6+659,9 @@ new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
err |= __copy_to_user(sf, (char *) regs->u_regs [UREG_FP],
sizeof (struct reg_window));
+
+ err |= __copy_to_user(&sf->info, info, sizeof(siginfo_t));
+
if (err)
goto sigsegv;
-/* $Id: sparc-stub.c,v 1.24 1998/02/08 07:58:44 ecd Exp $
+/* $Id: sparc-stub.c,v 1.25 1999/07/23 01:56:13 davem Exp $
* sparc-stub.c: KGDB support for the Linux kernel.
*
* Modifications to run under Linux
-/* $Id: sparc_ksyms.c,v 1.77 1999/03/21 06:37:43 davem Exp $
+/* $Id: sparc_ksyms.c,v 1.78 1999/07/23 01:56:15 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: sunos_ioctl.c,v 1.31 1998/10/25 19:31:04 davem Exp $
+/* $Id: sunos_ioctl.c,v 1.33 1999/07/28 12:59:03 anton Exp $
* sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
*
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/file.h>
#include <asm/kbio.h>
#if 0
-/* $Id: sys_sunos.c,v 1.101 1999/06/29 12:33:54 davem Exp $
+/* $Id: sys_sunos.c,v 1.102 1999/07/23 01:56:19 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define _FP_WS_TYPE signed long
#define _FP_I_TYPE long
-#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,umul_ppmm)
-#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,umul_ppmm)
-#define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_4_wide(Q,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
* CPU instruction emulation this should prefer Y.
* (see SPAMv9 B.2.2 section).
*/
-#define _FP_CHOOSENAN(fs, wc, R, X, Y) \
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
-/* $Id: fault.c,v 1.103 1999/07/04 04:35:51 davem Exp $
+/* $Id: fault.c,v 1.106 1999/07/30 09:35:07 davem Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -382,12+382,13 @@ inline void force_user_fault(unsigned long address, int write) if(expand_stack(vma, address))
goto bad_area;
good_area:
- if(write)
+ if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
- else
+ } else {
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
+ }
if (!handle_mm_fault(current, vma, address, write))
goto do_sigbus;
up(&mm->mmap_sem);
-/* $Id: srmmu.c,v 1.187 1999/04/28 17:00:45 davem Exp $
+/* $Id: srmmu.c,v 1.189 1999/07/30 09:35:08 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: sun4c.c,v 1.173 1999/01/17 02:20:37 davem Exp $
+/* $Id: sun4c.c,v 1.175 1999/07/30 09:35:10 davem Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
-# $Id: Makefile,v 1.37 1999/06/04 13:29:10 jj Exp $
+# $Id: Makefile,v 1.38 1999/08/02 12:06:06 jj Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
@@ -15,7+15,7 @@ SHELL =/bin/bash CC := sparc64-linux-gcc -D__KERNEL__ -I$(TOPDIR)/include
CC_HAS_ARGS := $(shell if echo "$(CC)" | grep '\(__KERNEL__\| \)' > /dev/null; then echo y; else echo n; fi)
-IS_EGCS := $(shell if $(CC) -c -m64 -mcmodel=medlow -o _tmp.o arch/sparc64/math-emu/fnegq.c >/dev/null 2>&1; then echo y; else echo n; fi; rm -f _tmp.o)
+IS_EGCS := $(shell if $(CC) -c -m64 -mcmodel=medlow -o /dev/null /dev/null >/dev/null 2>&1; then echo y; else echo n; fi; )
NEW_GAS := $(shell if $(LD) --version 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
ifneq ($(CC_HAS_ARGS),y)
-# $Id: config.in,v 1.67 1999/05/01 09:17:37 davem Exp $
+# $Id: config.in,v 1.71 1999/07/30 09:35:13 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
# Global things across all Sun machines.
define_bool CONFIG_SBUS y
define_bool CONFIG_SBUSCHAR y
+define_bool CONFIG_MOUSE y
define_bool CONFIG_SUN_MOUSE y
define_bool CONFIG_SERIAL y
define_bool CONFIG_SUN_SERIAL y
@@ -49,6+49,7 @@ CONFIG_FONT_SUN8x16=y # CONFIG_FBCON_FONTS is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
+CONFIG_MOUSE=y
CONFIG_SUN_MOUSE=y
CONFIG_SERIAL=y
CONFIG_SUN_SERIAL=y
@@ -93,12+94,13 @@ CONFIG_BINFMT_ELF32=y CONFIG_BINFMT_MISC=m
CONFIG_SOLARIS_EMUL=m
CONFIG_PARPORT=m
-# CONFIG_PARPORT_PC is not set
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_FIFO=y
# CONFIG_PARPORT_AMIGA is not set
# CONFIG_PARPORT_MFC3 is not set
# CONFIG_PARPORT_ATARI is not set
# CONFIG_PARPORT_OTHER is not set
-# CONFIG_PARPORT_1284 is not set
+CONFIG_PARPORT_1284=y
CONFIG_PRINTER=m
CONFIG_ENVCTRL=m
-# $Id: Makefile,v 1.43 1999/01/02 16:45:53 davem Exp $
+# $Id: Makefile,v 1.44 1999/08/02 12:05:53 jj Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
check_asm: dummy
@echo "/* Automatically generated. Do not edit. */" > asm_offsets.h
@echo "#ifndef __ASM_OFFSETS_H__" >> asm_offsets.h
- @echo "#define __ASM_OFFSETS_H__" >> asm_offsets.h
- @echo "" >> asm_offsets.h
- @echo "#include <linux/config.h>" >> asm_offsets.h
- @echo "" >> asm_offsets.h
- @echo "#ifndef CONFIG_SMP" >> asm_offsets.h
- @echo "" >> asm_offsets.h
+ @echo -e "#define __ASM_OFFSETS_H__\n" >> asm_offsets.h
+ @echo -e "#include <linux/config.h>\n" >> asm_offsets.h
+ @echo '#if defined(__KERNEL__) && !defined(__ASSEMBLY__)' >> asm_offsets.h
+ @if $(CC) -c -m64 -mcmodel=medlow -o /dev/null /dev/null >/dev/null 2>&1; then \
+ echo '# if !((__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8))' >> asm_offsets.h; \
+ else \
+ echo '# if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)' >> asm_offsets.h; \
+ fi
+ @echo -e "# error Please issue 'make check_asm' in linux top-level directory first\n# endif\n#endif\n" >> asm_offsets.h
+ @echo -e "#ifndef CONFIG_SMP\n" >> asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
@@ -92,11+96,8 @@ check_asm: dummy # </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
- @echo "" >> asm_offsets.h
- @echo "#else /* CONFIG_SMP */" >> asm_offsets.h
- @echo "" >> asm_offsets.h
- @echo "#ifndef SPIN_LOCK_DEBUG" >>asm_offsets.h
- @echo "" >> asm_offsets.h
+ @echo -e "\n#else /* CONFIG_SMP */\n" >> asm_offsets.h
+ @echo -e "#ifndef SPIN_LOCK_DEBUG\n" >>asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#define CONFIG_SMP 1" >> tmp.c
@@ -124,9+125,7 @@ check_asm: dummy # </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
- @echo "" >> asm_offsets.h
- @echo "#else /* SPIN_LOCK_DEBUG */" >> asm_offsets.h
- @echo "" >> asm_offsets.h
+ @echo -e "\n#else /* SPIN_LOCK_DEBUG */\n" >> asm_offsets.h
@echo "#include <linux/sched.h>" > tmp.c
$(CC) -D__SMP__ -DSPIN_LOCK_DEBUG -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@@ -151,10+150,8 @@ check_asm: dummy # </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
- @echo "#endif /* SPIN_LOCK_DEBUG */" >> asm_offsets.h
- @echo "" >> asm_offsets.h
- @echo "#endif /* CONFIG_SMP */" >> asm_offsets.h
- @echo "" >> asm_offsets.h
+ @echo -e "#endif /* SPIN_LOCK_DEBUG */\n" >> asm_offsets.h
+ @echo -e "#endif /* CONFIG_SMP */\n" >> asm_offsets.h
@echo "#endif /* __ASM_OFFSETS_H__ */" >> asm_offsets.h
@if test -r $(HPATH)/asm/asm_offsets.h; then \
if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then \
@@ -339,6+339,16 @@ beyond_if:
current->mm->start_stack =
(unsigned long) create_aout32_tables((char *)bprm->p, bprm);
+ if (!(current->thread.flags & SPARC_FLAG_32BIT)) {
+ unsigned long pgd_cache;
+
+ pgd_cache = ((unsigned long)current->mm->pgd[0])<<11UL;
+ __asm__ __volatile__("stxa\t%0, [%1] %2"
+ : /* no outputs */
+ : "r" (pgd_cache),
+ "r" (TSB_REG), "i" (ASI_DMMU));
+ current->thread.flags |= SPARC_FLAG_32BIT;
+ }
start_thread32(regs, ex.a_entry, current->mm->start_stack);
if (current->flags & PF_PTRACED)
send_sig(SIGTRAP, current, 0);
@@ -142,7+142,7 @@ struct elf_prpsinfo32 #ifdef CONFIG_BINFMT_ELF32_MODULE
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif
-#define ELF_FLAGS_INIT current->tss.flags |= SPARC_FLAG_32BIT
+#define ELF_FLAGS_INIT current->thread.flags |= SPARC_FLAG_32BIT
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
#include <linux/kernel.h>
#include <linux/tasks.h>
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/oplib.h>
@@ -28,6+29,10 @@ device_scan(unsigned long mem_start)) int cpu_nds[64]; /* One node for each cpu */
int cpu_ctr = 0;
+ /* FIX ME FAST... -DaveM */
+ ioport_resource.end = 0xffffffffffffffffUL;
+ iomem_resource.end = 0xffffffffffffffffUL;
+
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
prom_printf("Booting Linux...\n");
-/* $Id: entry.S,v 1.103 1999/05/08 03:00:21 davem Exp $
+/* $Id: entry.S,v 1.106 1999/08/02 08:39:34 davem Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -42,13+42,13 @@ sparc64_vpte_patchme2: /* This is trivial with the new code... */
.globl do_fpdis
do_fpdis:
- ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g5 ! Load Group
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5 ! Load Group
sethi %hi(TSTATE_PEF), %g4 ! IEU0
wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
be,a,pt %icc, 1f ! CTI
clr %g7 ! IEU0
- ldub [%g6 + AOFF_task_tss + AOFF_thread_gsr], %g7 ! Load Group
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7 ! Load Group
1: andcc %g5, FPRS_DL, %g0 ! IEU1
bne,pn %icc, 2f ! CTI
fzero %f0 ! FPA
@@ -157,7+157,7 @@ fpdis_exit: flush %g6
fpdis_exit2:
wr %g7, 0, %gsr
- ldx [%g6 + AOFF_task_tss + AOFF_thread_xfsr], %fsr
+ ldx [%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
rdpr %tstate, %g3
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
@@ -167,13+167,13 @@ fpdis_exit2: .globl do_fptrap
.align 32
do_fptrap:
- ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
- stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
+ stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
rd %fprs, %g1
or %g3, %g1, %g3
- stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
+ stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g3
- stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_gsr]
+ stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
mov SECONDARY_CONTEXT, %g3
add %g6, AOFF_task_fpregs, %g2
ldxa [%g3] ASI_DMMU, %g5
@@ -633,41+633,28 @@ execve_merge: jmpl %g1, %g0
add %sp, STACK_BIAS + REGWIN_SZ, %o0
- .globl sys_pipe, sys_execve, sys_sigpause, sys_nis_syscall
+ .globl sys_pipe, sys_sigpause, sys_nis_syscall
.globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
- .globl sys_sigreturn, sys_rt_sigreturn
+ .globl sys_rt_sigreturn
.globl sys32_sigreturn, sys32_rt_sigreturn
.globl sys32_execve, sys_ptrace
.globl sys_sigaltstack, sys32_sigaltstack
.globl sys32_sigstack
.align 32
-sys_pipe: sethi %hi(sparc_pipe), %g1
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
- jmpl %g1 + %lo(sparc_pipe), %g0
- nop
-sys_nis_syscall:sethi %hi(c_sys_nis_syscall), %g1
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
- jmpl %g1 + %lo(c_sys_nis_syscall), %g0
- nop
-
+sys_pipe: ba,pt %xcc, sparc_pipe
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
sys_memory_ordering:
- sethi %hi(sparc_memory_ordering), %g1
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
- jmpl %g1 + %lo(sparc_memory_ordering), %g0
- nop
-sys_sigaltstack:sethi %hi(do_sigaltstack), %g1
- add %i6, STACK_BIAS, %o2
- jmpl %g1 + %lo(do_sigaltstack), %g1
- nop
-sys32_sigstack: sethi %hi(do_sys32_sigstack), %g1
- mov %i6, %o2
- jmpl %g1 + %lo(do_sys32_sigstack), %g1
- nop
+ ba,pt %xcc, sparc_memory_ordering
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+sys_sigaltstack:ba,pt %xcc, do_sigaltstack
+ add %i6, STACK_BIAS, %o2
+sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
+ mov %i6, %o2
sys32_sigaltstack:
- sethi %hi(do_sys32_sigaltstack), %g1
- mov %i6, %o2
- jmpl %g1 + %lo(do_sys32_sigaltstack), %g1
- nop
+ ba,pt %xcc, do_sys32_sigaltstack
+ mov %i6, %o2
.align 32
sys_sigsuspend: add %sp, STACK_BIAS + REGWIN_SZ, %o0
@@ -689,10+676,6 @@ sys_sigpause: add %sp, STACK_BIAS + REGWIN_SZ, %o1 call do_sigpause
add %o7, 1f-.-4, %o7
nop
-sys_sigreturn: add %sp, STACK_BIAS + REGWIN_SZ, %o0
- call do_sigreturn
- add %o7, 1f-.-4, %o7
- nop
sys32_sigreturn:
add %sp, STACK_BIAS + REGWIN_SZ, %o0
call do_sigreturn32
@@ -761,38+744,30 @@ sys_clone: flushw ba,pt %xcc, do_fork
add %sp, STACK_BIAS + REGWIN_SZ, %o2
ret_from_syscall:
- /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves tss.flags in
+ /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
* %o7 for us. Check performance counter stuff too.
*/
-#ifdef __SMP__
- andn %o7, 0x100, %l0
+ andn %o7, SPARC_FLAG_NEWCHILD, %l0
mov %g5, %o0 /* 'prev' */
call schedule_tail
- sth %l0, [%g6 + AOFF_task_tss + AOFF_thread_flags]
-#else
- andn %o7, 0x100, %l0
- sth %l0, [%g6 + AOFF_task_tss + AOFF_thread_flags]
-#endif
- andcc %l0, 0x200, %g0
+ stb %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
+ andcc %l0, SPARC_FLAG_PERFCTR, %g0
be,pt %icc, 1f
nop
- ldx [%g6 + AOFF_task_tss + AOFF_thread_pcr_reg], %o7
+ ldx [%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
wr %g0, %o7, %pcr
wr %g0, %g0, %pic
1: b,pt %xcc, ret_sys_call
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
sparc_exit: rdpr %otherwin, %g1
- rdpr %pstate, %g2
- wrpr %g2, PSTATE_IE, %pstate
+ wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
rdpr %cansave, %g3
add %g3, %g1, %g3
wrpr %g3, 0x0, %cansave
wrpr %g0, 0x0, %otherwin
- wrpr %g2, 0x0, %pstate
- mov %o7, %l5
- sth %g0, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
- call sys_exit
- mov %l5, %o7
+ wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
+ ba,pt %xcc, sys_exit
+ stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
-/* $Id: etrap.S,v 1.41 1999/05/25 16:53:09 jj Exp $
+/* $Id: etrap.S,v 1.42 1999/07/30 09:35:18 davem Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -56,7+56,7 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group wrpr %g0, 0, %canrestore ! Single Group+4bubbles
sll %g2, 3, %g2 ! IEU0 Group
mov 1, %l5 ! IEU1
- stb %l5, [%l6 + AOFF_task_tss + AOFF_thread_fpdepth] ! Store
+ stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
wrpr %g3, 0, %otherwin ! Single Group+4bubbles
wrpr %g2, 0, %wstate ! Single Group+4bubbles
stxa %g0, [%l4] ASI_DMMU ! Store Group
@@ -89,11+89,11 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group jmpl %l2 + 0x4, %g0 ! CTI Group
mov %l6, %g6 ! IEU0
-3: ldub [%l6 + AOFF_task_tss + AOFF_thread_fpdepth], %l5 ! Load Group
- add %l6, AOFF_task_tss + AOFF_thread_fpsaved + 1, %l4 ! IEU0
+3: ldub [%l6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5 ! Load Group
+ add %l6, AOFF_task_thread + AOFF_thread_fpsaved + 1, %l4 ! IEU0
srl %l5, 1, %l3 ! IEU0 Group
add %l5, 2, %l5 ! IEU1
- stb %l5, [%l6 + AOFF_task_tss + AOFF_thread_fpdepth] ! Store
+ stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
ba,pt %xcc, 2b ! CTI
stb %g0, [%l4 + %l3] ! Store Group
-/* $Id: ioctl32.c,v 1.63 1999/06/09 04:56:14 davem Exp $
+/* $Id: ioctl32.c,v 1.65 1999/07/30 09:35:19 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
#include <linux/ext2_fs.h>
#include <linux/videodev.h>
#include <linux/netdevice.h>
+#include <linux/raw.h>
#include <scsi/scsi.h>
/* Ugly hack. */
@@ -2366,6+2367,10 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) case AUTOFS_IOC_PROTOVER:
case AUTOFS_IOC_EXPIRE:
+ /* Raw devices */
+ case RAW_SETBIND:
+ case RAW_GETBIND:
+
error = sys_ioctl (fd, cmd, arg);
goto out;
-/* $Id: process.c,v 1.95 1999/06/28 08:48:51 davem Exp $
+/* $Id: process.c,v 1.98 1999/08/02 08:39:35 davem Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
/*
* the idle loop on a Sparc... ;)
*/
-asmlinkage int sys_idle(void)
+int cpu_idle(void)
{
if (current->pid != 0)
return -EPERM;
@@ -77,7+77,7 @@ asmlinkage int sys_idle(void) */
#define idle_me_harder() (cpu_data[current->processor].idle_volume += 1)
#define unidle_me() (cpu_data[current->processor].idle_volume = 0)
-asmlinkage int cpu_idle(void)
+int cpu_idle(void)
{
current->priority = 0;
current->counter = -100;
@@ -99,15+99,6 @@ asmlinkage int cpu_idle(void) }
}
-asmlinkage int sys_idle(void)
-{
- if(current->pid != 0)
- return -EPERM;
-
- cpu_idle();
- return 0;
-}
-
#endif
extern char reboot_command [];
@@ -189,7+180,7 @@ static void show_regwindow(struct pt_regs *regs) struct reg_window r_w;
mm_segment_t old_fs;
- if ((regs->tstate & TSTATE_PRIV) || !(current->tss.flags & SPARC_FLAG_32BIT)) {
+ if ((regs->tstate & TSTATE_PRIV) || !(current->thread.flags & SPARC_FLAG_32BIT)) {
__asm__ __volatile__ ("flushw");
rw = (struct reg_window *)(regs->u_regs[14] + STACK_BIAS);
if (!(regs->tstate & TSTATE_PRIV)) {
@@ -369,90+360,96 @@ void show_regs32(struct pt_regs32 *regs) regs->u_regs[15]);
}
-void show_thread(struct thread_struct *tss)
+void show_thread(struct thread_struct *thread)
{
int i;
#if 0
- printk("kregs: 0x%016lx\n", (unsigned long)tss->kregs);
- show_regs(tss->kregs);
+ printk("kregs: 0x%016lx\n", (unsigned long)thread->kregs);
+ show_regs(thread->kregs);
#endif
- printk("sig_address: 0x%016lx\n", tss->sig_address);
- printk("sig_desc: 0x%016lx\n", tss->sig_desc);
- printk("ksp: 0x%016lx\n", tss->ksp);
+ printk("sig_address: 0x%016lx\n", thread->sig_address);
+ printk("sig_desc: 0x%016lx\n", thread->sig_desc);
+ printk("ksp: 0x%016lx\n", thread->ksp);
- if (tss->w_saved) {
+ if (thread->w_saved) {
for (i = 0; i < NSWINS; i++) {
- if (!tss->rwbuf_stkptrs[i])
+ if (!thread->rwbuf_stkptrs[i])
continue;
printk("reg_window[%d]:\n", i);
- printk("stack ptr: 0x%016lx\n", tss->rwbuf_stkptrs[i]);
+ printk("stack ptr: 0x%016lx\n", thread->rwbuf_stkptrs[i]);
}
- printk("w_saved: 0x%04x\n", tss->w_saved);
+ printk("w_saved: 0x%04x\n", thread->w_saved);
}
- printk("flags: 0x%08x\n", tss->flags);
- printk("current_ds: 0x%x\n", tss->current_ds.seg);
+ printk("flags: 0x%08x\n", thread->flags);
+ printk("current_ds: 0x%x\n", thread->current_ds.seg);
}
/* Free current thread data structures etc.. */
void exit_thread(void)
{
- if (current->tss.utraps) {
- if (current->tss.utraps[0] < 2)
- kfree (current->tss.utraps);
+ struct thread_struct *t = &current->thread;
+
+ if (t->utraps) {
+ if (t->utraps[0] < 2)
+ kfree (t->utraps);
else
- current->tss.utraps[0]--;
+ t->utraps[0]--;
}
/* Turn off performance counters if on. */
- if (current->tss.flags & SPARC_FLAG_PERFCTR) {
- current->tss.user_cntd0 =
- current->tss.user_cntd1 = NULL;
- current->tss.pcr_reg = 0;
- current->tss.flags &= ~(SPARC_FLAG_PERFCTR);
+ if (t->flags & SPARC_FLAG_PERFCTR) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ t->flags &= ~(SPARC_FLAG_PERFCTR);
write_pcr(0);
}
}
void flush_thread(void)
{
- if (!(current->tss.flags & SPARC_FLAG_KTHREAD))
- flush_user_windows();
- current->tss.w_saved = 0;
+ struct thread_struct *t = &current->thread;
+
+ if (current->mm) {
+ if (t->flags & SPARC_FLAG_32BIT) {
+ struct mm_struct *mm = current->mm;
+ pgd_t *pgd0 = &mm->pgd[0];
+ unsigned long pgd_cache;
+
+ if (pgd_none(*pgd0)) {
+ pmd_t *page = get_pmd_fast();
+ if (!page)
+ (void) get_pmd_slow(pgd0, 0);
+ else
+ pgd_set(pgd0, page);
+ }
+ pgd_cache = pgd_val(*pgd0) << 11UL;
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : /* no outputs */
+ : "r" (pgd_cache),
+ "r" (TSB_REG),
+ "i" (ASI_DMMU));
+ }
+ }
+ t->w_saved = 0;
/* Turn off performance counters if on. */
- if (current->tss.flags & SPARC_FLAG_PERFCTR) {
- current->tss.user_cntd0 =
- current->tss.user_cntd1 = NULL;
- current->tss.pcr_reg = 0;
- current->tss.flags &= ~(SPARC_FLAG_PERFCTR);
+ if (t->flags & SPARC_FLAG_PERFCTR) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ t->flags &= ~(SPARC_FLAG_PERFCTR);
write_pcr(0);
}
- /* No new signal delivery by default. */
- current->tss.new_signal = 0;
- current->tss.fpsaved[0] = 0;
+ /* Clear FPU register state. */
+ t->fpsaved[0] = 0;
- /* Now, this task is no longer a kernel thread. */
- current->tss.current_ds = USER_DS;
- if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- current->tss.flags &= ~SPARC_FLAG_KTHREAD;
+ if (t->current_ds.seg != ASI_AIUS)
+ set_fs(USER_DS);
- /* exec_mmap() set context to NO_CONTEXT, here is
- * where we grab a new one.
- */
- activate_context(current);
- }
- if (current->tss.flags & SPARC_FLAG_32BIT)
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : /* no outputs */
- : "r"(TSB_REG), "i"(ASI_DMMU));
- __cli();
- current->tss.ctx = current->mm->context & 0x3ff;
- spitfire_set_secondary_context (current->tss.ctx);
- __asm__ __volatile__("flush %g6");
- __sti();
+ /* Init new signal delivery disposition. */
+ t->flags &= ~SPARC_FLAG_NEWSIGNALS;
}
/* It's a bit more tricky when 64-bit tasks are involved... */
@@ -460,12+457,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) {
unsigned long fp, distance, rval;
- /* do_fork() grabs the parent semaphore, we must release it
- * temporarily so we can build the child clone stack frame
- * without deadlocking.
- */
- up(&current->mm->mmap_sem);
- if(!(current->tss.flags & SPARC_FLAG_32BIT)) {
+ if(!(current->thread.flags & SPARC_FLAG_32BIT)) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window *)psp)->ins[6]));
@@ -482,7+474,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) rval = (csp - distance);
if(copy_in_user(rval, psp, distance))
rval = 0;
- else if(current->tss.flags & SPARC_FLAG_32BIT) {
+ else if(current->thread.flags & SPARC_FLAG_32BIT) {
if(put_user(((u32)csp), &(((struct reg_window32 *)rval)->ins[6])))
rval = 0;
} else {
@@ -492,47+484,46 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) else
rval = rval - STACK_BIAS;
}
- down(&current->mm->mmap_sem);
return rval;
}
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
- struct thread_struct *tp)
+ struct thread_struct *t)
{
int i;
for(i = first_win; i < last_win; i++) {
- tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1];
- memcpy(&tp->reg_window[i], &tp->reg_window[i+1],
+ t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
+ memcpy(&t->reg_window[i], &t->reg_window[i+1],
sizeof(struct reg_window));
}
}
void synchronize_user_stack(void)
{
- struct thread_struct *tp = &current->tss;
+ struct thread_struct *t = &current->thread;
unsigned long window;
flush_user_windows();
- if((window = tp->w_saved) != 0) {
+ if((window = t->w_saved) != 0) {
int winsize = REGWIN_SZ;
int bias = 0;
- if(tp->flags & SPARC_FLAG_32BIT)
+ if(t->flags & SPARC_FLAG_32BIT)
winsize = REGWIN32_SZ;
else
bias = STACK_BIAS;
window -= 1;
do {
- unsigned long sp = (tp->rwbuf_stkptrs[window] + bias);
- struct reg_window *rwin = &tp->reg_window[window];
+ unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+ struct reg_window *rwin = &t->reg_window[window];
if(!copy_to_user((char *)sp, rwin, winsize)) {
- shift_window_buffer(window, tp->w_saved - 1, tp);
- tp->w_saved--;
+ shift_window_buffer(window, t->w_saved - 1, t);
+ t->w_saved--;
}
} while(window--);
}
@@ -540,28+531,28 @@ void synchronize_user_stack(void)
void fault_in_user_windows(struct pt_regs *regs)
{
- struct thread_struct *tp = &current->tss;
+ struct thread_struct *t = &current->thread;
unsigned long window;
int winsize = REGWIN_SZ;
int bias = 0;
- if(tp->flags & SPARC_FLAG_32BIT)
+ if(t->flags & SPARC_FLAG_32BIT)
winsize = REGWIN32_SZ;
else
bias = STACK_BIAS;
flush_user_windows();
- window = tp->w_saved;
+ window = t->w_saved;
if(window != 0) {
window -= 1;
do {
- unsigned long sp = (tp->rwbuf_stkptrs[window] + bias);
- struct reg_window *rwin = &tp->reg_window[window];
+ unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+ struct reg_window *rwin = &t->reg_window[window];
if(copy_to_user((char *)sp, rwin, winsize))
goto barf;
} while(window--);
}
- current->tss.w_saved = 0;
+ t->w_saved = 0;
return;
barf:
do_exit(SIGILL);
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
struct task_struct *p, struct pt_regs *regs)
{
+ struct thread_struct *t = &p->thread;
char *child_trap_frame;
/* Calculate offset to stack_frame & pt_regs */
child_trap_frame = ((char *)p) + ((PAGE_SIZE << 1) - (TRACEREG_SZ+REGWIN_SZ));
memcpy(child_trap_frame, (((struct reg_window *)regs)-1), (TRACEREG_SZ+REGWIN_SZ));
- p->tss.ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
- p->tss.kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct reg_window));
- p->tss.cwp = (regs->tstate + 1) & TSTATE_CWP;
- p->tss.fpsaved[0] = 0;
- p->mm->segments = (void *) 0;
+ t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
+ t->flags |= SPARC_FLAG_NEWCHILD;
+ t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct reg_window));
+ t->cwp = (regs->tstate + 1) & TSTATE_CWP;
+ t->fpsaved[0] = 0;
if(regs->tstate & TSTATE_PRIV) {
/* Special case, if we are spawning a kernel thread from
* a userspace task (via KMOD, NFS, or similar) we must
* disable performance counters in the child because the
* address space and protection realm are changing.
*/
- if (current->tss.flags & SPARC_FLAG_PERFCTR) {
- p->tss.user_cntd0 =
- p->tss.user_cntd1 = NULL;
- p->tss.pcr_reg = 0;
- p->tss.flags &= ~(SPARC_FLAG_PERFCTR);
+ if (t->flags & SPARC_FLAG_PERFCTR) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ t->flags &= ~(SPARC_FLAG_PERFCTR);
}
- p->tss.kregs->u_regs[UREG_FP] = p->tss.ksp;
- p->tss.flags |= (SPARC_FLAG_KTHREAD | SPARC_FLAG_NEWCHILD);
- p->tss.current_ds = KERNEL_DS;
- p->tss.ctx = 0;
- __asm__ __volatile__("flushw");
- memcpy((void *)(p->tss.ksp + STACK_BIAS),
+ t->kregs->u_regs[UREG_FP] = p->thread.ksp;
+ t->current_ds = KERNEL_DS;
+ flush_register_windows();
+ memcpy((void *)(t->ksp + STACK_BIAS),
(void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
sizeof(struct reg_window));
- p->tss.kregs->u_regs[UREG_G6] = (unsigned long) p;
+ t->kregs->u_regs[UREG_G6] = (unsigned long) p;
} else {
- if(current->tss.flags & SPARC_FLAG_32BIT) {
+ if(t->flags & SPARC_FLAG_32BIT) {
sp &= 0x00000000ffffffffUL;
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
}
- p->tss.kregs->u_regs[UREG_FP] = sp;
- p->tss.flags = (p->tss.flags & ~SPARC_FLAG_KTHREAD) |
- SPARC_FLAG_NEWCHILD;
- p->tss.current_ds = USER_DS;
- p->tss.ctx = (p->mm->context & 0x3ff);
+ t->kregs->u_regs[UREG_FP] = sp;
+ t->current_ds = USER_DS;
if (sp != regs->u_regs[UREG_FP]) {
unsigned long csp;
csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
if(!csp)
return -EFAULT;
- p->tss.kregs->u_regs[UREG_FP] = csp;
+ t->kregs->u_regs[UREG_FP] = csp;
}
- if (p->tss.utraps)
- p->tss.utraps[0]++;
+ if (t->utraps)
+ t->utraps[0]++;
}
/* Set the return value for the child. */
- p->tss.kregs->u_regs[UREG_I0] = current->pid;
- p->tss.kregs->u_regs[UREG_I1] = 1;
+ t->kregs->u_regs[UREG_I0] = current->pid;
+ t->kregs->u_regs[UREG_I1] = 1;
/* Set the second return value for the parent. */
regs->u_regs[UREG_I1] = 0;
+#if 0
+ printk("\ncopy_thread: c(%p[mm(%p:%p)]) p(%p[mm(%p:%p)])\n",
+ current, current->mm, current->active_mm,
+ p, p->mm, p->active_mm);
+ printk("copy_thread: c MM_ctx(%016lx) MM_pgd(%016lx)\n",
+ (current->mm ? current->mm->context : 0),
+ (current->mm ? pgd_val(current->mm->pgd[0]) : 0));
+ printk("copy_thread: p MM_ctx(%016lx) MM_pgd(%08x)\n",
+ (p->mm ? p->mm->context : 0),
+ (p->mm ? pgd_val(p->mm->pgd[0]) : 0));
+ printk("copy_thread: c->flags(%x) p->flags(%x) ",
+ current->thread.flags, p->thread.flags);
+#endif
return 0;
}
@@ -703,10+702,10 @@ void dump_thread(struct pt_regs * regs, struct user * dump) dump->u_dsize &= ~(PAGE_SIZE - 1);
first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
- memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->tss.float_regs[0], (sizeof(unsigned long) * 32));
- dump->fpu.fpstatus.fsr = current->tss.fsr;
+ memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32));
+ dump->fpu.fpstatus.fsr = current->thread.fsr;
dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
- dump->sigcode = current->tss.sig_desc;
+ dump->sigcode = current->thread.sig_desc;
#endif
}
@@ -729,9+728,9 @@ typedef struct { int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
unsigned long *kfpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
- unsigned long fprs = current->tss.fpsaved[0];
+ unsigned long fprs = current->thread.fpsaved[0];
- if ((current->tss.flags & SPARC_FLAG_32BIT) != 0) {
+ if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
if (fprs & FPRS_DL)
@@ -745,7+744,7 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) memset(&fpregs32->pr_q[0], 0,
(sizeof(unsigned int) * 64));
if (fprs & FPRS_FEF) {
- fpregs32->pr_fsr = (unsigned int) current->tss.xfsr[0];
+ fpregs32->pr_fsr = (unsigned int) current->thread.xfsr[0];
fpregs32->pr_en = 1;
} else {
fpregs32->pr_fsr = 0;
@@ -765,8+764,8 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) memset(&fpregs->pr_regs[16], 0,
sizeof(unsigned int) * 32);
if(fprs & FPRS_FEF) {
- fpregs->pr_fsr = current->tss.xfsr[0];
- fpregs->pr_gsr = current->tss.gsr[0];
+ fpregs->pr_fsr = current->thread.xfsr[0];
+ fpregs->pr_gsr = current->thread.gsr[0];
} else {
fpregs->pr_fsr = fpregs->pr_gsr = 0;
}
@@ -784,6+783,8 @@ asmlinkage int sparc_execve(struct pt_regs *regs) int error, base = 0;
char *filename;
+ /* User register window flush is done by entry.S */
+
/* Check for indirect call. */
if(regs->u_regs[UREG_G1] == 0)
base = 1;
@@ -798,8+799,8 @@ asmlinkage int sparc_execve(struct pt_regs *regs) putname(filename);
if(!error) {
fprs_write(0);
- current->tss.xfsr[0] = 0;
- current->tss.fpsaved[0] = 0;
+ current->thread.xfsr[0] = 0;
+ current->thread.fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
-/* $Id: psycho.c,v 1.86 1999/07/01 10:39:43 davem Exp $
+/* $Id: psycho.c,v 1.87 1999/07/23 01:56:45 davem Exp $
* psycho.c: Ultra/AX U2P PCI controller support.
*
* Copyright (C) 1997 David S. Miller (davem@caipfs.rutgers.edu)
@@ -52,7+52,7 @@ static inline void pt_succ_return(struct pt_regs *regs, unsigned long value) static inline void
pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long *addr)
{
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
if(put_user(value, (unsigned int *)addr))
return pt_error_return(regs, EFAULT);
} else {
@@ -114,7+114,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) unsigned long addr2 = regs->u_regs[UREG_I4];
struct task_struct *child;
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
addr &= 0xffffffffUL;
data &= 0xffffffffUL;
addr2 &= 0xffffffffUL;
@@ -220,7+220,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) goto out;
}
- if(!(child->tss.flags & SPARC_FLAG_32BIT) &&
+ if(!(child->thread.flags & SPARC_FLAG_32BIT) &&
((request == PTRACE_READDATA64) ||
(request == PTRACE_WRITEDATA64) ||
(request == PTRACE_READTEXT64) ||
@@ -242,7+242,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) int res, copied;
res = -EIO;
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 0);
tmp64 = (unsigned long) tmp32;
@@ -267,7+267,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) unsigned int tmp32;
int copied, res = -EIO;
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
tmp32 = data;
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 1);
@@ -289,7+289,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
- struct pt_regs *cregs = child->tss.kregs;
+ struct pt_regs *cregs = child->thread.kregs;
int rval;
if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
@@ -313,7+313,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
- struct pt_regs *cregs = child->tss.kregs;
+ struct pt_regs *cregs = child->thread.kregs;
int rval;
if (__put_user(cregs->tstate, (&pregs->tstate)) ||
@@ -337,7+337,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
- struct pt_regs *cregs = child->tss.kregs;
+ struct pt_regs *cregs = child->thread.kregs;
unsigned int psr, pc, npc, y;
int i;
@@ -370,7+370,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
- struct pt_regs *cregs = child->tss.kregs;
+ struct pt_regs *cregs = child->thread.kregs;
unsigned long tstate, tpc, tnpc, y;
int i;
@@ -418,7+418,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_to_user(&fps->regs[0], fpregs,
(32 * sizeof(unsigned int))) ||
- __put_user(child->tss.xfsr[0], (&fps->fsr)) ||
+ __put_user(child->thread.xfsr[0], (&fps->fsr)) ||
__put_user(0, (&fps->fpqd)) ||
__put_user(0, (&fps->flags)) ||
__put_user(0, (&fps->extra)) ||
@@ -439,7+439,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_to_user(&fps->regs[0], fpregs,
(64 * sizeof(unsigned int))) ||
- __put_user(child->tss.xfsr[0], (&fps->fsr))) {
+ __put_user(child->thread.xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out;
}
@@ -468,11+468,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs) pt_error_return(regs, EFAULT);
goto out;
}
- child->tss.xfsr[0] &= 0xffffffff00000000UL;
- child->tss.xfsr[0] |= fsr;
- if (!(child->tss.fpsaved[0] & FPRS_FEF))
- child->tss.gsr[0] = 0;
- child->tss.fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+ child->thread.xfsr[0] &= 0xffffffff00000000UL;
+ child->thread.xfsr[0] |= fsr;
+ if (!(child->thread.fpsaved[0] & FPRS_FEF))
+ child->thread.gsr[0] = 0;
+ child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL);
pt_succ_return(regs, 0);
goto out;
}
@@ -486,13+486,13 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_from_user(fpregs, &fps->regs[0],
(64 * sizeof(unsigned int))) ||
- __get_user(child->tss.xfsr[0], (&fps->fsr))) {
+ __get_user(child->thread.xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out;
}
- if (!(child->tss.fpsaved[0] & FPRS_FEF))
- child->tss.gsr[0] = 0;
- child->tss.fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+ if (!(child->thread.fpsaved[0] & FPRS_FEF))
+ child->thread.gsr[0] = 0;
+ child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
pt_succ_return(regs, 0);
goto out;
}
@@ -538,11+538,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs) goto out;
}
#ifdef DEBUG_PTRACE
- printk ("Original: %016lx %016lx\n", child->tss.kregs->tpc, child->tss.kregs->tnpc);
+ printk ("Original: %016lx %016lx\n", child->thread.kregs->tpc, child->thread.kregs->tnpc);
printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
- child->tss.kregs->tpc = addr;
- child->tss.kregs->tnpc = addr + 4;
+ child->thread.kregs->tpc = addr;
+ child->thread.kregs->tnpc = addr + 4;
}
if (request == PTRACE_SYSCALL)
@@ -554,8+554,8 @@ asmlinkage void do_ptrace(struct pt_regs *regs) #ifdef DEBUG_PTRACE
printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
child->pid, child->exit_code,
- child->tss.kregs->tpc,
- child->tss.kregs->tnpc);
+ child->thread.kregs->tpc,
+ child->thread.kregs->tnpc);
#endif
wake_up_process(child);
@@ -634,7+634,7 @@ asmlinkage void syscall_trace(void) return;
current->exit_code = SIGTRAP;
current->state = TASK_STOPPED;
- current->tss.flags ^= MAGIC_CONSTANT;
+ current->thread.flags ^= MAGIC_CONSTANT;
notify_parent(current, SIGCHLD);
schedule();
/*
-/* $Id: rtrap.S,v 1.46 1999/05/25 16:53:20 jj Exp $
+/* $Id: rtrap.S,v 1.47 1999/07/30 09:35:23 davem Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
+#include <asm/processor.h>
#define PTREGS_OFF (STACK_BIAS + REGWIN_SZ)
@@ -39,13+40,13 @@ rtrap: sethi %hi(bh_active), %l2 be,pt %icc, to_user
andn %l7, PSTATE_IE, %l7
- ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %l5
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
brz,pt %l5, rt_continue
srl %l5, 1, %o0
- add %g6, AOFF_task_tss + AOFF_thread_fpsaved, %l6
+ add %g6, AOFF_task_thread + AOFF_thread_fpsaved, %l6
ldub [%l6 + %o0], %l2
sub %l5, 2, %l5
- add %g6, AOFF_task_tss + AOFF_thread_gsr, %o1
+ add %g6, AOFF_task_thread + AOFF_thread_gsr, %o1
andcc %l2, (FPRS_FEF|FPRS_DU), %g0
be,pt %icc, 2f
and %l2, FPRS_DL, %l6
@@ -55,7+56,7 @@ rtrap: sethi %hi(bh_active), %l2 rd %fprs, %g5
wr %g5, FPRS_FEF, %fprs
ldub [%o1 + %o0], %g5
- add %g6, AOFF_task_tss + AOFF_thread_xfsr, %o1
+ add %g6, AOFF_task_thread + AOFF_thread_xfsr, %o1
membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, AOFF_task_fpregs, %o3
@@ -71,9+72,8 @@ rtrap: sethi %hi(bh_active), %l2 ldda [%o4 + %o2] ASI_BLK_P, %f48
1: membar #Sync
ldx [%o1 + %o5], %fsr
-2: stb %l5, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth]
-rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
- ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+2: stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
+rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
mov %g6, %o5
@@ -105,10+105,11 @@ rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0 wrpr %o2, %g0, %tnpc
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
+ ldxa [%l7 + %l7] ASI_DMMU, %l0
stxa %l0, [%l7] ASI_DMMU
flush %o5
- rdpr %wstate, %l1
+ rdpr %wstate, %l1
rdpr %otherwin, %l2
srl %l1, 3, %l1
wrpr %l2, %g0, %canrestore
@@ -116,8+117,8 @@ rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0 wrpr %g0, %g0, %otherwin
restore
rdpr %canrestore, %g1
- wrpr %g1, 0x0, %cleanwin
+ wrpr %g1, 0x0, %cleanwin
retry
kern_rtt: restore
retry
@@ -125,8+126,8 @@ to_user: ldx [%g6 + AOFF_task_need_resched], %l0 wrpr %l7, PSTATE_IE, %pstate
orcc %g0, %l0, %g0
be,a,pt %xcc, check_signal
- lduw [%g6 + AOFF_task_sigpending], %l0
+ lduw [%g6 + AOFF_task_sigpending], %l0
call schedule
nop
lduw [%g6 + AOFF_task_sigpending], %l0
@@ -146,7+147,7 @@ check_signal: brz,a,pt %l0, check_user_wins */
check_user_wins:
wrpr %l7, 0x0, %pstate
- lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
brz,pt %o2, 1f
sethi %hi(TSTATE_PEF), %l6
@@ -162,8+163,8 @@ check_user_wins: call rtrap_check
add %sp, STACK_BIAS + REGWIN_SZ, %o0
#endif
- lduh [%g6 + AOFF_task_tss + AOFF_thread_flags], %l5
- andcc %l5, 0x200, %g0
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
+ andcc %l5, SPARC_FLAG_PERFCTR, %g0
be,pt %xcc, 1f
nop
@@ -172,7+173,7 @@ check_user_wins: call update_perfctrs
nop
wrpr %l7, 0x0, %pstate
- lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
brz,pt %o2, 1f
sethi %hi(TSTATE_PEF), %l6
wrpr %l7, PSTATE_IE, %pstate
@@ -182,14+183,14 @@ check_user_wins: 1:
andcc %l1, %l6, %g0
be,pt %xcc, rt_continue
- stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
+ stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
be,a,pn %icc, rt_continue
andn %l1, %l6, %l1
- ba,pt %xcc, rt_continue+4
- lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
+ ba,pt %xcc, rt_continue
+ nop
5: wr %g0, FPRS_FEF, %fprs
membar #StoreLoad | #LoadLoad
@@ -201,6+202,6 @@ check_user_wins: 1: membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
- stb %l5, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth]
+ stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
#undef PTREGS_OFF
-/* $Id: setup.c,v 1.44 1999/05/28 02:17:29 davem Exp $
+/* $Id: setup.c,v 1.46 1999/08/02 08:39:36 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
@@ -135,17+135,21 @@ int prom_callback(long *args) * Find process owning ctx, lookup mapping.
*/
struct task_struct *p;
+ struct mm_struct *mm = NULL;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- for_each_task(p)
- if (p->tss.ctx == ctx)
+ for_each_task(p) {
+ mm = p->mm;
+ if (CTX_HWBITS(mm->context) == ctx)
break;
- if (p->tss.ctx != ctx)
+ }
+ if (!mm ||
+ CTX_HWBITS(mm->context) != ctx)
goto done;
- pgdp = pgd_offset(p->mm, va);
+ pgdp = pgd_offset(mm, va);
if (pgd_none(*pgdp))
goto done;
pmdp = pmd_offset(pgdp, va);
@@ -534,8+538,7 @@ __initfunc(void setup_arch(char **cmdline_p, init_mm.mmap->vm_page_prot = PAGE_SHARED;
init_mm.mmap->vm_start = PAGE_OFFSET;
init_mm.mmap->vm_end = *memory_end_p;
- init_mm.context = (unsigned long) NO_CONTEXT;
- init_task.tss.kregs = &fake_swapper_regs;
+ init_task.thread.kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
-/* $Id: signal.c,v 1.41 1999/06/14 05:23:58 davem Exp $
+/* $Id: signal.c,v 1.43 1999/07/30 09:35:24 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -46,7+46,7 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
- struct thread_struct *tp = &current->tss;
+ struct thread_struct *tp = &current->thread;
mc_gregset_t *grp;
unsigned long pc, npc, tstate;
unsigned long fp, i7;
@@ -123,9+123,9 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) err |= copy_from_user(fpregs+16,
((unsigned long *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
(sizeof(unsigned int) * 32));
- err |= __get_user(current->tss.xfsr[0],
+ err |= __get_user(current->thread.xfsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
- err |= __get_user(current->tss.gsr[0],
+ err |= __get_user(current->thread.gsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
regs->tstate &= ~TSTATE_PEF;
}
@@ -141,7+141,7 @@ do_sigsegv: asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
- struct thread_struct *tp = &current->tss;
+ struct thread_struct *tp = &current->thread;
mc_gregset_t *grp;
mcontext_t *mcp;
unsigned long fp, i7;
@@ -155,7+155,7 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs) #if 1
fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
- fenab = (current->tss.fpsaved[0] & FPRS_FEF);
+ fenab = (current->thread.fpsaved[0] & FPRS_FEF);
#endif
mcp = &ucp->uc_mcontext;
@@ -205,7+205,7 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs) unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
unsigned long fprs;
- fprs = current->tss.fpsaved[0];
+ fprs = current->thread.fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
(sizeof(unsigned int) * 32));
@@ -213,8+213,8 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs) err |= copy_to_user(
((unsigned long *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
(sizeof(unsigned int) * 32));
- err |= __put_user(current->tss.xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
- err |= __put_user(current->tss.gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
+ err |= __put_user(current->thread.xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
+ err |= __put_user(current->thread.gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
}
if (err)
@@ -226,34+226,17 @@ do_sigsegv: do_exit(SIGSEGV);
}
-/*
- * The new signal frame, intended to be used for Linux applications only
- * (we have enough in there to work with clone).
- * All the interesting bits are in the info field.
- */
-
-struct new_signal_frame {
- struct sparc_stackf ss;
- __siginfo_t info;
- __siginfo_fpu_t * fpu_save;
- unsigned int insns [2];
- unsigned long extramask[_NSIG_WORDS-1];
- __siginfo_fpu_t fpu_state;
-};
-
struct rt_signal_frame {
struct sparc_stackf ss;
- siginfo_t info;
+ siginfo_t info;
struct pt_regs regs;
sigset_t mask;
__siginfo_fpu_t * fpu_save;
- unsigned int insns [2];
stack_t stack;
__siginfo_fpu_t fpu_state;
};
/* Align macros */
-#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
/*
@@ -265,7+248,7 @@ asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs) sigset_t saveset;
#ifdef CONFIG_SPARC32_COMPAT
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
extern asmlinkage void _sigpause32_common(old_sigset_t32,
struct pt_regs *);
_sigpause32_common(set, regs);
@@ -372,65+355,12 @@ restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu) if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
(sizeof(unsigned int) * 32));
- err |= __get_user(current->tss.xfsr[0], &fpu->si_fsr);
- err |= __get_user(current->tss.gsr[0], &fpu->si_gsr);
- current->tss.fpsaved[0] |= fprs;
+ err |= __get_user(current->thread.xfsr[0], &fpu->si_fsr);
+ err |= __get_user(current->thread.gsr[0], &fpu->si_gsr);
+ current->thread.fpsaved[0] |= fprs;
return err;
}
-void do_sigreturn(struct pt_regs *regs)
-{
- struct new_signal_frame *sf;
- unsigned long tpc, tnpc, tstate;
- __siginfo_fpu_t *fpu_save;
- sigset_t set;
- int err;
-
- synchronize_user_stack ();
- sf = (struct new_signal_frame *)
- (regs->u_regs [UREG_FP] + STACK_BIAS);
-
- /* 1. Make sure we are not getting garbage from the user */
- if (((unsigned long) sf) & 3)
- goto segv;
-
- err = get_user(tpc, &sf->info.si_regs.tpc);
- err |= __get_user(tnpc, &sf->info.si_regs.tnpc);
- err |= ((tpc | tnpc) & 3);
-
- /* 2. Restore the state */
- err |= __get_user(regs->y, &sf->info.si_regs.y);
- err |= __get_user(tstate, &sf->info.si_regs.tstate);
- err |= copy_from_user(regs->u_regs, sf->info.si_regs.u_regs, sizeof(regs->u_regs));
-
- /* User can only change condition codes in %tstate. */
- regs->tstate &= ~(TSTATE_ICC);
- regs->tstate |= (tstate & TSTATE_ICC);
-
- err |= __get_user(fpu_save, &sf->fpu_save);
- if (fpu_save)
- err |= restore_fpu_state(regs, &sf->fpu_state);
-
- err |= __get_user(set.sig[0], &sf->info.si_mask);
- if (_NSIG_WORDS > 1)
- err |= __copy_from_user(&set.sig[1], &sf->extramask, sizeof(sf->extramask));
-
- if (err)
- goto segv;
-
- regs->tpc = tpc;
- regs->tnpc = tnpc;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
- current->blocked = set;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
- return;
-segv:
- send_sig(SIGSEGV, current, 1);
-}
-
void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame *sf;
@@ -503,15+433,15 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu) unsigned long fprs;
int err = 0;
- fprs = current->tss.fpsaved[0];
+ fprs = current->thread.fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
- err |= __put_user(current->tss.xfsr[0], &fpu->si_fsr);
- err |= __put_user(current->tss.gsr[0], &fpu->si_gsr);
+ err |= __put_user(current->thread.xfsr[0], &fpu->si_fsr);
+ err |= __put_user(current->thread.gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
@@ -533,77+463,6 @@ static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, u }
static inline void
-new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
-{
- struct new_signal_frame *sf;
- int sigframe_size, err;
-
- /* 1. Make sure everything is clean */
- synchronize_user_stack();
- save_and_clear_fpu();
-
- sigframe_size = NF_ALIGNEDSZ;
-
- if (!(current->tss.fpsaved[0] & FPRS_FEF))
- sigframe_size -= sizeof(__siginfo_fpu_t);
-
- sf = (struct new_signal_frame *)get_sigframe(ka, regs, sigframe_size);
-
- if (invalid_frame_pointer (sf, sigframe_size))
- goto sigill;
-
- if (current->tss.w_saved != 0) {
-#ifdef DEBUG_SIGNALS
- printk ("%s[%d]: Invalid user stack frame for "
- "signal delivery.\n", current->comm, current->pid);
-#endif
- goto sigill;
- }
-
- /* 2. Save the current process state */
- err = copy_to_user(&sf->info.si_regs, regs, sizeof (*regs));
-
- if (current->tss.fpsaved[0] & FPRS_FEF) {
- err |= save_fpu_state(regs, &sf->fpu_state);
- err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
- } else {
- err |= __put_user(0, &sf->fpu_save);
- }
-
- err |= __put_user(oldset->sig[0], &sf->info.si_mask);
- if (_NSIG_WORDS > 1)
- err |= __copy_to_user(sf->extramask, &oldset->sig[1],
- sizeof(sf->extramask));
-
- err |= copy_in_user((u64 *)sf,
- (u64 *)(regs->u_regs[UREG_FP]+STACK_BIAS),
- sizeof(struct reg_window));
- if (err)
- goto sigsegv;
-
- /* 3. signal handler back-trampoline and parameters */
- regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
- regs->u_regs[UREG_I0] = signo;
- regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
-
- /* 5. signal handler */
- regs->tpc = (unsigned long) ka->sa.sa_handler;
- regs->tnpc = (regs->tpc + 4);
-
- /* 4. return to kernel instructions */
- regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- return;
-
-sigill:
- lock_kernel();
- do_exit(SIGILL);
-sigsegv:
- lock_kernel();
- do_exit(SIGSEGV);
-}
-
-static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@@ -615,7+474,7 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, save_and_clear_fpu();
sigframe_size = RT_ALIGNEDSZ;
- if (!(current->tss.fpsaved[0] & FPRS_FEF))
+ if (!(current->thread.fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct rt_signal_frame *)get_sigframe(ka, regs, sigframe_size);
@@ -623,7+482,7 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, if (invalid_frame_pointer (sf, sigframe_size))
goto sigill;
- if (current->tss.w_saved != 0) {
+ if (current->thread.w_saved != 0) {
#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
@@ -634,7+493,7 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, /* 2. Save the current process state */
err = copy_to_user(&sf->regs, regs, sizeof (*regs));
- if (current->tss.fpsaved[0] & FPRS_FEF) {
+ if (current->thread.fpsaved[0] & FPRS_FEF) {
err |= save_fpu_state(regs, &sf->fpu_state);
err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
@@ -652,7+511,12 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, (u64 *)(regs->u_regs[UREG_FP]+STACK_BIAS),
sizeof(struct reg_window));
- err |= copy_to_user(&sf->info, info, sizeof(siginfo_t));
+ if (info)
+ err |= copy_to_user(&sf->info, info, sizeof(siginfo_t));
+ else {
+ err |= __put_user(signo, &sf->info.si_signo);
+ err |= __put_user(SI_NOINFO, &sf->info.si_code);
+ }
if (err)
goto sigsegv;
@@ -681,10+545,7 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
- if(ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, signr, oldset, info);
- else
- new_setup_frame(ka, regs, signr, oldset);
+ setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
if(ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
if(!(ka->sa.sa_flags & SA_NOMASK)) {
@@ -785,7+646,7 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, oldset = &current->blocked;
#ifdef CONFIG_SPARC32_COMPAT
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
extern asmlinkage int do_signal32(sigset_t *, struct pt_regs *,
unsigned long, int);
return do_signal32(oldset, regs, orig_i0, restart_syscall);
-/* $Id: signal32.c,v 1.48 1999/06/14 05:24:01 davem Exp $
+/* $Id: signal32.c,v 1.50 1999/07/30 09:35:25 davem Exp $
* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -202,9+202,9 @@ static inline int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
- err |= __get_user(current->tss.xfsr[0], &fpu->si_fsr);
- err |= __get_user(current->tss.gsr[0], &fpu->si_gsr);
- current->tss.fpsaved[0] |= fprs;
+ err |= __get_user(current->thread.xfsr[0], &fpu->si_fsr);
+ err |= __get_user(current->thread.gsr[0], &fpu->si_gsr);
+ current->thread.fpsaved[0] |= fprs;
return err;
}
@@ -285,7+285,7 @@ asmlinkage void do_sigreturn32(struct pt_regs *regs) int err;
synchronize_user_stack();
- if (current->tss.new_signal)
+ if (current->thread.flags & SPARC_FLAG_NEWSIGNALS)
return do_new_sigreturn32(regs);
scptr = (struct sigcontext32 *)
@@ -489,20+489,20 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc, err |= __put_user(pc, &sc->sigc_pc);
err |= __put_user(npc, &sc->sigc_npc);
psr = tstate_to_psr (regs->tstate);
- if(current->tss.fpsaved[0] & FPRS_FEF)
+ if(current->thread.fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sc->sigc_psr);
err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
- err |= __put_user(current->tss.w_saved, &sc->sigc_oswins);
+ err |= __put_user(current->thread.w_saved, &sc->sigc_oswins);
#if 0
/* w_saved is not currently used... */
- if(current->tss.w_saved)
- for(window = 0; window < current->tss.w_saved; window++) {
+ if(current->thread.w_saved)
+ for(window = 0; window < current->thread.w_saved; window++) {
sc->sigc_spbuf[window] =
- (char *)current->tss.rwbuf_stkptrs[window];
+ (char *)current->thread.rwbuf_stkptrs[window];
err |= copy_to_user(&sc->sigc_wbuf[window],
-				&current->tss.reg_window[window],
+				&current->thread.reg_window[window],
sizeof(struct reg_window));
}
else
@@ -511,15+511,15 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc, (u32 *)(regs->u_regs[UREG_FP]),
sizeof(struct reg_window32));
- current->tss.w_saved = 0; /* So process is allowed to execute. */
+ current->thread.w_saved = 0; /* So process is allowed to execute. */
err |= __put_user(signr, &sframep->sig_num);
if(signr == SIGSEGV ||
signr == SIGILL ||
signr == SIGFPE ||
signr == SIGBUS ||
signr == SIGEMT) {
- err |= __put_user(current->tss.sig_desc, &sframep->sig_code);
- err |= __put_user(current->tss.sig_address, &sframep->sig_address);
+ err |= __put_user(current->thread.sig_desc, &sframep->sig_code);
+ err |= __put_user(current->thread.sig_address, &sframep->sig_address);
} else {
err |= __put_user(0, &sframep->sig_code);
err |= __put_user(0, &sframep->sig_address);
@@ -544,15+544,15 @@ static inline int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu) unsigned long fprs;
int err = 0;
- fprs = current->tss.fpsaved[0];
+ fprs = current->thread.fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
- err |= __put_user(current->tss.xfsr[0], &fpu->si_fsr);
- err |= __put_user(current->tss.gsr[0], &fpu->si_gsr);
+ err |= __put_user(current->thread.xfsr[0], &fpu->si_fsr);
+ err |= __put_user(current->thread.gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
@@ -572,7+572,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg save_and_clear_fpu();
sigframe_size = NF_ALIGNEDSZ;
- if (!(current->tss.fpsaved[0] & FPRS_FEF))
+ if (!(current->thread.fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct new_signal_frame32 *)get_sigframe(&ka->sa, regs, sigframe_size);
@@ -585,7+585,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg goto sigill;
}
- if (current->tss.w_saved != 0) {
+ if (current->thread.w_saved != 0) {
#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
@@ -598,7+598,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
err |= __put_user(regs->y, &sf->info.si_regs.y);
psr = tstate_to_psr (regs->tstate);
- if(current->tss.fpsaved[0] & FPRS_FEF)
+ if(current->thread.fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->info.si_regs.psr);
for (i = 0; i < 16; i++)
@@ -738,7+738,7 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc, err |= __put_user(regs->tpc, &((*gr) [SVR4_PC]));
err |= __put_user(regs->tnpc, &((*gr) [SVR4_NPC]));
psr = tstate_to_psr (regs->tstate);
- if(current->tss.fpsaved[0] & FPRS_FEF)
+ if(current->thread.fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &((*gr) [SVR4_PSR]));
err |= __put_user(regs->y, &((*gr) [SVR4_Y]));
@@ -760,7+760,7 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc, err |= __put_user((u32)(long)gw, &mc->gwin);
/* 2. Number of windows to restore at setcontext (): */
- err |= __put_user(current->tss.w_saved, &gw->count);
+ err |= __put_user(current->thread.w_saved, &gw->count);
/* 3. Save each valid window
* Currently, it makes a copy of the windows from the kernel copy.
@@ -774,23+774,23 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc, * to flush the user windows.
*/
#if 0
- for(window = 0; window < current->tss.w_saved; window++) {
+ for(window = 0; window < current->thread.w_saved; window++) {
err |= __put_user((int *) &(gw->win [window]),
(int **)gw->winptr +window );
err |= copy_to_user(&gw->win [window],
-				&current->tss.reg_window [window],
+				&current->thread.reg_window [window],
sizeof (svr4_rwindow_t));
err |= __put_user(0, (int *)gw->winptr + window);
}
#endif
/* 4. We just pay attention to the gw->count field on setcontext */
- current->tss.w_saved = 0; /* So process is allowed to execute. */
+ current->thread.w_saved = 0; /* So process is allowed to execute. */
/* Setup the signal information. Solaris expects a bunch of
* information to be passed to the signal handler, we don't provide
* that much currently, should use those that David already
- * is providing with tss.sig_desc
+ * is providing with thread.sig_desc
*/
err |= __put_user(signr, &si->siginfo.signo);
err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
@@ -837,8+837,8 @@ svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs) synchronize_user_stack();
save_and_clear_fpu();
- if (current->tss.w_saved){
- printk ("Uh oh, w_saved is not zero (%d)\n", (int) current->tss.w_saved);
+ if (current->thread.w_saved){
+ printk ("Uh oh, w_saved is not zero (%d)\n", (int) current->thread.w_saved);
do_exit (SIGSEGV);
}
err = clear_user(uc, sizeof (*uc));
@@ -863,7+863,7 @@ svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs) err |= __put_user(0, &uc->mcontext.greg [SVR4_PSR]);
#else
i = tstate_to_psr(regs->tstate) & ~PSR_EF;
- if (current->tss.fpsaved[0] & FPRS_FEF)
+ if (current->thread.fpsaved[0] & FPRS_FEF)
i |= PSR_EF;
err |= __put_user(i, &uc->mcontext.greg [SVR4_PSR]);
#endif
@@ -890,7+890,7 @@ svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs) /* Set the context for a svr4 application, this is Solaris way to sigreturn */
asmlinkage int svr4_setcontext(svr4_ucontext_t *c, struct pt_regs *regs)
{
-	struct thread_struct *tp = &current->tss;
+	struct thread_struct *tp = &current->thread;
svr4_gregset_t *gr;
u32 pc, npc, psr;
sigset_t set;
@@ -990,7+990,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs save_and_clear_fpu();
sigframe_size = RT_ALIGNEDSZ;
- if (!(current->tss.fpsaved[0] & FPRS_FEF))
+ if (!(current->thread.fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct rt_signal_frame32 *)get_sigframe(&ka->sa, regs, sigframe_size);
@@ -1003,7+1003,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs goto sigill;
}
- if (current->tss.w_saved != 0) {
+ if (current->thread.w_saved != 0) {
#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
@@ -1016,7+1016,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs err |= __put_user(regs->tnpc, &sf->regs.npc);
err |= __put_user(regs->y, &sf->regs.y);
psr = tstate_to_psr (regs->tstate);
- if(current->tss.fpsaved[0] & FPRS_FEF)
+ if(current->thread.fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->regs.psr);
for (i = 0; i < 16; i++)
@@ -1032,6+1032,42 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs } else {
err |= __put_user(0, &sf->fpu_save);
}
+
+ err = __put_user (info->si_signo, &sf->info.si_signo);
+ err |= __put_user (info->si_errno, &sf->info.si_errno);
+ err |= __put_user (info->si_code, &sf->info.si_code);
+ if (info->si_code < 0)
+ err |= __copy_to_user (sf->info._sifields._pad, info->_sifields._pad, SI_PAD_SIZE);
+ else {
+ i = info->si_signo;
+ if (info->si_code == SI_USER)
+ i = SIGRTMIN;
+ switch (i) {
+ case SIGPOLL:
+ err |= __put_user (info->si_band, &sf->info.si_band);
+ err |= __put_user (info->si_fd, &sf->info.si_fd);
+ break;
+ case SIGCHLD:
+ err |= __put_user (info->si_pid, &sf->info.si_pid);
+ err |= __put_user (info->si_uid, &sf->info.si_uid);
+ err |= __put_user (info->si_status, &sf->info.si_status);
+ err |= __put_user (info->si_utime, &sf->info.si_utime);
+ err |= __put_user (info->si_stime, &sf->info.si_stime);
+ break;
+ case SIGSEGV:
+ case SIGILL:
+ case SIGFPE:
+ case SIGBUS:
+ case SIGEMT:
+ err |= __put_user ((long)info->si_addr, &sf->info.si_addr);
+ err |= __put_user (info->si_trapno, &sf->info.si_trapno);
+ break;
+ default:
+ err |= __put_user (info->si_pid, &sf->info.si_pid);
+ err |= __put_user (info->si_uid, &sf->info.si_uid);
+ break;
+ }
+ }
/* Setup sigaltstack */
err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
@@ -1040,13+1076,13 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
switch (_NSIG_WORDS) {
case 4: seta.sig[7] = (oldset->sig[3] >> 32);
- seta.sig[6] = oldset->sig[3];
+ seta.sig[6] = oldset->sig[3];
case 3: seta.sig[5] = (oldset->sig[2] >> 32);
- seta.sig[4] = oldset->sig[2];
+ seta.sig[4] = oldset->sig[2];
case 2: seta.sig[3] = (oldset->sig[1] >> 32);
- seta.sig[2] = oldset->sig[1];
+ seta.sig[2] = oldset->sig[1];
case 1: seta.sig[1] = (oldset->sig[0] >> 32);
- seta.sig[0] = oldset->sig[0];
+ seta.sig[0] = oldset->sig[0];
}
err |= __copy_to_user(&sf->mask, &seta, sizeof(sigset_t));
@@ -1113,7+1149,7 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, else {
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame32(ka, regs, signr, oldset, info);
- else if (current->tss.new_signal)
+ else if (current->thread.flags & SPARC_FLAG_NEWSIGNALS)
new_setup_frame32(ka, regs, signr, oldset);
else
setup_frame32(&ka->sa, regs->tpc, regs->tnpc, regs, signr, oldset);
@@ -1256,13+1292,13 @@ asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs * regs, if(signr != SIGCHLD)
continue;
- /* sys_wait4() grabs the master kernel lock, so
- * we need not do so, that sucker should be
- * threaded and would not be that difficult to
- * do anyways.
- */
- while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
- ;
+ /* sys_wait4() grabs the master kernel lock, so
+ * we need not do so, that sucker should be
+ * threaded and would not be that difficult to
+ * do anyways.
+ */
+ while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ ;
continue;
}
if(ka->sa.sa_handler == SIG_DFL) {
@@ -1312,10+1348,10 @@ asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs * regs, {
struct reg_window32 *rw = (struct reg_window32 *)(regs->u_regs[UREG_FP] & 0xffffffff);
unsigned int ins[8];
-
+
while(rw &&
!(((unsigned long) rw) & 0x3)) {
- copy_from_user(ins, &rw->ins[0], sizeof(ins));
+ copy_from_user(ins, &rw->ins[0], sizeof(ins));
printk("Caller[%08x](%08x,%08x,%08x,%08x,%08x,%08x)\n", ins[7], ins[0], ins[1], ins[2], ins[3], ins[4], ins[5]);
rw = (struct reg_window32 *)(unsigned long)ins[6];
}
@@ -1374,7+1410,7 @@ asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp) /* Now see if we want to update the new state. */
if (ssptr) {
void *ss_sp;
-
+
if (get_user((long)ss_sp, &ssptr->the_stack))
goto out;
/* If the current stack was set with sigaltstack, don't
@@ -53,7+53,7 @@ unsigned long cpu_present_map = 0; int smp_num_cpus = 1;
int smp_threads_ready = 0;
-__initfunc(void smp_setup(char *str, int *ints))
+void __init smp_setup(char *str, int *ints)
{
/* XXX implement me XXX */
}
@@ -151,13+151,17 @@ __initfunc(void smp_callin(void)) /* Clear this or we will die instantly when we
* schedule back to this idler...
*/
- current->tss.flags &= ~(SPARC_FLAG_NEWCHILD);
+ current->thread.flags &= ~(SPARC_FLAG_NEWCHILD);
+
+ /* Attach to the address space of init_task. */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
while(!smp_processors_ready)
membar("#LoadLoad");
}
-extern int cpu_idle(void *unused);
+extern int cpu_idle(void);
extern void init_IRQ(void);
void initialize_secondary(void)
@@ -169,7+173,7 @@ int start_secondary(void *unused) trap_init();
init_IRQ();
smp_callin();
- return cpu_idle(NULL);
+ return cpu_idle();
}
void cpu_panic(void)
@@ -216,9+220,17 @@ __initfunc(void smp_boot_cpus(void)) entry += phys_base - KERNBASE;
cookie += phys_base - KERNBASE;
kernel_thread(start_secondary, NULL, CLONE_PID);
- p = task[++cpucount];
+ cpucount++;
+
+ p = init_task.prev_task;
+ init_tasks[cpucount] = p;
+
p->processor = i;
p->has_cpu = 1; /* we schedule the first task manually */
+
+ del_from_runqueue(p);
+ unhash_process(p);
+
callin_flag = 0;
for (no = 0; no < linux_num_cpus; no++)
if (linux_cpus[no].mid == i)
@@ -384,6+396,9 @@ void smp_flush_tlb_all(void) * are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
+ * XXX I diked out the fancy flush avoidance code for the
+ * XXX swapping cases for now until the new MM code stabilizes. -DaveM
+ *
* The SMP TLB coherency scheme we use works as follows:
*
* 1) mm->cpu_vm_mask is a bit mask of which cpus an address
@@ -395,16+410,16 @@ void smp_flush_tlb_all(void) * cross calls.
*
* One invariant is that when a cpu switches to a process, and
- * that processes tsk->mm->cpu_vm_mask does not have the current
- * cpu's bit set, that tlb context is flushed locally.
+ * that processes tsk->active_mm->cpu_vm_mask does not have the
+ * current cpu's bit set, that tlb context is flushed locally.
*
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->mm->cpu_vm_mask and performing the flush
- * locally only. This will force any subsequent cpus which run this
- * task to flush the context from the local tlb if the process migrates
- * to another cpu (again).
+ * processor's in current->active_mm->cpu_vm_mask and performing the
+ * flush locally only. This will force any subsequent cpus which run
+ * this task to flush the context from the local tlb if the process
+ * migrates to another cpu (again).
*
* 3) For shared address spaces (threads) and swapping we bite the
* bullet for most cases and perform the cross call.
@@ -422,13+437,13 @@ void smp_flush_tlb_all(void) */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
- u32 ctx = mm->context & 0x3ff;
+ u32 ctx = CTX_HWBITS(mm->context);
- if(mm == current->mm && atomic_read(&mm->count) == 1) {
- if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
- mm->cpu_vm_mask = (1UL << smp_processor_id());
+ if (mm == current->active_mm &&
+ atomic_read(&mm->mm_users) == 1 &&
+ (mm->cpu_vm_mask == (1UL << smp_processor_id())))
goto local_flush_and_out;
- }
+
smp_cross_call(&xcall_flush_tlb_mm, ctx, 0, 0);
local_flush_and_out:
@@ -438,15+453,15 @@ local_flush_and_out: void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- u32 ctx = mm->context & 0x3ff;
+ u32 ctx = CTX_HWBITS(mm->context);
start &= PAGE_MASK;
end &= PAGE_MASK;
- if(mm == current->mm && atomic_read(&mm->count) == 1) {
- if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
- mm->cpu_vm_mask = (1UL << smp_processor_id());
+ if(mm == current->active_mm &&
+ atomic_read(&mm->mm_users) == 1 &&
+ (mm->cpu_vm_mask == (1UL << smp_processor_id())))
goto local_flush_and_out;
- }
+
smp_cross_call(&xcall_flush_tlb_range, ctx, start, end);
local_flush_and_out:
@@ -455,30+470,15 @@ local_flush_and_out:
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
- u32 ctx = mm->context & 0x3ff;
+ u32 ctx = CTX_HWBITS(mm->context);
page &= PAGE_MASK;
- if(mm == current->mm && atomic_read(&mm->count) == 1) {
- if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
- mm->cpu_vm_mask = (1UL << smp_processor_id());
+ if(mm == current->active_mm &&
+ atomic_read(&mm->mm_users) == 1 &&
+ (mm->cpu_vm_mask == (1UL << smp_processor_id()))) {
goto local_flush_and_out;
- } else {
- /* Try to handle two special cases to avoid cross calls
- * in common scenerios where we are swapping process
- * pages out.
- */
- if(((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK) ||
- (mm->cpu_vm_mask == 0)) {
- /* A dead context cannot ever become "alive" until
- * a task switch is done to it.
- */
- return; /* It's dead, nothing to do. */
- }
- if(mm->cpu_vm_mask == (1UL << smp_processor_id())) {
- __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
- return; /* Only local flush is necessary. */
- }
}
+
smp_cross_call(&xcall_flush_tlb_page, ctx, page, 0);
local_flush_and_out:
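[Editor's note, not part of the patch: the TLB coherency comment above reduces to one predicate that the three flush routines now share. The helper below is a hypothetical, illustrative sketch only; tlb_flush_is_local() does not exist in the patch, and current, mm_struct, atomic_read() and smp_processor_id() are assumed from the kernel headers of this era.]

	/* Sketch only: the post-patch condition under which a purely local
	 * TLB flush suffices, as used by smp_flush_tlb_mm/range/page above.
	 */
	static inline int tlb_flush_is_local(struct mm_struct *mm)
	{
		return mm == current->active_mm &&		/* this cpu is actively running mm  */
		       atomic_read(&mm->mm_users) == 1 &&	/* no other users (threads) of mm   */
		       mm->cpu_vm_mask == (1UL << smp_processor_id()); /* no other cpu holds entries */
	}

When the predicate holds, the routines above fall straight through to their local_flush_and_out label; otherwise they first issue an smp_cross_call() to the matching xcall handler and then perform the local flush.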
-/* $Id: sparc64_ksyms.c,v 1.60 1999/07/03 22:11:12 davem Exp $
+/* $Id: sparc64_ksyms.c,v 1.61 1999/07/23 01:56:48 davem Exp $
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: sys_sparc.c,v 1.27 1999/06/02 12:06:34 jj Exp $
+/* $Id: sys_sparc.c,v 1.28 1999/07/30 09:35:27 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -170,7+170,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, }
retval = -EINVAL;
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
if (len > 0xf0000000UL || addr > 0xf0000000UL - len)
goto out_putf;
} else {
@@ -281,40+281,40 @@ asmlinkage int sys_utrap_install(utrap_entry_t type, utrap_handler_t new_p, return -EINVAL;
if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
if (old_p) {
- if (!current->tss.utraps)
+ if (!current->thread.utraps)
put_user_ret(NULL, old_p, -EFAULT);
else
- put_user_ret((utrap_handler_t)(current->tss.utraps[type]), old_p, -EFAULT);
+ put_user_ret((utrap_handler_t)(current->thread.utraps[type]), old_p, -EFAULT);
}
if (old_d)
put_user_ret(NULL, old_d, -EFAULT);
return 0;
}
lock_kernel();
- if (!current->tss.utraps) {
- current->tss.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
- if (!current->tss.utraps) return -ENOMEM;
- current->tss.utraps[0] = 1;
- memset(current->tss.utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
+ if (!current->thread.utraps) {
+ current->thread.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+ if (!current->thread.utraps) return -ENOMEM;
+ current->thread.utraps[0] = 1;
+ memset(current->thread.utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
} else {
- if ((utrap_handler_t)current->tss.utraps[type] != new_p && current->tss.utraps[0] > 1) {
- long *p = current->tss.utraps;
+ if ((utrap_handler_t)current->thread.utraps[type] != new_p && current->thread.utraps[0] > 1) {
+ long *p = current->thread.utraps;
- current->tss.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
- if (!current->tss.utraps) {
- current->tss.utraps = p;
+ current->thread.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+ if (!current->thread.utraps) {
+ current->thread.utraps = p;
return -ENOMEM;
}
p[0]--;
- current->tss.utraps[0] = 1;
- memcpy(current->tss.utraps+1, p+1, UT_TRAP_INSTRUCTION_31*sizeof(long));
+ current->thread.utraps[0] = 1;
+ memcpy(current->thread.utraps+1, p+1, UT_TRAP_INSTRUCTION_31*sizeof(long));
}
}
if (old_p)
- put_user_ret((utrap_handler_t)(current->tss.utraps[type]), old_p, -EFAULT);
+ put_user_ret((utrap_handler_t)(current->thread.utraps[type]), old_p, -EFAULT);
if (old_d)
put_user_ret(NULL, old_d, -EFAULT);
- current->tss.utraps[type] = (long)new_p;
+ current->thread.utraps[type] = (long)new_p;
unlock_kernel();
return 0;
}
@@ -363,10+363,10 @@ update_perfctrs(void) unsigned long pic, tmp;
read_pic(pic);
- tmp = (current->tss.kernel_cntd0 += (unsigned int)pic);
- __put_user(tmp, current->tss.user_cntd0);
- tmp = (current->tss.kernel_cntd1 += (pic >> 32));
- __put_user(tmp, current->tss.user_cntd1);
+ tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
+ __put_user(tmp, current->thread.user_cntd0);
+ tmp = (current->thread.kernel_cntd1 += (pic >> 32));
+ __put_user(tmp, current->thread.user_cntd1);
reset_pic();
}
@@ -377,24+377,24 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar
switch(opcode) {
case PERFCTR_ON:
- current->tss.pcr_reg = arg2;
- current->tss.user_cntd0 = (u64 *) arg0;
- current->tss.user_cntd1 = (u64 *) arg1;
- current->tss.kernel_cntd0 =
- current->tss.kernel_cntd1 = 0;
+ current->thread.pcr_reg = arg2;
+ current->thread.user_cntd0 = (u64 *) arg0;
+ current->thread.user_cntd1 = (u64 *) arg1;
+ current->thread.kernel_cntd0 =
+ current->thread.kernel_cntd1 = 0;
write_pcr(arg2);
reset_pic();
- current->tss.flags |= SPARC_FLAG_PERFCTR;
+ current->thread.flags |= SPARC_FLAG_PERFCTR;
break;
case PERFCTR_OFF:
err = -EINVAL;
- if ((current->tss.flags & SPARC_FLAG_PERFCTR) != 0) {
- current->tss.user_cntd0 =
- current->tss.user_cntd1 = NULL;
- current->tss.pcr_reg = 0;
+ if ((current->thread.flags & SPARC_FLAG_PERFCTR) != 0) {
+ current->thread.user_cntd0 =
+ current->thread.user_cntd1 = NULL;
+ current->thread.pcr_reg = 0;
write_pcr(0);
- current->tss.flags &= ~(SPARC_FLAG_PERFCTR);
+ current->thread.flags &= ~(SPARC_FLAG_PERFCTR);
err = 0;
}
break;
@@ -402,50+402,50 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar case PERFCTR_READ: {
unsigned long pic, tmp;
- if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
+ if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
read_pic(pic);
- tmp = (current->tss.kernel_cntd0 += (unsigned int)pic);
- err |= __put_user(tmp, current->tss.user_cntd0);
- tmp = (current->tss.kernel_cntd1 += (pic >> 32));
- err |= __put_user(tmp, current->tss.user_cntd1);
+ tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
+ err |= __put_user(tmp, current->thread.user_cntd0);
+ tmp = (current->thread.kernel_cntd1 += (pic >> 32));
+ err |= __put_user(tmp, current->thread.user_cntd1);
reset_pic();
break;
}
case PERFCTR_CLRPIC:
- if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
+ if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
- current->tss.kernel_cntd0 =
- current->tss.kernel_cntd1 = 0;
+ current->thread.kernel_cntd0 =
+ current->thread.kernel_cntd1 = 0;
reset_pic();
break;
case PERFCTR_SETPCR: {
u64 *user_pcr = (u64 *)arg0;
- if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
+ if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
- err |= __get_user(current->tss.pcr_reg, user_pcr);
- write_pcr(current->tss.pcr_reg);
- current->tss.kernel_cntd0 =
- current->tss.kernel_cntd1 = 0;
+ err |= __get_user(current->thread.pcr_reg, user_pcr);
+ write_pcr(current->thread.pcr_reg);
+ current->thread.kernel_cntd0 =
+ current->thread.kernel_cntd1 = 0;
reset_pic();
break;
}
case PERFCTR_GETPCR: {
u64 *user_pcr = (u64 *)arg0;
- if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
+ if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
- err |= __put_user(current->tss.pcr_reg, user_pcr);
+ err |= __put_user(current->thread.pcr_reg, user_pcr);
break;
}
-/* $Id: sys_sparc32.c,v 1.112 1999/06/29 12:34:02 davem Exp $
+/* $Id: sys_sparc32.c,v 1.117 1999/08/02 08:39:40 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -1663,85+1663,6 @@ asmlinkage int sys32_rt_sigpending(sigset_t32 *set, __kernel_size_t32 sigsetsize return ret;
}
-siginfo_t32 *
-siginfo64to32(siginfo_t32 *d, siginfo_t *s)
-{
- memset (&d, 0, sizeof(siginfo_t32));
- d->si_signo = s->si_signo;
- d->si_errno = s->si_errno;
- d->si_code = s->si_code;
- if (s->si_signo >= SIGRTMIN) {
- d->si_pid = s->si_pid;
- d->si_uid = s->si_uid;
- /* XXX: Ouch, how to find this out??? */
- d->si_int = s->si_int;
- } else switch (s->si_signo) {
- /* XXX: What about POSIX1.b timers */
- case SIGCHLD:
- d->si_pid = s->si_pid;
- d->si_status = s->si_status;
- d->si_utime = s->si_utime;
- d->si_stime = s->si_stime;
- break;
- case SIGSEGV:
- case SIGBUS:
- case SIGFPE:
- case SIGILL:
- d->si_addr = (long)(s->si_addr);
- /* XXX: Do we need to translate this from sparc64 to sparc32 traps? */
- d->si_trapno = s->si_trapno;
- break;
- case SIGPOLL:
- d->si_band = s->si_band;
- d->si_fd = s->si_fd;
- break;
- default:
- d->si_pid = s->si_pid;
- d->si_uid = s->si_uid;
- break;
- }
- return d;
-}
-
-siginfo_t *
-siginfo32to64(siginfo_t *d, siginfo_t32 *s)
-{
- d->si_signo = s->si_signo;
- d->si_errno = s->si_errno;
- d->si_code = s->si_code;
- if (s->si_signo >= SIGRTMIN) {
- d->si_pid = s->si_pid;
- d->si_uid = s->si_uid;
- /* XXX: Ouch, how to find this out??? */
- d->si_int = s->si_int;
- } else switch (s->si_signo) {
- /* XXX: What about POSIX1.b timers */
- case SIGCHLD:
- d->si_pid = s->si_pid;
- d->si_status = s->si_status;
- d->si_utime = s->si_utime;
- d->si_stime = s->si_stime;
- break;
- case SIGSEGV:
- case SIGBUS:
- case SIGFPE:
- case SIGILL:
- d->si_addr = (void *)A(s->si_addr);
- /* XXX: Do we need to translate this from sparc32 to sparc64 traps? */
- d->si_trapno = s->si_trapno;
- break;
- case SIGPOLL:
- d->si_band = s->si_band;
- d->si_fd = s->si_fd;
- break;
- default:
- d->si_pid = s->si_pid;
- d->si_uid = s->si_uid;
- break;
- }
- return d;
-}
-
extern asmlinkage int
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
const struct timespec *uts, size_t sigsetsize);
@@ -1753,10+1674,9 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, sigset_t s;
sigset_t32 s32;
struct timespec t;
- int ret;
+ int ret, err, i;
mm_segment_t old_fs = get_fs();
siginfo_t info;
- siginfo_t32 info32;
if (copy_from_user (&s32, uthese, sizeof(sigset_t32)))
return -EFAULT;
@@ -1776,8+1696,43 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, ret = sys_rt_sigtimedwait(&s, &info, &t, sigsetsize);
set_fs (old_fs);
if (ret >= 0 && uinfo) {
- if (copy_to_user (uinfo, siginfo64to32(&info32, &info), sizeof(siginfo_t32)))
- return -EFAULT;
+ err = put_user (info.si_signo, &uinfo->si_signo);
+ err |= __put_user (info.si_errno, &uinfo->si_errno);
+ err |= __put_user (info.si_code, &uinfo->si_code);
+ if (info.si_code < 0)
+ err |= __copy_to_user (uinfo->_sifields._pad, info._sifields._pad, SI_PAD_SIZE);
+ else {
+ i = info.si_signo;
+ if (info.si_code == SI_USER)
+ i = SIGRTMIN;
+ switch (i) {
+ case SIGPOLL:
+ err |= __put_user (info.si_band, &uinfo->si_band);
+ err |= __put_user (info.si_fd, &uinfo->si_fd);
+ break;
+ case SIGCHLD:
+ err |= __put_user (info.si_pid, &uinfo->si_pid);
+ err |= __put_user (info.si_uid, &uinfo->si_uid);
+ err |= __put_user (info.si_status, &uinfo->si_status);
+ err |= __put_user (info.si_utime, &uinfo->si_utime);
+ err |= __put_user (info.si_stime, &uinfo->si_stime);
+ break;
+ case SIGSEGV:
+ case SIGILL:
+ case SIGFPE:
+ case SIGBUS:
+ case SIGEMT:
+ err |= __put_user ((long)info.si_addr, &uinfo->si_addr);
+ err |= __put_user (info.si_trapno, &uinfo->si_trapno);
+ break;
+ default:
+ err |= __put_user (info.si_pid, &uinfo->si_pid);
+ err |= __put_user (info.si_uid, &uinfo->si_uid);
+ break;
+ }
+ }
+ if (err)
+ ret = -EFAULT;
}
return ret;
}
@@ -1789,14+1744,12 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo)
{
siginfo_t info;
- siginfo_t32 info32;
int ret;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&info32, uinfo, sizeof(siginfo_t32)))
+ if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
+ copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
return -EFAULT;
- /* XXX: Is this correct? */
- siginfo32to64(&info, &info32);
set_fs (KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, &info);
set_fs (old_fs);
@@ -2659,7+2612,7 @@ asmlinkage int sys32_sigaction (int sig, struct old_sigaction32 *act, struct old int ret;
if(sig < 0) {
- current->tss.new_signal = 1;
+ current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
sig = -sig;
}
@@ -2703,7+2656,7 @@ sys32_rt_sigaction(int sig, struct sigaction32 *act, struct sigaction32 *oact, /* All tasks which use RT signals (effectively) use
* new style signals.
*/
- current->tss.new_signal = 1;
+ current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
if (act) {
new_ka.ka_restorer = restorer;
@@ -2883,6+2836,8 @@ asmlinkage int sparc32_execve(struct pt_regs *regs) int error, base = 0;
char *filename;
+ /* User register window flush is done by entry.S */
+
/* Check for indirect call. */
if((u32)regs->u_regs[UREG_G1] == 0)
base = 1;
@@ -2899,8+2854,8 @@ asmlinkage int sparc32_execve(struct pt_regs *regs)
if(!error) {
fprs_write(0);
- current->tss.xfsr[0] = 0;
- current->tss.fpsaved[0] = 0;
+ current->thread.xfsr[0] = 0;
+ current->thread.fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
-/* $Id: sys_sunos32.c,v 1.28 1999/06/29 12:34:04 davem Exp $
+/* $Id: sys_sunos32.c,v 1.30 1999/07/30 09:35:31 davem Exp $
* sys_sunos32.c: SunOS binary compatability layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -557,9+557,9 @@ asmlinkage int sunos_nosys(void) struct pt_regs *regs;
lock_kernel();
- regs = current->tss.kregs;
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = regs->u_regs[UREG_G1];
+ regs = current->thread.kregs;
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = regs->u_regs[UREG_G1];
send_sig(SIGSYS, current, 1);
printk("Process makes ni_syscall number %d, register dump:\n",
(int) regs->u_regs[UREG_G1]);
@@ -1159,7+1159,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4) if(!kmbuf)
break;
sp = (struct sparc_stackf32 *)
- (current->tss.kregs->u_regs[UREG_FP] & 0xffffffffUL);
+ (current->thread.kregs->u_regs[UREG_FP] & 0xffffffffUL);
if(get_user(arg5, &sp->xxargs[0])) {
rval = -EFAULT;
break;
-/* $Id: systbls.S,v 1.54 1999/06/02 12:06:31 jj Exp $
+/* $Id: systbls.S,v 1.56 1999/07/31 00:06:17 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -59,7+59,7 @@ sys_call_table32: .word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys_uselib, old32_readdir
.word sys_nis_syscall, sys32_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
-/*210*/ .word sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys32_sysinfo
+/*210*/ .word sys_nis_syscall, sys_nis_syscall, sys_waitpid, sys_swapoff, sys32_sysinfo
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
/*220*/ .word sys32_sigprocmask, sys32_create_module, sys32_delete_module, sys32_get_kernel_syms, sys_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
@@ -112,15+112,15 @@ sys_call_table: .word sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
/*170*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
.word sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_query_module
+/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_query_module
.word sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_newuname
/*190*/ .word sys_init_module, sys_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
-/*200*/ .word sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_nis_syscall
+/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
.word sys_nis_syscall, sys_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
-/*210*/ .word sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
- .word sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
-/*220*/ .word sys_sigprocmask, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
+/*210*/ .word sys_nis_syscall, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
+ .word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
+/*220*/ .word sys_nis_syscall, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
/*230*/ .word sys_select, sys_time, sys_nis_syscall, sys_stime, sys_nis_syscall
.word sys_nis_syscall, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/* $Id: traps.c,v 1.60 1999/06/02 19:19:55 jj Exp $
+/* $Id: traps.c,v 1.61 1999/07/30 09:35:32 davem Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -147,12+147,12 @@ void syscall_trace_entry(unsigned long g1, struct pt_regs *regs) if(i)
printk(",");
if(!sdp->arg_is_string[i]) {
- if (current->tss.flags & SPARC_FLAG_32BIT)
+ if (current->thread.flags & SPARC_FLAG_32BIT)
printk("%08x", (unsigned int)regs->u_regs[UREG_I0 + i]);
else
printk("%016lx", regs->u_regs[UREG_I0 + i]);
} else {
- if (current->tss.flags & SPARC_FLAG_32BIT)
+ if (current->thread.flags & SPARC_FLAG_32BIT)
strncpy_from_user(scall_strbuf,
(char *)(regs->u_regs[UREG_I0 + i] & 0xffffffff),
512);
@@ -178,7+178,7 @@ unsigned long syscall_trace_exit(unsigned long retval, struct pt_regs *regs) }
#endif /* SYSCALL_TRACING */
-#if 0
+#if 1
void rtrap_check(struct pt_regs *regs)
{
register unsigned long pgd_phys asm("o1");
@@ -219,7+219,7 @@ void rtrap_check(struct pt_regs *regs)
if((pgd_phys != __pa(current->mm->pgd)) ||
((pgd_cache != 0) &&
- (pgd_cache != pgd_val(current->mm->pgd[0]))) ||
+ (pgd_cache != pgd_val(current->mm->pgd[0])<<11UL)) ||
(g1_or_g3 != (0xfffffffe00000000UL | 0x0000000000000018UL)) ||
#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
@@ -228,18+228,17 @@ void rtrap_check(struct pt_regs *regs) #undef KERN_LOWBITS
((ctx != (current->mm->context & 0x3ff)) ||
(ctx == 0) ||
- (current->tss.ctx != ctx))) {
+ (CTX_HWBITS(current->mm->context) != ctx))) {
printk("SHIT[%s:%d]: "
- "(PP[%016lx] CACH[%016lx] CTX[%x] g1g3[%016lx] g2[%016lx]) ",
+ "(PP[%016lx] CACH[%016lx] CTX[%lx] g1g3[%016lx] g2[%016lx]) ",
current->comm, current->pid,
pgd_phys, pgd_cache, ctx, g1_or_g3, g2);
printk("SHIT[%s:%d]: "
- "[PP[%016lx] CACH[%016lx] CTX[%x:%x]] PC[%016lx:%016lx]\n",
+ "[PP[%016lx] CACH[%016lx] CTX[%lx]] PC[%016lx:%016lx]\n",
current->comm, current->pid,
__pa(current->mm->pgd),
pgd_val(current->mm->pgd[0]),
current->mm->context & 0x3ff,
- current->tss.ctx,
regs->tpc, regs->tnpc);
show_regs(regs);
#if 1
@@ -262,8+261,8 @@ void bad_trap (struct pt_regs *regs, long lvl) }
if (regs->tstate & TSTATE_PRIV)
die_if_kernel ("Kernel bad trap", regs);
- current->tss.sig_desc = SUBSIG_BADTRAP(lvl - 0x100);
- current->tss.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_BADTRAP(lvl - 0x100);
+ current->thread.sig_address = regs->tpc;
force_sig(SIGILL, current);
unlock_kernel ();
}
@@ -289,8+288,8 @@ void instruction_access_exception (struct pt_regs *regs, #endif
die_if_kernel("Iax", regs);
}
- current->tss.sig_desc = SUBSIG_ILLINST;
- current->tss.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_ILLINST;
+ current->thread.sig_address = regs->tpc;
force_sig(SIGILL, current);
unlock_kernel();
}
@@ -402,8+401,8 @@ void do_fpe_common(struct pt_regs *regs) regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = SUBSIG_FPERROR;
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_FPERROR;
send_sig(SIGFPE, current, 1);
}
}
@@ -411,7+410,7 @@ void do_fpe_common(struct pt_regs *regs) void do_fpieee(struct pt_regs *regs)
{
#ifdef DEBUG_FPU
- printk("fpieee %016lx\n", current->tss.xfsr[0]);
+ printk("fpieee %016lx\n", current->thread.xfsr[0]);
#endif
do_fpe_common(regs);
}
@@ -423,7+422,7 @@ void do_fpother(struct pt_regs *regs) struct fpustate *f = FPUSTATE;
int ret = 0;
- switch ((current->tss.xfsr[0] & 0x1c000)) {
+ switch ((current->thread.xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
ret = do_mathemu(regs, f);
@@ -431,7+430,7 @@ void do_fpother(struct pt_regs *regs) }
if (ret) return;
#ifdef DEBUG_FPU
- printk("fpother %016lx\n", current->tss.xfsr[0]);
+ printk("fpother %016lx\n", current->thread.xfsr[0]);
#endif
do_fpe_common(regs);
}
@@ -440,8+439,8 @@ void do_tof(struct pt_regs *regs) {
if(regs->tstate & TSTATE_PRIV)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = SUBSIG_TAG; /* as good as any */
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_TAG; /* as good as any */
send_sig(SIGEMT, current, 1);
}
@@ -540,7+539,7 @@ void do_illegal_instruction(struct pt_regs *regs)
if(tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
- if(current->tss.flags & SPARC_FLAG_32BIT)
+ if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
@@ -551,8+550,8 @@ void do_illegal_instruction(struct pt_regs *regs) return;
}
}
- current->tss.sig_address = pc;
- current->tss.sig_desc = SUBSIG_ILLINST;
+ current->thread.sig_address = pc;
+ current->thread.sig_desc = SUBSIG_ILLINST;
send_sig(SIGILL, current, 1);
}
@@ -565,23+564,23 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), sfar, sfsr);
} else {
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = SUBSIG_PRIVINST;
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGBUS, current, 1);
}
}
void do_privop(struct pt_regs *regs)
{
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = SUBSIG_PRIVINST;
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
void do_privact(struct pt_regs *regs)
{
- current->tss.sig_address = regs->tpc;
- current->tss.sig_desc = SUBSIG_PRIVINST;
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
@@ -590,8+589,8 @@ void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long n {
if(tstate & TSTATE_PRIV)
die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
- current->tss.sig_address = pc;
- current->tss.sig_desc = SUBSIG_PRIVINST;
+ current->thread.sig_address = pc;
+ current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
@@ -727,4+726,11 @@ void cache_flush_trap(struct pt_regs *regs)
void trap_init(void)
{
+ /* Attach to the address space of init_task. */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+
+ /* NOTE: Other cpus have this done as they are started
+ * up on SMP.
+ */
}
-/* $Id: unaligned.c,v 1.16 1999/05/25 16:53:15 jj Exp $
+/* $Id: unaligned.c,v 1.18 1999/08/02 08:39:44 davem Exp $
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
@@ -70,7+70,7 @@ static inline int decode_access_size(unsigned int insn) return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
- die_if_kernel("Byte sized unaligned access?!?!", current->tss.kregs);
+ die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
}
}
@@ -117,7+117,7 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
- } else if (current->tss.flags & SPARC_FLAG_32BIT) {
+ } else if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
@@ -137,7+137,7 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
- } else if (current->tss.flags & SPARC_FLAG_32BIT) {
+ } else if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long *)&win32->locals[reg - 16];
@@ -164,10+164,10 @@ static inline unsigned long compute_effective_address(struct pt_regs *regs, }
}
-/* This is just to make gcc think panic does return... */
-static void unaligned_panic(char *str)
+/* This is just to make gcc think die_if_kernel does return... */
+static void unaligned_panic(char *str, struct pt_regs *regs)
{
- panic(str);
+ die_if_kernel(str, regs);
}
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
@@ -380,7+380,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u if(!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
regs->tpc);
- unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
+ unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
@@ -453,7+453,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) if (rd)
regs->u_regs[rd] = ret;
} else {
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
@@ -480,9+480,9 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
save_and_clear_fpu();
- current->tss.xfsr[0] &= ~0x1c000;
+ current->thread.xfsr[0] &= ~0x1c000;
if (freg & 3) {
- current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
@@ -490,7+490,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) /* STQ */
u64 first = 0, second = 0;
- if (current->tss.fpsaved[0] & flag) {
+ if (current->thread.fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
@@ -565,18+565,18 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) break;
}
}
- if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
- current->tss.fpsaved[0] = FPRS_FEF;
- current->tss.gsr[0] = 0;
+ if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
+ current->thread.fpsaved[0] = FPRS_FEF;
+ current->thread.gsr[0] = 0;
}
- if (!(current->tss.fpsaved[0] & flag)) {
+ if (!(current->thread.fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
- current->tss.fpsaved[0] |= flag;
+ current->thread.fpsaved[0] |= flag;
}
advance(regs);
return 1;
@@ -609,7+609,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if(tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
- if(current->tss.flags & SPARC_FLAG_32BIT)
+ if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
asi = sfsr >> 16;
@@ -629,18+629,18 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
- if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
- current->tss.fpsaved[0] = FPRS_FEF;
- current->tss.gsr[0] = 0;
+ if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
+ current->thread.fpsaved[0] = FPRS_FEF;
+ current->thread.gsr[0] = 0;
}
- if (!(current->tss.fpsaved[0] & flag)) {
+ if (!(current->thread.fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
- current->tss.fpsaved[0] |= flag;
+ current->thread.fpsaved[0] |= flag;
} else {
daex: data_access_exception(regs);
return;
@@ -661,7+661,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if(tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
- if(current->tss.flags & SPARC_FLAG_32BIT)
+ if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
@@ -672,7+672,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr (asi < ASI_P))
goto daex;
save_and_clear_fpu();
- if (current->tss.fpsaved[0] & flag)
+ if (current->thread.fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
-/* $Id: winfixup.S,v 1.27 1998/09/25 01:09:14 davem Exp $
+/* $Id: winfixup.S,v 1.28 1999/07/30 09:35:34 davem Exp $
*
* winfixup.S: Handle cases where user stack pointer is found to be bogus.
*
@@ -95,56+95,56 @@ fill_fixup: * do not touch %g7 or %g2 so we handle the two cases fine.
*/
spill_fixup:
- lduh [%g6 + AOFF_task_tss + AOFF_thread_flags], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1
andcc %g1, SPARC_FLAG_32BIT, %g0
- lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1
sll %g1, 3, %g3
add %g6, %g3, %g3
- stx %sp, [%g3 + AOFF_task_tss + AOFF_thread_rwbuf_stkptrs]
+ stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs]
sll %g1, 7, %g3
bne,pt %xcc, 1f
add %g6, %g3, %g3
- stx %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- stx %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
-
- stx %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
- stx %l3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- stx %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
- stx %l5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- stx %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- stx %l7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- stx %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x40]
- stx %i1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x48]
-
- stx %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x50]
- stx %i3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x58]
- stx %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x60]
- stx %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x68]
- stx %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x70]
+ stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+ stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+
+ stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+ stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+ stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
+ stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
+ stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
+
+ stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
+ stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
+ stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
+ stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
+ stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
b,pt %xcc, 2f
- stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
-1: stw %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
-
- stw %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x04]
- stw %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- stw %l3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x0c]
- stw %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
- stw %l5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x14]
- stw %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- stw %l7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x1c]
- stw %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
-
- stw %i1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x24]
- stw %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- stw %i3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x2c]
- stw %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- stw %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x34]
- stw %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- stw %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x3c]
+ stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
+1: stw %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+
+ stw %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x04]
+ stw %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+ stw %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x0c]
+ stw %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+ stw %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x14]
+ stw %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ stw %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x1c]
+ stw %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+
+ stw %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x24]
+ stw %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ stw %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x2c]
+ stw %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ stw %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x34]
+ stw %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
+ stw %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x3c]
2: add %g1, 1, %g1
- sth %g1, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
+ stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
saved
@@ -208,47+208,47 @@ fill_fixup_mna: b,pt %xcc, rtrap
nop ! yes, the nop is correct
spill_fixup_mna:
- lduh [%g6 + AOFF_task_tss + AOFF_thread_flags], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1
andcc %g1, SPARC_FLAG_32BIT, %g0
- lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1
sll %g1, 3, %g3
add %g6, %g3, %g3
- stx %sp, [%g3 + AOFF_task_tss + AOFF_thread_rwbuf_stkptrs]
+ stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs]
sll %g1, 7, %g3
bne,pt %xcc, 1f
add %g6, %g3, %g3
- stx %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- stx %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- stx %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
- stx %l3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- stx %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
-
- stx %l5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- stx %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- stx %l7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- stx %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x40]
- stx %i1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x48]
- stx %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x50]
- stx %i3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x58]
- stx %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x60]
-
- stx %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x68]
- stx %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x70]
- stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
+ stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+ stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+ stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+ stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+
+ stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
+ stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
+ stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
+ stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
+ stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
+ stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
+
+ stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
+ stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
+ stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
b,pt %xcc, 2f
add %g1, 1, %g1
-1: std %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- std %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- std %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
-
- std %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- std %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
- std %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- std %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- std %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
+1: std %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+ std %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+ std %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+
+ std %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ std %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+ std %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ std %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ std %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
add %g1, 1, %g1
-2: sth %g1, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
+2: stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
@@ -315,47+315,47 @@ fill_fixup_dax: b,pt %xcc, rtrap
nop ! yes, the nop is correct
spill_fixup_dax:
- lduh [%g6 + AOFF_task_tss + AOFF_thread_flags], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1
andcc %g1, SPARC_FLAG_32BIT, %g0
- lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1
sll %g1, 3, %g3
add %g6, %g3, %g3
- stx %sp, [%g3 + AOFF_task_tss + AOFF_thread_rwbuf_stkptrs]
+ stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs]
sll %g1, 7, %g3
bne,pt %xcc, 1f
add %g6, %g3, %g3
- stx %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- stx %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- stx %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
- stx %l3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- stx %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
-
- stx %l5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- stx %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- stx %l7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- stx %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x40]
- stx %i1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x48]
- stx %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x50]
- stx %i3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x58]
- stx %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x60]
-
- stx %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x68]
- stx %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x70]
- stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
+ stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+ stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+ stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+ stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+
+ stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
+ stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
+ stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
+ stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
+ stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
+ stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
+
+ stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
+ stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
+ stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
b,pt %xcc, 2f
add %g1, 1, %g1
-1: std %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- std %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- std %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
-
- std %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
- std %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
- std %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- std %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- std %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
+1: std %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
+ std %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
+ std %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
+
+ std %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
+ std %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
+ std %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
+ std %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
+ std %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
add %g1, 1, %g1
-2: sth %g1, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
+2: stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
-/* $Id: VIScopy.S,v 1.20 1999/05/25 16:52:57 jj Exp $
+/* $Id: VIScopy.S,v 1.21 1999/07/30 09:35:35 davem Exp $
* VIScopy.S: High speed copy operations utilizing the UltraSparc
* Visual Instruction Set.
*
#include <asm/asm_offsets.h>
#define FPU_CLEAN_RETL \
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define FPU_RETL \
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define NORMAL_RETL \
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
@@ -1009,7+1009,7 @@ VIScopyfixup_ret: /* If this is copy_from_user(), zero out the rest of the
* kernel buffer.
*/
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o4
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o4
andcc asi_src, 0x1, %g0
be,pt %icc, 1f
VISExit
-/* $Id: VIScsum.S,v 1.4 1999/05/25 16:53:00 jj Exp $
+/* $Id: VIScsum.S,v 1.5 1999/07/30 09:35:36 davem Exp $
* VIScsum.S: High bandwidth IP checksumming utilizing the UltraSparc
* Visual Instruction Set.
*
@@ -341,7+341,7 @@ csum_partial: DO_THE_TRICK(f44,f46,f48,f50,f52,f54,f56,f58,f60,f62,f0,f2,f4,f6,f8,f10,f12,f14)
END_THE_TRICK(f60,f62,f0,f2,f4,f6,f8,f10,f12,f14,f16,f18,f20,f22,f24,f26,f28,f30)
#ifdef __KERNEL__
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %g7
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g7
#endif
and %o1, 0x3f, %o1 /* IEU0 Group */
#ifdef __KERNEL__
-/* $Id: VISsave.S,v 1.3 1998/10/21 10:36:39 jj Exp $
+/* $Id: VISsave.S,v 1.4 1999/07/30 09:35:37 davem Exp $
* VISsave.S: Code for saving FPU register state for
* VIS routines. One should not call this directly,
* but use macros provided in <asm/visasm.h>.
.align 32
VISenter:
- ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
- stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
- stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
+ stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
+ stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
9: jmpl %g7 + %g0, %g0
nop
1: bne,pn %icc, 2f
srl %g1, 1, %g1
-vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
- stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
+vis1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
+ stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
or %g3, %o5, %g3
- stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
+ stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g3
clr %g1
ba,pt %xcc, 3f
- stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_gsr]
+ stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
2: add %g6, %g1, %g3
cmp %o5, FPRS_DU
be,pn %icc, 6f
sll %g1, 3, %g1
- stb %o5, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
+ stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g2
- stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_gsr]
+ stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
add %g6, %g1, %g2
- stx %fsr, [%g2 + AOFF_task_tss + AOFF_thread_xfsr]
+ stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL|FPRS_DU, %g0
be,pn %icc, 9b
@@ -69,10+69,10 @@ vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3 jmpl %g7 + %g0, %g0
nop
-6: ldub [%g3 + AOFF_task_tss + AOFF_thread_fpsaved], %o5
+6: ldub [%g3 + AOFF_task_thread + AOFF_thread_fpsaved], %o5
or %o5, FPRS_DU, %o5
add %g6, AOFF_task_fpregs+0x80, %g2
- stb %o5, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
+ stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
sll %g1, 5, %g1
add %g6, AOFF_task_fpregs+0xc0, %g3
@@ -87,11+87,11 @@ vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
.align 32
VISenterhalf:
- ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %g1
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
- stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
- stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
+ stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
+ stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
clr %o5
jmpl %g7 + %g0, %g0
wr %g0, FPRS_FEF, %fprs
@@ -103,12+103,12 @@ VISenterhalf: 2: addcc %g6, %g1, %g3
sll %g1, 3, %g1
andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
+ stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g2
- stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_gsr]
+ stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
add %g6, %g1, %g2
- stx %fsr, [%g2 + AOFF_task_tss + AOFF_thread_xfsr]
+ stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL, %g0
be,pn %icc, 4f
-/* $Id: blockops.S,v 1.17 1999/05/25 16:52:52 jj Exp $
+/* $Id: blockops.S,v 1.18 1999/07/30 09:35:37 davem Exp $
* blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996,1998 David S. Miller (davem@caip.rutgers.edu)
.type copy_page,@function
copy_page: /* %o0=dest, %o1=src */
VISEntry
- ldx [%g6 + AOFF_task_mm], %o2
+ ldx [%g6 + AOFF_task_active_mm], %o2
sub %o0, %g4, %g1
sethi %uhi(_PAGE_VALID), %g3
sub %o1, %g4, %g2
@@ -107,7+107,7 @@ copy_page: /* %o0=dest, %o1=src */ .type clear_page,@function
clear_page: /* %o0=dest */
VISEntryHalf
- ldx [%g6 + AOFF_task_mm], %o2
+ ldx [%g6 + AOFF_task_active_mm], %o2
sub %o0, %g4, %g1
sethi %uhi(_PAGE_VALID), %g3
sllx %g3, 32, %g3
@@ -266,7+266,7 @@ cpc_end: .globl cpc_handler
cpc_handler:
ldx [%sp + 0x7ff + 128], %g1
- ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %g3
+ ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g3
sub %g0, EFAULT, %g2
brnz,a,pt %g1, 1f
st %g2, [%g1]
-/* $Id: fcmpeq.c,v 1.5 1999/05/28 13:43:29 jj Exp $
+/* $Id: fcmpeq.c,v 1.6 1999/07/30 09:35:40 davem Exp $
* arch/sparc64/math-emu/fcmpeq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -26,14+26,14 @@ int FCMPEQ(void *rd, void *rs2, void *rs1) if (!FP_INHIBIT_RESULTS) {
rd = (void *)(((long)rd)&~3);
if (ret == -1) ret = 2;
- fsr = current->tss.xfsr[0];
+ fsr = current->thread.xfsr[0];
switch (fccno) {
case 0: fsr &= ~0xc00; fsr |= (ret << 10); break;
case 1: fsr &= ~0x300000000UL; fsr |= (ret << 32); break;
case 2: fsr &= ~0xc00000000UL; fsr |= (ret << 34); break;
case 3: fsr &= ~0x3000000000UL; fsr |= (ret << 36); break;
}
- current->tss.xfsr[0] = fsr;
+ current->thread.xfsr[0] = fsr;
}
FP_HANDLE_EXCEPTIONS;
}
-/* $Id: fcmpq.c,v 1.5 1999/05/28 13:43:33 jj Exp $
+/* $Id: fcmpq.c,v 1.6 1999/07/30 09:35:40 davem Exp $
* arch/sparc64/math-emu/fcmpq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -26,14+26,14 @@ int FCMPQ(void *rd, void *rs2, void *rs1) if (!FP_INHIBIT_RESULTS) {
rd = (void *)(((long)rd)&~3);
if (ret == -1) ret = 2;
- fsr = current->tss.xfsr[0];
+ fsr = current->thread.xfsr[0];
switch (fccno) {
case 0: fsr &= ~0xc00; fsr |= (ret << 10); break;
case 1: fsr &= ~0x300000000UL; fsr |= (ret << 32); break;
case 2: fsr &= ~0xc00000000UL; fsr |= (ret << 34); break;
case 3: fsr &= ~0x3000000000UL; fsr |= (ret << 36); break;
}
- current->tss.xfsr[0] = fsr;
+ current->thread.xfsr[0] = fsr;
}
FP_HANDLE_EXCEPTIONS;
}
-/* $Id: fsubd.c,v 1.4 1999/05/28 13:45:04 jj Exp $
+/* $Id: fsubd.c,v 1.5 1999/08/02 14:08:04 jj Exp $
* arch/sparc64/math-emu/fsubd.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -17,9+17,7 @@ int FSUBD(void *rd, void *rs2, void *rs1)
FP_UNPACK_DP(A, rs1);
FP_UNPACK_DP(B, rs2);
- if (B_c != FP_CLS_NAN)
- B_s ^= 1;
- FP_ADD_D(R, A, B);
+ FP_SUB_D(R, A, B);
FP_PACK_DP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
-/* $Id: fsubq.c,v 1.4 1999/05/28 13:45:09 jj Exp $
+/* $Id: fsubq.c,v 1.5 1999/08/02 14:08:06 jj Exp $
* arch/sparc64/math-emu/fsubq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -17,9+17,7 @@ int FSUBQ(void *rd, void *rs2, void *rs1)
FP_UNPACK_QP(A, rs1);
FP_UNPACK_QP(B, rs2);
- if (B_c != FP_CLS_NAN)
- B_s ^= 1;
- FP_ADD_Q(R, A, B);
+ FP_SUB_Q(R, A, B);
FP_PACK_QP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
-/* $Id: fsubs.c,v 1.4 1999/05/28 13:45:12 jj Exp $
+/* $Id: fsubs.c,v 1.5 1999/08/02 14:08:07 jj Exp $
* arch/sparc64/math-emu/fsubs.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -17,9+17,7 @@ int FSUBS(void *rd, void *rs2, void *rs1)
FP_UNPACK_SP(A, rs1);
FP_UNPACK_SP(B, rs2);
- if (B_c != FP_CLS_NAN)
- B_s ^= 1;
- FP_ADD_S(R, A, B);
+ FP_SUB_S(R, A, B);
FP_PACK_SP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
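
The three FSUB* changes above drop the open-coded sign flip in favor of the new FP_SUB_* macros, which this patch defines later on top of _FP_ADD_INTERNAL. The underlying idea (subtraction is addition with the second operand's sign flipped, except that a NaN operand must pass through untouched so its sign and payload survive) can be checked with a minimal standalone sketch; this is plain userspace C on hardware floats, not the kernel's packed soft-fp representation:

#include <math.h>
#include <stdio.h>

/* Illustrative only: mimic the "if (Y##_c != FP_CLS_NAN) Y##_s ^= 1" guard
 * of _FP_SUB using the hardware float path instead of soft-fp classes. */
static double sub_via_add(double a, double b)
{
	if (!isnan(b))		/* leave NaN operands alone */
		b = -b;		/* flip the sign ... */
	return a + b;		/* ... and reuse the addition path */
}

int main(void)
{
	printf("%g\n", sub_via_add(5.0, 3.0));	/* 2 */
	printf("%g\n", sub_via_add(3.0, 5.0));	/* -2 */
	printf("%g\n", sub_via_add(1.0, NAN));	/* nan propagates */
	return 0;
}
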
-/* $Id: math.c,v 1.8 1999/05/28 13:43:11 jj Exp $
+/* $Id: math.c,v 1.9 1999/07/30 09:35:41 davem Exp $
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
@@ -75,7+75,7 @@ FLOATFUNC(FDTOI) */
static int record_exception(struct pt_regs *regs, int eflag)
{
- u64 fsr = current->tss.xfsr[0];
+ u64 fsr = current->thread.xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
@@ -120,7+120,7 @@ static int record_exception(struct pt_regs *regs, int eflag) if(would_trap != 0)
fsr |= (1UL << 14);
- current->tss.xfsr[0] = fsr;
+ current->thread.xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
@@ -148,7+148,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if(tstate & TSTATE_PRIV)
die_if_kernel("FPQuad from kernel", regs);
- if(current->tss.flags & SPARC_FLAG_32BIT)
+ if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
@@ -201,33+201,33 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (type) {
void *rs1 = NULL, *rs2 = NULL, *rd = NULL;
- freg = (current->tss.xfsr[0] >> 14) & 0xf;
+ freg = (current->thread.xfsr[0] >> 14) & 0xf;
if (freg != (type >> 8))
goto err;
- current->tss.xfsr[0] &= ~0x1c000;
+ current->thread.xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
case 3: if (freg & 2) {
- current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs1 = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
- if (!(current->tss.fpsaved[0] & flags))
+ if (!(current->thread.fpsaved[0] & flags))
rs1 = (void *)&zero;
break;
}
freg = (insn & 0x1f);
switch ((type >> 2) & 0x3) {
case 3: if (freg & 2) {
- current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs2 = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
- if (!(current->tss.fpsaved[0] & flags))
+ if (!(current->thread.fpsaved[0] & flags))
rs2 = (void *)&zero;
break;
}
@@ -235,23+235,23 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) switch ((type >> 4) & 0x3) {
case 0: rd = (void *)(long)(freg & 3); break;
case 3: if (freg & 2) {
- current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rd = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
- if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
- current->tss.fpsaved[0] = FPRS_FEF;
- current->tss.gsr[0] = 0;
+ if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
+ current->thread.fpsaved[0] = FPRS_FEF;
+ current->thread.gsr[0] = 0;
}
- if (!(current->tss.fpsaved[0] & flags)) {
+ if (!(current->thread.fpsaved[0] & flags)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
- current->tss.fpsaved[0] |= flags;
+ current->thread.fpsaved[0] |= flags;
break;
}
flags = func(rd, rs2, rs1);
@@ -259,7+259,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) return record_exception(regs, flags);
/* Success and no exceptions detected. */
- current->tss.xfsr[0] &= ~(FSR_CEXC_MASK);
+ current->thread.xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
/* Basic. Assuming the host word size is >= 2*FRACBITS, we can do the
multiplication immediately. */
-#define _FP_MUL_MEAT_1_imm(fs, R, X, Y) \
+#define _FP_MUL_MEAT_1_imm(wfracbits, R, X, Y) \
do { \
R##_f = X##_f * Y##_f; \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_1(R, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_1(R, wfracbits-1, 2*wfracbits); \
} while (0)
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
-#define _FP_MUL_MEAT_1_wide(fs, R, X, Y, doit) \
+#define _FP_MUL_MEAT_1_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_W_TYPE _Z_f0, _Z_f1; \
doit(_Z_f1, _Z_f0, X##_f, Y##_f); \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_2(_Z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_2(_Z, wfracbits-1, 2*wfracbits); \
R##_f = _Z_f0; \
} while (0)
/* Finally, a simple widening multiply algorithm. What fun! */
-#define _FP_MUL_MEAT_1_hard(fs, R, X, Y) \
+#define _FP_MUL_MEAT_1_hard(wfracbits, R, X, Y) \
do { \
_FP_W_TYPE _xh, _xl, _yh, _yl, _z_f0, _z_f1, _a_f0, _a_f1; \
\
_FP_FRAC_ADD_2(_z, _z, _a); \
\
/* normalize */ \
- _FP_FRAC_SRS_2(_z, _FP_WFRACBITS_##fs - 1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_2(_z, wfracbits - 1, 2*wfracbits); \
R##_f = _z_f0; \
} while (0)
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
-#define _FP_MUL_MEAT_2_wide(fs, R, X, Y, doit) \
+#define _FP_MUL_MEAT_2_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
\
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_4(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _FP_FRAC_WORD_4(_z,0); \
R##_f1 = _FP_FRAC_WORD_4(_z,1); \
} while (0)
Do only 3 multiplications instead of four. This one is for machines
where multiplication is much more expensive than subtraction. */
-#define _FP_MUL_MEAT_2_wide_3mul(fs, R, X, Y, doit) \
+#define _FP_MUL_MEAT_2_wide_3mul(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
_FP_W_TYPE _d; \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_4(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _FP_FRAC_WORD_4(_z,0); \
R##_f1 = _FP_FRAC_WORD_4(_z,1); \
} while (0)
-#define _FP_MUL_MEAT_2_gmp(fs, R, X, Y) \
+#define _FP_MUL_MEAT_2_gmp(wfracbits, R, X, Y) \
do { \
_FP_FRAC_DECL_4(_z); \
_FP_W_TYPE _x[2], _y[2]; \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_4(_z, _FP_WFRACBITS##_fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _z_f[0]; \
R##_f1 = _z_f[1]; \
} while (0)
+/* Do at most 120x120=240 bits multiplication using double floating
+ point multiplication. This is useful if floating point
+ multiplication has much bigger throughput than integer multiply.
+ It is supposed to work for _FP_W_TYPE_SIZE 64 and wfracbits
+ between 106 and 120 only.
+ Caller guarantees that X and Y have (1LL << (wfracbits - 1)) set.
+ SETFETZ is a macro which will disable all FPU exceptions and set rounding
+ towards zero, RESETFE should optionally reset it back. */
+
+#define _FP_MUL_MEAT_2_120_240_double(wfracbits, R, X, Y, setfetz, resetfe) \
+ do { \
+ static const double _const[] = { \
+ /* 2^-24 */ 5.9604644775390625e-08, \
+ /* 2^-48 */ 3.5527136788005009e-15, \
+ /* 2^-72 */ 2.1175823681357508e-22, \
+ /* 2^-96 */ 1.2621774483536189e-29, \
+ /* 2^28 */ 2.68435456e+08, \
+ /* 2^4 */ 1.600000e+01, \
+ /* 2^-20 */ 9.5367431640625e-07, \
+ /* 2^-44 */ 5.6843418860808015e-14, \
+ /* 2^-68 */ 3.3881317890172014e-21, \
+ /* 2^-92 */ 2.0194839173657902e-28, \
+ /* 2^-116 */ 1.2037062152420224e-35}; \
+ double _a240, _b240, _c240, _d240, _e240, _f240, \
+ _g240, _h240, _i240, _j240, _k240; \
+ union { double d; UDItype i; } _l240, _m240, _n240, _o240, \
+ _p240, _q240, _r240, _s240; \
+ UDItype _t240, _u240, _v240, _w240, _x240, _y240 = 0; \
+ \
+ if (wfracbits < 106 || wfracbits > 120) \
+ abort(); \
+ \
+ setfetz; \
+ \
+ _e240 = (double)(long)(X##_f0 & 0xffffff); \
+ _j240 = (double)(long)(Y##_f0 & 0xffffff); \
+ _d240 = (double)(long)((X##_f0 >> 24) & 0xffffff); \
+ _i240 = (double)(long)((Y##_f0 >> 24) & 0xffffff); \
+ _c240 = (double)(long)(((X##_f1 << 16) & 0xffffff) | (X##_f0 >> 48)); \
+ _h240 = (double)(long)(((Y##_f1 << 16) & 0xffffff) | (Y##_f0 >> 48)); \
+ _b240 = (double)(long)((X##_f1 >> 8) & 0xffffff); \
+ _g240 = (double)(long)((Y##_f1 >> 8) & 0xffffff); \
+ _a240 = (double)(long)(X##_f1 >> 32); \
+ _f240 = (double)(long)(Y##_f1 >> 32); \
+ _e240 *= _const[3]; \
+ _j240 *= _const[3]; \
+ _d240 *= _const[2]; \
+ _i240 *= _const[2]; \
+ _c240 *= _const[1]; \
+ _h240 *= _const[1]; \
+ _b240 *= _const[0]; \
+ _g240 *= _const[0]; \
+ _s240.d = _e240*_j240;\
+ _r240.d = _d240*_j240 + _e240*_i240;\
+ _q240.d = _c240*_j240 + _d240*_i240 + _e240*_h240;\
+ _p240.d = _b240*_j240 + _c240*_i240 + _d240*_h240 + _e240*_g240;\
+ _o240.d = _a240*_j240 + _b240*_i240 + _c240*_h240 + _d240*_g240 + _e240*_f240;\
+ _n240.d = _a240*_i240 + _b240*_h240 + _c240*_g240 + _d240*_f240; \
+ _m240.d = _a240*_h240 + _b240*_g240 + _c240*_f240; \
+ _l240.d = _a240*_g240 + _b240*_f240; \
+ _k240 = _a240*_f240; \
+ _r240.d += _s240.d; \
+ _q240.d += _r240.d; \
+ _p240.d += _q240.d; \
+ _o240.d += _p240.d; \
+ _n240.d += _o240.d; \
+ _m240.d += _n240.d; \
+ _l240.d += _m240.d; \
+ _k240 += _l240.d; \
+ _s240.d -= ((_const[10]+_s240.d)-_const[10]); \
+ _r240.d -= ((_const[9]+_r240.d)-_const[9]); \
+ _q240.d -= ((_const[8]+_q240.d)-_const[8]); \
+ _p240.d -= ((_const[7]+_p240.d)-_const[7]); \
+ _o240.d += _const[7]; \
+ _n240.d += _const[6]; \
+ _m240.d += _const[5]; \
+ _l240.d += _const[4]; \
+ if (_s240.d != 0.0) _y240 = 1; \
+ if (_r240.d != 0.0) _y240 = 1; \
+ if (_q240.d != 0.0) _y240 = 1; \
+ if (_p240.d != 0.0) _y240 = 1; \
+ _t240 = (DItype)_k240; \
+ _u240 = _l240.i; \
+ _v240 = _m240.i; \
+ _w240 = _n240.i; \
+ _x240 = _o240.i; \
+ R##_f1 = (_t240 << (128 - (wfracbits - 1))) \
+ | ((_u240 & 0xffffff) >> ((wfracbits - 1) - 104)); \
+ R##_f0 = ((_u240 & 0xffffff) << (168 - (wfracbits - 1))) \
+ | ((_v240 & 0xffffff) << (144 - (wfracbits - 1))) \
+ | ((_w240 & 0xffffff) << (120 - (wfracbits - 1))) \
+ | ((_x240 & 0xffffff) >> ((wfracbits - 1) - 96)) \
+ | _y240; \
+ resetfe; \
+ } while (0)
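
The 120x240 multiply above relies on splitting each operand into 24-bit limbs: a 24-bit by 24-bit product needs at most 48 significant bits, so it is exact in a double's 53-bit significand, and the power-of-two _const[] factors only shift the binary point. A rough userspace check of that assumption (illustration only, not part of the kernel build):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Largest 24-bit limbs: their product needs 48 bits, which still
	 * fits exactly in a double's 53-bit significand. */
	uint64_t a = 0xffffff, b = 0xffffff;
	uint64_t exact  = a * b;			/* 64-bit integer product */
	uint64_t viadbl = (uint64_t)((double)a * (double)b); /* via double */

	printf("integer: %llu\n", (unsigned long long)exact);
	printf("double : %llu\n", (unsigned long long)viadbl);
	printf("%s\n", exact == viadbl ? "exact" : "rounded");
	return 0;
}
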
/*
* Division algorithms:
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
-#define _FP_MUL_MEAT_4_wide(fs, R, X, Y, doit) \
+#define _FP_MUL_MEAT_4_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_8(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
_FP_FRAC_DECL_2(_d); _FP_FRAC_DECL_2(_e); _FP_FRAC_DECL_2(_f); \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_8(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
__FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
_FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
} while (0)
-#define _FP_MUL_MEAT_4_gmp(fs, R, X, Y) \
+#define _FP_MUL_MEAT_4_gmp(wfracbits, R, X, Y) \
do { \
_FP_FRAC_DECL_8(_z); \
\
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
- _FP_FRAC_SRS_8(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
+ _FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
__FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
_FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
} while (0)
@@ -207,7+207,7 @@ do { \ * Main addition routine. The input values should be cooked.
*/
-#define _FP_ADD(fs, wc, R, X, Y) \
+#define _FP_ADD_INTERNAL(fs, wc, R, X, Y, OP) \
do { \
switch (_FP_CLS_COMBINE(X##_c, Y##_c)) \
{ \
@@ -284,7+284,7 @@ do { \ } \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
- _FP_CHOOSENAN(fs, wc, R, X, Y); \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, OP); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
@@ -345,6+345,13 @@ do { \ } \
} while (0)
+#define _FP_ADD(fs, wc, R, X, Y) _FP_ADD_INTERNAL(fs, wc, R, X, Y, '+')
+#define _FP_SUB(fs, wc, R, X, Y) \
+ do { \
+ if (Y##_c != FP_CLS_NAN) Y##_s ^= 1; \
+ _FP_ADD_INTERNAL(fs, wc, R, X, Y, '-'); \
+ } while (0)
+
/*
* Main negation routine. FIXME -- when we care about setting exception
@@ -382,7+389,7 @@ do { \ break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
- _FP_CHOOSENAN(fs, wc, R, X, Y); \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, '*'); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
@@ -440,7+447,7 @@ do { \ break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
- _FP_CHOOSENAN(fs, wc, R, X, Y); \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, '/'); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
@@ -108,13+108,9 @@ union _FP_UNION_Q _FP_PACK_RAW_4_P(Q,val,X); \
} while (0)
-#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,4,X)
-#define FP_NEG_Q(R,X) _FP_NEG(Q,4,R,X)
-#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,4,R,X,Y)
-/* single.h and double.h define FP_SUB_t this way too. However, _FP_SUB is
- * never defined in op-common.h! Fortunately nobody seems to use the FP_SUB_t
- * macros: I suggest a combination of FP_NEG and FP_ADD :-> -- PMM 02/1998
- */
+#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,4,X)
+#define FP_NEG_Q(R,X) _FP_NEG(Q,4,R,X)
+#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,4,R,X,Y)
#define FP_SUB_Q(R,X,Y) _FP_SUB(Q,4,R,X,Y)
#define FP_MUL_Q(R,X,Y) _FP_MUL(Q,4,R,X,Y)
#define FP_DIV_Q(R,X,Y) _FP_DIV(Q,4,R,X,Y)
#define _FP_WS_TYPE signed long
#define _FP_I_TYPE long
-#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_imm(S,R,X,Y)
-#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
-#define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_2_wide_3mul(Q,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
* CPU instruction emulation this should prefer Y.
* (see SPARCv9 B.2.2 section).
*/
-#define _FP_CHOOSENAN(fs, wc, R, X, Y) \
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
/* Obtain the current rounding mode. */
#ifndef FP_ROUNDMODE
-#define FP_ROUNDMODE ((current->tss.xfsr[0] >> 30) & 0x3)
+#define FP_ROUNDMODE ((current->thread.xfsr[0] >> 30) & 0x3)
#endif
/* Exception flags. */
#define FP_HANDLE_EXCEPTIONS return _fex
-#define FP_INHIBIT_RESULTS ((current->tss.xfsr[0] >> 23) & _fex)
+#define FP_INHIBIT_RESULTS ((current->thread.xfsr[0] >> 23) & _fex)
#endif
-/* $Id: asyncd.c,v 1.8 1999/07/04 04:35:55 davem Exp $
+/* $Id: asyncd.c,v 1.9 1999/07/30 09:35:43 davem Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
@@ -91,7+91,8 @@ static void add_to_async_queue(int taskid, void async_fault(unsigned long address, int write, int taskid,
void (*callback)(int,unsigned long,int,int))
{
- struct task_struct *tsk = task[taskid];
+#warning Need some fixing here... -DaveM
+ struct task_struct *tsk = current /* XXX task[taskid] */;
struct mm_struct *mm = tsk->mm;
stats.faults++;
@@ -111,7+112,8 @@ static int fault_in_page(int taskid, {
static unsigned last_address;
static int last_task, loop_counter;
- struct task_struct *tsk = task[taskid];
+#warning Need some fixing here... -DaveM
+ struct task_struct *tsk = current /* XXX task[taskid] */;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
@@ -178,8+180,8 @@ no_memory:
bad_area:
stats.failure++;
- tsk->tss.sig_address = address;
- tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ tsk->thread.sig_address = address;
+ tsk->thread.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, tsk, 1);
return 1;
}
-/* $Id: fault.c,v 1.36 1999/07/04 04:35:56 davem Exp $
+/* $Id: fault.c,v 1.38 1999/08/02 08:39:50 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -84,10+84,11 @@ void unhandled_fault(unsigned long address, struct task_struct *tsk, printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %016lx\n", (unsigned long)address);
}
- printk(KERN_ALERT "tsk->mm->context = %016lx\n",
- (unsigned long) tsk->mm->context);
- printk(KERN_ALERT "tsk->mm->pgd = %016lx\n",
- (unsigned long) tsk->mm->pgd);
+ printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
+ (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
+ printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
+ (tsk->mm ? (unsigned long) tsk->mm->pgd :
+ (unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
@@ -159,11+160,40 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, in
down(&mm->mmap_sem);
#ifdef DEBUG_LOCKUPS
- if (regs->tpc == lastpc && address == lastaddr && write == lastwrite) {
+ if (regs->tpc == lastpc &&
+ address == lastaddr &&
+ write == lastwrite) {
lockcnt++;
if (lockcnt == 100000) {
- printk("do_sparc64_fault: possible fault loop for %016lx %s\n", address, write ? "write" : "read");
+ unsigned char tmp;
+ register unsigned long tmp1 asm("o5");
+ register unsigned long tmp2 asm("o4");
+
+ printk("do_sparc64_fault[%s:%d]: possible fault loop for %016lx %s\n",
+ current->comm, current->pid,
+ address, write ? "write" : "read");
+ printk("do_sparc64_fault: CHECK[papgd[%016lx],pcac[%016lx]]\n",
+ __pa(mm->pgd), pgd_val(mm->pgd[0])<<11UL);
+ __asm__ __volatile__(
+ "wrpr %%g0, 0x494, %%pstate\n\t"
+ "mov %3, %%g4\n\t"
+ "mov %%g7, %0\n\t"
+ "ldxa [%%g4] %2, %1\n\t"
+ "wrpr %%g0, 0x096, %%pstate"
+ : "=r" (tmp1), "=r" (tmp2)
+ : "i" (ASI_DMMU), "i" (TSB_REG));
+ printk("do_sparc64_fault: IS[papgd[%016lx],pcac[%016lx]]\n",
+ tmp1, tmp2);
+ printk("do_sparc64_fault: CHECK[ctx(%016lx)] IS[ctx(%016lx)]\n",
+ mm->context, spitfire_get_secondary_context());
+ __asm__ __volatile__("rd %%asi, %0"
+ : "=r" (tmp));
+ printk("do_sparc64_fault: CHECK[seg(%02x)] IS[seg(%02x)]\n",
+ current->thread.current_ds.seg, tmp);
show_regs(regs);
+ __sti();
+ while(1)
+ barrier();
}
} else {
lastpc = regs->tpc;
@@ -282,8+312,8 @@ do_kernel_fault: return;
}
} else {
- current->tss.sig_address = address;
- current->tss.sig_desc = SUBSIG_NOMAPPING;
+ current->thread.sig_address = address;
+ current->thread.sig_desc = SUBSIG_NOMAPPING;
force_sig(SIGSEGV, current);
return;
}
@@ -293,8+323,8 @@ do_kernel_fault:
do_sigbus:
up(&mm->mmap_sem);
- current->tss.sig_address = address;
- current->tss.sig_desc = SUBSIG_MISCERROR;
+ current->thread.sig_address = address;
+ current->thread.sig_desc = SUBSIG_MISCERROR;
force_sig(SIGBUS, current);
if (regs->tstate & TSTATE_PRIV)
goto do_kernel_fault;
-/* $Id: generic.c,v 1.8 1999/03/12 06:51:50 davem Exp $
+/* $Id: generic.c,v 1.9 1999/07/23 22:32:01 davem Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
@@ -95,7+95,8 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign space);
curend = address + 0x10000;
offset += 0x10000;
- }
+ } else
+ offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
-/* $Id: init.c,v 1.130 1999/06/29 12:34:06 davem Exp $
+/* $Id: init.c,v 1.131 1999/07/30 09:35:45 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
@@ -42,7+42,10 @@ unsigned long *sparc64_valid_addr_bitmap; unsigned long phys_base;
/* get_new_mmu_context() uses "cache + 1". */
+spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
+unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
/* References to section boundaries */
extern char __init_begin, __init_end, etext, __bss_start;
@@ -386,7+389,7 @@ void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr, dvma_pages_current_offset;
/* Map the CPU's view. */
- pgdp = pgd_offset(init_task.mm, addr);
+ pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_alloc_kernel(pgdp, addr);
ptep = pte_alloc_kernel(pmdp, addr);
pte = mk_pte(the_page, PAGE_KERNEL);
@@ -677,7+680,7 @@ static inline void inherit_prom_mappings(void) for (vaddr = trans[i].virt;
vaddr < trans[i].virt + trans[i].size;
vaddr += PAGE_SIZE) {
- pgdp = pgd_offset(init_task.mm, vaddr);
+ pgdp = pgd_offset(&init_mm, vaddr);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool,
PMD_TABLE_SIZE);
@@ -739,7+742,7 @@ void prom_world(int enter) int i;
if (!enter)
- set_fs(current->tss.current_ds);
+ set_fs(current->thread.current_ds);
if (!prom_ditlb_set)
return;
@@ -957,9+960,6 @@ void __flush_tlb_all(void) : : "r" (pstate));
}
-#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
-unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
-
/* Caller does TLB context flushing on local CPU if necessary.
*
* We must be careful about boundary cases so that we never
@@ -969,14+969,16 @@ unsigned long mmu_context_bmap[CTX_BMAP_SLOTS]; */
void get_new_mmu_context(struct mm_struct *mm)
{
- unsigned long ctx = (tlb_context_cache + 1) & ~(CTX_VERSION_MASK);
- unsigned long new_ctx;
+ unsigned long ctx, new_ctx;
+ spin_lock(&ctx_alloc_lock);
+ ctx = CTX_HWBITS(tlb_context_cache + 1);
if (ctx == 0)
ctx = 1;
- if ((mm->context != NO_CONTEXT) &&
- !((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK))
- clear_bit(mm->context & ~(CTX_VERSION_MASK), mmu_context_bmap);
+ if (CTX_VALID(mm->context)) {
+ unsigned long nr = CTX_HWBITS(mm->context);
+ mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+ }
new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
@@ -1003,12+1005,13 @@ void get_new_mmu_context(struct mm_struct *mm) goto out;
}
}
- set_bit(new_ctx, mmu_context_bmap);
+ mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
tlb_context_cache = new_ctx;
+ spin_unlock(&ctx_alloc_lock);
+
mm->context = new_ctx;
- mm->cpu_vm_mask = 0;
}
#ifndef __SMP__
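
get_new_mmu_context() now takes ctx_alloc_lock and sets/clears mmu_context_bmap words by hand rather than through set_bit/clear_bit. A stripped-down sketch of the same allocate-from-bitmap-under-a-lock pattern, with a pthread mutex standing in for the spinlock and toy sizes rather than the kernel's (illustrative only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NCTX 256				/* toy context space */
static uint64_t ctx_bmap[NCTX / 64];		/* one bit per context */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a clear bit at or after 'start', mark it used, return it (or -1). */
static int alloc_ctx(int start)
{
	int nr, ret = -1;

	pthread_mutex_lock(&ctx_lock);
	for (nr = start; nr < NCTX; nr++) {
		if (!(ctx_bmap[nr >> 6] & (1ULL << (nr & 63)))) {
			ctx_bmap[nr >> 6] |= 1ULL << (nr & 63);
			ret = nr;
			break;
		}
	}
	pthread_mutex_unlock(&ctx_lock);
	return ret;
}

int main(void)
{
	printf("%d\n", alloc_ctx(1));	/* 1 */
	printf("%d\n", alloc_ctx(1));	/* 2 */
	printf("%d\n", alloc_ctx(1));	/* 3 */
	return 0;
}
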
@@ -1049,7+1052,7 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end)) pte_t *ptep;
while (start < end) {
- pgdp = pgd_offset(init_task.mm, start);
+ pgdp = pgd_offset(&init_mm, start);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool, PAGE_SIZE);
memset(pmdp, 0, PAGE_SIZE);
@@ -1073,7+1076,7 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end)) void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus, int rdonly)
{
- pgd_t *pgdp = pgd_offset(init_task.mm, virt_addr);
+ pgd_t *pgdp = pgd_offset(&init_mm, virt_addr);
pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
pte_t *ptep = pte_offset(pmdp, virt_addr);
pte_t pte;
@@ -1095,7+1098,7 @@ void sparc_ultra_unmapioaddr(unsigned long virt_addr) pmd_t *pmdp;
pte_t *ptep;
- pgdp = pgd_offset(init_task.mm, virt_addr);
+ pgdp = pgd_offset(&init_mm, virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
ptep = pte_offset(pmdp, virt_addr);
-/* $Id: ultra.S,v 1.32 1999/03/28 08:39:34 davem Exp $
+/* $Id: ultra.S,v 1.33 1999/08/02 08:39:49 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -136,36+136,37 @@ __flush_tlb_range_pbp_slow: flush_icache_page: /* %o0 = phys_page */
sethi %hi(1 << 13), %o2 ! IC_set bit
mov 1, %g1
- srlx %o0, 5, %o0 ! phys-addr comparitor
+ srlx %o0, 5, %o0
clr %o1 ! IC_addr
sllx %g1, 36, %g1
sub %g1, 1, %g2
- andn %g2, 0xff, %g2 ! IC_tag mask
- nop
+ or %o0, %g1, %o0 ! VALID+phys-addr comparator
+ sllx %g2, 1, %g2
+ andn %g2, 0xfe, %g2 ! IC_tag mask
1: ldda [%o1] ASI_IC_TAG, %o4
and %o5, %g2, %o5
cmp %o5, %o0
be,pn %xcc, iflush1
- nop
+ add %o1, 0x20, %g3
2: ldda [%o1 + %o2] ASI_IC_TAG, %o4
and %o5, %g2, %o5
- cmp %o5, %o0
+ cmp %o5, %o0
be,pn %xcc, iflush2
nop
-3: add %o1, 0x20, %o1
- cmp %o1, %o2
+3: cmp %g3, %o2
bne,pt %xcc, 1b
- nop
+ mov %g3, %o1
retl
nop
+
iflush1:stxa %g0, [%o1] ASI_IC_TAG
- ba,pt %xcc, 2b
- flush %g6
+ flush %g6
+ ba,a,pt %xcc, 2b
iflush2:stxa %g0, [%o1 + %o2] ASI_IC_TAG
- ba,pt %xcc, 3b
- flush %g6
+ flush %g6
+ ba,a,pt %xcc, 3b
#ifdef __SMP__
/* These are all called by the slaves of a cross call, at
-/* $Id: p1275.c,v 1.15 1998/10/13 14:03:47 davem Exp $
+/* $Id: p1275.c,v 1.16 1999/08/02 12:05:57 jj Exp $
* p1275.c: Sun IEEE 1275 PROM low level interface routines
*
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -252,8+252,8 @@ void prom_cif_callback(void) * the counter is needed. -DaveM
*/
static int prom_entry_depth = 0;
-#ifdef __SMP__
static spinlock_t prom_entry_lock = SPIN_LOCK_UNLOCKED;
+#ifdef __SMP__
extern void smp_capture(void);
extern void smp_release(void);
#endif
-/* $Id: ioctl.c,v 1.11 1999/05/27 00:36:25 davem Exp $
+/* $Id: ioctl.c,v 1.12 1999/07/23 01:57:03 davem Exp $
* ioctl.c: Solaris ioctl emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -367,7+367,6 @@ static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg) static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
int len, int *len_p)
{
- struct inode *ino;
int ret;
switch (cmd & 0xff) {
@@ -459,7+458,6 @@ static inline int solaris_S(struct file *filp, unsigned int fd, unsigned int cmd mm_segment_t old_fs;
struct strioctl si;
struct inode *ino;
- struct file *filp;
struct sol_socket_struct *sock;
struct module_info *mi;
-/* $Id: socksys.c,v 1.8 1998/08/26 10:28:28 davem Exp $
+/* $Id: socksys.c,v 1.9 1999/07/23 01:57:07 davem Exp $
* socksys.c: /dev/inet/ stuff for Solaris emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-/* $Id: timod.c,v 1.2 1999/05/12 11:11:55 davem Exp $
+/* $Id: timod.c,v 1.3 1999/08/02 12:06:01 jj Exp $
* timod.c: timod emulation.
*
* Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
@@ -33,9+33,7 @@ extern asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-#ifdef __SMP__
spinlock_t timod_pagelock = SPIN_LOCK_UNLOCKED;
-#endif
static char * page = NULL ;
#ifndef DEBUG_SOLARIS_KMALLOC
@@ -866,7+864,7 @@ asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if(fd >= current->files->max_fds) goto out;
+ if(fd >= NR_OPEN) goto out;
filp = current->files->fd[fd];
if(!filp) goto out;
@@ -933,7+931,7 @@ asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if(fd >= current->files->max_fds) goto out;
+ if(fd >= NR_OPEN) goto out;
filp = current->files->fd[fd];
if(!filp) goto out;
@@ -39,6+39,13 @@ SECTIONS __init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .setup_init : { *(.setup.init) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ __initcall_end = .;
. = ALIGN(8192);
__init_end = .;
__bss_start = .;
-/* $Id: cmd646.c,v 1.14 1999/07/03 08:56:09 davem Exp $
+/* $Id: cmd646.c,v 1.15 1999/07/23 01:48:37 davem Exp $
* cmd646.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Note, this driver is not used at all on other systems because
* there the "BIOS" has done all of the following already.
@@ -159,12+159,16 @@ static int __init ramdisk_size(char *str) return 1;
}
+static int __init ramdisk_size2(char *str)
+{
+ return ramdisk_size(str);
+}
__setup("ramdisk_start=", ramdisk_start_setup);
__setup("load_ramdisk=", load_ramdisk);
__setup("prompt_ramdisk=", prompt_ramdisk);
__setup("ramdisk=", ramdisk_size);
-__setup("ramdisk_size=", ramdisk_size);
+__setup("ramdisk_size=", ramdisk_size2);
#endif
-/* $Id: sunlance.c,v 1.85 1999/03/21 05:22:05 davem Exp $
+/* $Id: sunlance.c,v 1.86 1999/07/23 01:52:58 davem Exp $
* lance.c: Linux/Sparc/Lance driver
*
* Written 1995, 1996 by Miguel de Icaza
@@ -1421,7+1421,7 @@ static int eb4231_recintr(struct sparcaudio_driver *drv) status += 2;
}
- sparcaudio_input_done(drv, 1);
+ sparcaudio_input_done(drv, status);
return 1;
}
@@ -1503,7+1503,7 @@ static void cs4231_start_output(struct sparcaudio_driver *drv, __u8 * buffer, static void eb4231_stop_output(struct sparcaudio_driver *drv)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
- int dcsr;
+ unsigned int dcsr;
dprintk(("eb4231_stop_output: dcsr 0x%x dacr 0x%x dbcr %d\n",
readl(&cs4231_chip->eb2p->dcsr),
@@ -1635,6+1635,68 @@ static void cs4231_stop_input(struct sparcaudio_driver *drv) cs4231_pollinput(drv);
}
+#ifdef EB4231_SUPPORT
+static void eb4231_start_input(struct sparcaudio_driver *drv, __u8 * buffer,
+ unsigned long count)
+{
+ struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
+ unsigned int dcsr;
+
+ cs4231_chip->input_ptr = buffer;
+ cs4231_chip->input_size = count;
+
+ if (cs4231_chip->perchip_info.record.active ||
+ (cs4231_chip->perchip_info.record.pause))
+ return;
+
+ cs4231_ready(drv);
+
+ cs4231_chip->perchip_info.record.active = 1;
+ cs4231_chip->recording_count = 0;
+
+ dcsr = readl(&cs4231_chip->eb2c->dcsr);
+ if (!(dcsr & EBUS_DCSR_EN_DMA)) {
+ writel(EBUS_DCSR_RESET, &(cs4231_chip->eb2c->dcsr));
+ writel(EBUS_DCSR_BURST_SZ_16, &(cs4231_chip->eb2c->dcsr));
+
+ eb4231_recintr(drv);
+
+ writel(EBUS_DCSR_BURST_SZ_16 |
+ (EBUS_DCSR_EN_DMA | EBUS_DCSR_INT_EN | EBUS_DCSR_EN_CNT | EBUS_DCSR_EN_NEXT),
+ &(cs4231_chip->eb2c->dcsr));
+
+ cs4231_enable_rec(drv);
+ cs4231_ready(drv);
+ } else
+ eb4231_recintr(drv);
+}
+
+static void eb4231_stop_input(struct sparcaudio_driver *drv)
+{
+ struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
+ unsigned int dcsr;
+
+ cs4231_chip->perchip_info.record.active = 0;
+
+ cs4231_chip->input_ptr = NULL;
+ cs4231_chip->input_size = 0;
+ if (cs4231_chip->input_dma_handle) {
+ cs4231_chip->input_dma_handle = 0;
+ cs4231_chip->input_dma_size = 0;
+ }
+ if (cs4231_chip->input_next_dma_handle) {
+ cs4231_chip->input_next_dma_handle = 0;
+ cs4231_chip->input_next_dma_size = 0;
+ }
+
+ dcsr = readl(&(cs4231_chip->eb2c->dcsr));
+ if (dcsr & EBUS_DCSR_EN_DMA)
+ writel(dcsr & ~EBUS_DCSR_EN_DMA, &(cs4231_chip->eb2c->dcsr));
+
+ cs4231_disable_rec(drv);
+}
+#endif
+
static int cs4231_set_output_pause(struct sparcaudio_driver *drv, int value)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
@@ -1763,13+1825,25 @@ void eb4231_cinterrupt(int irq, void *dev_id, struct pt_regs *regs) struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
int dummy;
- /* Read status. */
- dummy = readl(&cs4231_chip->eb2c->dcsr);
+ /* Clear the interrupt. */
+ dummy = readl(&(cs4231_chip->eb2c->dcsr));
+ writel(dummy, &(cs4231_chip->eb2c->dcsr));
+
+ if ((dummy & EBUS_DCSR_TC) != 0
+ /*&& (dummy & EBUS_DCSR_A_LOADED) != 0*/) {
+ cs4231_chip->perchip_info.record.samples +=
+ cs4231_length_to_samplecount(&(cs4231_chip->perchip_info.record),
+ cs4231_chip->reclen);
+ eb4231_recintr(drv);
+ }
- cs4231_chip->perchip_info.record.samples +=
- cs4231_length_to_samplecount(&(cs4231_chip->perchip_info.record),
- cs4231_chip->reclen);
- eb4231_recintr(drv);
+ if ((dummy & EBUS_DCSR_A_LOADED) == 0) {
+ cs4231_chip->perchip_info.record.active = 0;
+ eb4231_recintr(drv);
+#if 1
+ eb4231_getsamplecount(drv, cs4231_chip->reclen, 1);
+#endif
+ }
}
/* ebus audio play interrupt handler. */
@@ -1937,8+2011,8 @@ static struct sparcaudio_operations eb4231_ops = { cs4231_ioctl,
eb4231_start_output,
eb4231_stop_output,
- cs4231_start_input,
- cs4231_stop_input,
+ eb4231_start_input,
+ eb4231_stop_input,
cs4231_audio_getdev,
cs4231_set_output_volume,
cs4231_get_output_volume,
@@ -79,11+79,6 @@ extern void scrollfront(int);
struct l1a_kbd_state l1a_state = { 0, 0 };
-/* Dummy function for now, we need it to link. -DaveM */
-void kbd_reset_setup(char *str, int *ints)
-{
-}
-
#ifndef CONFIG_PCI
DECLARE_WAIT_QUEUE_HEAD(keypress_wait);
#endif
@@ -1305,7+1300,7 @@ kbd_read (struct file *f, char *buffer, size_t count, loff_t *ppos) p = buffer;
for (; p < end && kbd_head != kbd_tail;){
#ifdef CONFIG_SPARC32_COMPAT
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
copy_to_user_ret((Firm_event *)p, &kbd_queue [kbd_tail],
sizeof(Firm_event)-sizeof(struct timeval), -EFAULT);
p += sizeof(Firm_event)-sizeof(struct timeval);
@@ -386,7+386,7 @@ sun_mouse_read(struct file *file, char *buffer,
while (p < end && !queue_empty ()){
#ifdef CONFIG_SPARC32_COMPAT
- if (current->tss.flags & SPARC_FLAG_32BIT) {
+ if (current->thread.flags & SPARC_FLAG_32BIT) {
Firm_event *q = get_from_queue();
copy_to_user_ret((Firm_event *)p, q,
-/* $Id: zs.c,v 1.42 1999/05/12 11:15:26 davem Exp $
+/* $Id: zs.c,v 1.43 1999/07/17 06:03:58 zaitcev Exp $
* zs.c: Zilog serial port driver for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -1844,7+1844,7 @@ int zs_open(struct tty_struct *tty, struct file * filp)
static void show_serial_version(void)
{
- char *revision = "$Revision: 1.42 $";
+ char *revision = "$Revision: 1.43 $";
char *version, *p;
version = strchr(revision, ' ');
@@ -2012,9+2012,8 @@ get_zs(int chip)) /* Can use the prom for other machine types */
zsnode = prom_getchild(prom_root_node);
if (sparc_cpu_model == sun4d) {
- int node;
int no = 0;
-
+
tmpnode = zsnode;
zsnode = 0;
bbnode = 0;
-/* $Id: sbus.c,v 1.77 1999/05/29 06:25:57 davem Exp $
+/* $Id: sbus.c,v 1.78 1999/07/23 02:00:27 davem Exp $
* sbus.c: SBus support routines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -603,7+603,7 @@ static unsigned long get_wchan(struct task_struct *p) #ifdef __sparc_v9__
bias = STACK_BIAS;
#endif
- fp = p->tss.ksp + bias;
+ fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
@@ -648,11+648,11 @@ static unsigned long get_wchan(struct task_struct *p) #define KSTK_EIP(tsk) ((tsk)->tss.regs->nip)
#define KSTK_ESP(tsk) ((tsk)->tss.regs->gpr[1])
#elif defined (__sparc_v9__)
-# define KSTK_EIP(tsk) ((tsk)->tss.kregs->tpc)
-# define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
+# define KSTK_EIP(tsk) ((tsk)->thread.kregs->tpc)
+# define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#elif defined(__sparc__)
-# define KSTK_EIP(tsk) ((tsk)->tss.kregs->pc)
-# define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
+# define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
+# define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#endif
/* Gcc optimizes away "strlen(x)" for constant x */
#define _ALPHA_INIT_H
#ifndef MODULE
+
#define __init __attribute__ ((__section__ (".text.init")))
#define __initdata __attribute__ ((__section__ (".data.init")))
#define __initfunc(__arginit) \
#define __INIT .section .text.init,"ax"
#define __FINIT .previous
#define __INITDATA .section .data.init,"a"
-#endif
#define __cacheline_aligned __attribute__((__aligned__(32)))
-#endif
+/*
+ * Used for initialization calls.
+ */
+
+typedef int (*initcall_t)(void);
+
+extern initcall_t __initcall_start, __initcall_end;
+
+#define __initcall(fn) \
+ static __attribute__ ((unused, __section__ (".initcall.init"))) \
+ initcall_t __initcall_##fn = fn
+
+/*
+ * Used for kernel command line parameter setup.
+ */
+
+struct kernel_param {
+ const char *str;
+ int (*setup_func)(char *);
+};
+
+extern struct kernel_param __setup_start, __setup_end;
+
+#define __setup(str, fn) \
+ static __attribute__ ((__section__ (".data.init"))) \
+ char __setup_str_##fn[] = str; \
+ static __attribute__ ((unused, __section__ (".setup.init"))) \
+ struct kernel_param __setup_##fn = { __setup_str_##fn, fn }
+
+#endif /* MODULE */
+#endif /* _ALPHA_INIT_H */
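
The __initcall()/__setup() macros introduced here work by planting a function pointer (or a kernel_param record) in a dedicated section; the linker script gathers those sections between __initcall_start/__initcall_end and __setup_start/__setup_end, and boot code walks the resulting arrays. The same registry pattern can be sketched in userspace: GNU toolchains auto-generate __start_/__stop_ symbols for sections whose names are valid C identifiers, so no custom linker script is needed for the illustration (the kernel instead defines the symbols explicitly in vmlinux.lds):

#include <stdio.h>

typedef int (*initcall_t)(void);

/* Place a function pointer into the "initreg" section, like __initcall(). */
#define register_initcall(fn) \
	static initcall_t __reg_##fn \
	__attribute__((used, section("initreg"))) = fn

static int hello(void) { printf("hello from initcall\n"); return 0; }
static int world(void) { printf("world from initcall\n"); return 0; }

register_initcall(hello);
register_initcall(world);

/* GNU ld provides these for sections named with valid C identifiers. */
extern initcall_t __start_initreg[], __stop_initreg[];

int main(void)
{
	initcall_t *call;

	for (call = __start_initreg; call < __stop_initreg; call++)
		(*call)();
	return 0;
}
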
@@ -88,6+88,7 @@ struct el_common_EV5_uncorrectable_mcheck {
extern void halt(void) __attribute__((noreturn));
+#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) \
do { \
unsigned long pcbb; \
#define __NR_munlockall 317
#define __NR_sysinfo 318
#define __NR__sysctl 319
-#define __NR_idle 320
+/* 320 was sys_idle. */
#define __NR_oldumount 321
#define __NR_swapon 322
#define __NR_times 323
@@ -141,6+141,8 @@ extern asmlinkage void __backtrace(void); #define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+#define prepare_to_switch() do { } while(0)
+
/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'.
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
@@ -16,6+16,8 @@ extern inline void wrusp(unsigned long usp) { __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
}
+#define prepare_to_switch() do { } while(0)
+
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
@@ -143,6+143,7 @@ __asm__ __volatile__( \ extern asmlinkage void *(*resume)(void *last, void *next);
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
+#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) \
do { \
(last) = resume(prev, next); \
@@ -80,6+80,7 @@ struct device_node; extern void note_scsi_host(struct device_node *, void *);
struct task_struct;
+#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
struct task_struct **);
-/* $Id: resource.h,v 1.7 1998/11/19 20:01:44 davem Exp $
+/* $Id: resource.h,v 1.8 1999/07/30 09:37:56 davem Exp $
* resource.h: Resource definitions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: siginfo.h,v 1.4 1999/04/28 19:45:20 davem Exp $
+/* $Id: siginfo.h,v 1.5 1999/07/29 12:56:57 jj Exp $
* siginfo.c:
*/
@@ -26,7+26,7 @@ typedef struct siginfo { /* kill() */
struct {
pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
+ unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
@@ -38,20+38,20 @@ typedef struct siginfo { /* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
+ unsigned int _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
+ unsigned int _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
void *_addr; /* faulting insn/memory ref. */
int _trapno; /* TRAP # which caused the signal */
@@ -85,6+85,7 @@ typedef struct siginfo { * si_code values
* Digital reserves positive values for kernel-generated signals.
*/
+#define SI_NOINFO 32767 /* no information in siginfo_t */
#define SI_USER 0 /* sent by kill, sigsend, raise */
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
#define SI_QUEUE -1 /* sent by sigqueue */
@@ -167,6+168,12 @@ typedef struct siginfo { #define NSIGPOLL 6
/*
+ * SIGEMT si_codes
+ */
+#define EMT_TAGOVF 1 /* tag overflow */
+#define NSIGEMT 1
+
+/*
* sigevent definitions
*
* It seems likely that SIGEV_THREAD will have to be handled from
-/* $Id: a.out.h,v 1.4 1997/05/04 07:21:19 davem Exp $ */
+/* $Id: a.out.h,v 1.5 1999/07/30 09:31:09 davem Exp $ */
#ifndef __SPARC64_A_OUT_H__
#define __SPARC64_A_OUT_H__
@@ -95,7+95,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
#ifdef __KERNEL__
-#define STACK_TOP (current->tss.flags & SPARC_FLAG_32BIT ? 0xf0000000 : TASK_SIZE)
+#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : TASK_SIZE)
#endif
#include <linux/config.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+# if !((__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8))
+# error Please issue 'make check_asm' in linux top-level directory first
+# endif
+#endif
+
#ifndef CONFIG_SMP
#define AOFF_task_state 0x00000000
#define ASIZ_task_next_task 0x00000008
#define AOFF_task_prev_task 0x00000058
#define ASIZ_task_prev_task 0x00000008
-#define AOFF_task_next_run 0x00000060
-#define ASIZ_task_next_run 0x00000008
-#define AOFF_task_prev_run 0x00000068
-#define ASIZ_task_prev_run 0x00000008
+#define AOFF_task_run_list 0x00000060
+#define ASIZ_task_run_list 0x00000010
#define AOFF_task_binfmt 0x00000070
#define ASIZ_task_binfmt 0x00000008
#define AOFF_task_exit_code 0x00000078
#define ASIZ_task_pidhash_next 0x00000008
#define AOFF_task_pidhash_pprev 0x000000d8
#define ASIZ_task_pidhash_pprev 0x00000008
-#define AOFF_task_tarray_ptr 0x000000e0
-#define ASIZ_task_tarray_ptr 0x00000008
-#define AOFF_task_wait_chldexit 0x000000e8
-#define ASIZ_task_wait_chldexit 0x00000028
-#define AOFF_task_vfork_sem 0x00000110
+#define AOFF_task_wait_chldexit 0x000000e0
+#define ASIZ_task_wait_chldexit 0x00000020
+#define AOFF_task_vfork_sem 0x00000100
#define ASIZ_task_vfork_sem 0x00000008
-#define AOFF_task_policy 0x00000118
+#define AOFF_task_policy 0x00000108
#define ASIZ_task_policy 0x00000008
-#define AOFF_task_rt_priority 0x00000120
+#define AOFF_task_rt_priority 0x00000110
#define ASIZ_task_rt_priority 0x00000008
-#define AOFF_task_it_real_value 0x00000128
+#define AOFF_task_it_real_value 0x00000118
#define ASIZ_task_it_real_value 0x00000008
-#define AOFF_task_it_prof_value 0x00000130
+#define AOFF_task_it_prof_value 0x00000120
#define ASIZ_task_it_prof_value 0x00000008
-#define AOFF_task_it_virt_value 0x00000138
+#define AOFF_task_it_virt_value 0x00000128
#define ASIZ_task_it_virt_value 0x00000008
-#define AOFF_task_it_real_incr 0x00000140
+#define AOFF_task_it_real_incr 0x00000130
#define ASIZ_task_it_real_incr 0x00000008
-#define AOFF_task_it_prof_incr 0x00000148
+#define AOFF_task_it_prof_incr 0x00000138
#define ASIZ_task_it_prof_incr 0x00000008
-#define AOFF_task_it_virt_incr 0x00000150
+#define AOFF_task_it_virt_incr 0x00000140
#define ASIZ_task_it_virt_incr 0x00000008
-#define AOFF_task_real_timer 0x00000158
+#define AOFF_task_real_timer 0x00000148
#define ASIZ_task_real_timer 0x00000028
-#define AOFF_task_times 0x00000180
+#define AOFF_task_times 0x00000170
#define ASIZ_task_times 0x00000020
-#define AOFF_task_start_time 0x000001a0
+#define AOFF_task_start_time 0x00000190
#define ASIZ_task_start_time 0x00000008
-#define AOFF_task_per_cpu_utime 0x000001a8
+#define AOFF_task_per_cpu_utime 0x00000198
#define ASIZ_task_per_cpu_utime 0x00000008
-#define AOFF_task_min_flt 0x000001b8
+#define AOFF_task_min_flt 0x000001a8
#define ASIZ_task_min_flt 0x00000008
-#define AOFF_task_maj_flt 0x000001c0
+#define AOFF_task_maj_flt 0x000001b0
#define ASIZ_task_maj_flt 0x00000008
-#define AOFF_task_nswap 0x000001c8
+#define AOFF_task_nswap 0x000001b8
#define ASIZ_task_nswap 0x00000008
-#define AOFF_task_cmin_flt 0x000001d0
+#define AOFF_task_cmin_flt 0x000001c0
#define ASIZ_task_cmin_flt 0x00000008
-#define AOFF_task_cmaj_flt 0x000001d8
+#define AOFF_task_cmaj_flt 0x000001c8
#define ASIZ_task_cmaj_flt 0x00000008
-#define AOFF_task_cnswap 0x000001e0
+#define AOFF_task_cnswap 0x000001d0
#define ASIZ_task_cnswap 0x00000008
-#define AOFF_task_uid 0x000001ec
+#define AOFF_task_uid 0x000001dc
#define ASIZ_task_uid 0x00000004
-#define AOFF_task_euid 0x000001f0
+#define AOFF_task_euid 0x000001e0
#define ASIZ_task_euid 0x00000004
-#define AOFF_task_suid 0x000001f4
+#define AOFF_task_suid 0x000001e4
#define ASIZ_task_suid 0x00000004
-#define AOFF_task_fsuid 0x000001f8
+#define AOFF_task_fsuid 0x000001e8
#define ASIZ_task_fsuid 0x00000004
-#define AOFF_task_gid 0x000001fc
+#define AOFF_task_gid 0x000001ec
#define ASIZ_task_gid 0x00000004
-#define AOFF_task_egid 0x00000200
+#define AOFF_task_egid 0x000001f0
#define ASIZ_task_egid 0x00000004
-#define AOFF_task_sgid 0x00000204
+#define AOFF_task_sgid 0x000001f4
#define ASIZ_task_sgid 0x00000004
-#define AOFF_task_fsgid 0x00000208
+#define AOFF_task_fsgid 0x000001f8
#define ASIZ_task_fsgid 0x00000004
-#define AOFF_task_ngroups 0x0000020c
+#define AOFF_task_ngroups 0x000001fc
#define ASIZ_task_ngroups 0x00000004
-#define AOFF_task_groups 0x00000210
+#define AOFF_task_groups 0x00000200
#define ASIZ_task_groups 0x00000080
-#define AOFF_task_cap_effective 0x00000290
+#define AOFF_task_cap_effective 0x00000280
#define ASIZ_task_cap_effective 0x00000004
-#define AOFF_task_cap_inheritable 0x00000294
+#define AOFF_task_cap_inheritable 0x00000284
#define ASIZ_task_cap_inheritable 0x00000004
-#define AOFF_task_cap_permitted 0x00000298
+#define AOFF_task_cap_permitted 0x00000288
#define ASIZ_task_cap_permitted 0x00000004
-#define AOFF_task_user 0x000002a0
+#define AOFF_task_user 0x00000290
#define ASIZ_task_user 0x00000008
-#define AOFF_task_rlim 0x000002a8
+#define AOFF_task_rlim 0x00000298
#define ASIZ_task_rlim 0x000000a0
-#define AOFF_task_used_math 0x00000348
+#define AOFF_task_used_math 0x00000338
#define ASIZ_task_used_math 0x00000002
-#define AOFF_task_comm 0x0000034a
+#define AOFF_task_comm 0x0000033a
#define ASIZ_task_comm 0x00000010
-#define AOFF_task_link_count 0x0000035c
+#define AOFF_task_link_count 0x0000034c
#define ASIZ_task_link_count 0x00000004
-#define AOFF_task_tty 0x00000360
+#define AOFF_task_tty 0x00000350
#define ASIZ_task_tty 0x00000008
-#define AOFF_task_semundo 0x00000368
+#define AOFF_task_semundo 0x00000358
#define ASIZ_task_semundo 0x00000008
-#define AOFF_task_semsleeping 0x00000370
+#define AOFF_task_semsleeping 0x00000360
#define ASIZ_task_semsleeping 0x00000008
-#define AOFF_task_tss 0x00000380
-#define ASIZ_task_tss 0x00000460
-#define AOFF_task_fs 0x000007e0
+#define AOFF_task_thread 0x00000370
+#define ASIZ_task_thread 0x00000460
+#define AOFF_task_fs 0x000007d0
#define ASIZ_task_fs 0x00000008
-#define AOFF_task_files 0x000007e8
+#define AOFF_task_files 0x000007d8
#define ASIZ_task_files 0x00000008
-#define AOFF_task_mm 0x000007f0
+#define AOFF_task_mm 0x000007e0
#define ASIZ_task_mm 0x00000008
-#define AOFF_task_sigmask_lock 0x000007f8
-#define ASIZ_task_sigmask_lock 0x00000001
-#define AOFF_task_sig 0x00000800
+#define AOFF_task_active_mm 0x000007e8
+#define ASIZ_task_active_mm 0x00000008
+#define AOFF_task_sigmask_lock 0x000007f0
+#define ASIZ_task_sigmask_lock 0x00000000
+#define AOFF_task_sig 0x000007f0
#define ASIZ_task_sig 0x00000008
-#define AOFF_task_signal 0x00000808
+#define AOFF_task_signal 0x000007f8
#define ASIZ_task_signal 0x00000008
-#define AOFF_task_blocked 0x00000810
+#define AOFF_task_blocked 0x00000800
#define ASIZ_task_blocked 0x00000008
-#define AOFF_task_sigqueue 0x00000818
+#define AOFF_task_sigqueue 0x00000808
#define ASIZ_task_sigqueue 0x00000008
-#define AOFF_task_sigqueue_tail 0x00000820
+#define AOFF_task_sigqueue_tail 0x00000810
#define ASIZ_task_sigqueue_tail 0x00000008
-#define AOFF_task_sas_ss_sp 0x00000828
+#define AOFF_task_sas_ss_sp 0x00000818
#define ASIZ_task_sas_ss_sp 0x00000008
-#define AOFF_task_sas_ss_size 0x00000830
+#define AOFF_task_sas_ss_size 0x00000820
#define ASIZ_task_sas_ss_size 0x00000008
-#define ASIZ_task 0x00000840
+#define ASIZ_task 0x00000830
#define AOFF_mm_mmap 0x00000000
#define ASIZ_mm_mmap 0x00000008
#define AOFF_mm_mmap_avl 0x00000008
#define ASIZ_mm_mmap_cache 0x00000008
#define AOFF_mm_pgd 0x00000018
#define ASIZ_mm_pgd 0x00000008
-#define AOFF_mm_count 0x00000020
-#define ASIZ_mm_count 0x00000004
-#define AOFF_mm_map_count 0x00000024
+#define AOFF_mm_mm_users 0x00000020
+#define ASIZ_mm_mm_users 0x00000004
+#define AOFF_mm_mm_count 0x00000024
+#define ASIZ_mm_mm_count 0x00000004
+#define AOFF_mm_map_count 0x00000028
#define ASIZ_mm_map_count 0x00000004
-#define AOFF_mm_mmap_sem 0x00000028
-#define ASIZ_mm_mmap_sem 0x00000038
+#define AOFF_mm_mmap_sem 0x00000030
+#define ASIZ_mm_mmap_sem 0x00000030
#define AOFF_mm_page_table_lock 0x00000060
-#define ASIZ_mm_page_table_lock 0x00000001
-#define AOFF_mm_context 0x00000068
+#define ASIZ_mm_page_table_lock 0x00000000
+#define AOFF_mm_context 0x00000060
#define ASIZ_mm_context 0x00000008
-#define AOFF_mm_start_code 0x00000070
+#define AOFF_mm_start_code 0x00000068
#define ASIZ_mm_start_code 0x00000008
-#define AOFF_mm_end_code 0x00000078
+#define AOFF_mm_end_code 0x00000070
#define ASIZ_mm_end_code 0x00000008
-#define AOFF_mm_start_data 0x00000080
+#define AOFF_mm_start_data 0x00000078
#define ASIZ_mm_start_data 0x00000008
-#define AOFF_mm_end_data 0x00000088
+#define AOFF_mm_end_data 0x00000080
#define ASIZ_mm_end_data 0x00000008
-#define AOFF_mm_start_brk 0x00000090
+#define AOFF_mm_start_brk 0x00000088
#define ASIZ_mm_start_brk 0x00000008
-#define AOFF_mm_brk 0x00000098
+#define AOFF_mm_brk 0x00000090
#define ASIZ_mm_brk 0x00000008
-#define AOFF_mm_start_stack 0x000000a0
+#define AOFF_mm_start_stack 0x00000098
#define ASIZ_mm_start_stack 0x00000008
-#define AOFF_mm_arg_start 0x000000a8
+#define AOFF_mm_arg_start 0x000000a0
#define ASIZ_mm_arg_start 0x00000008
-#define AOFF_mm_arg_end 0x000000b0
+#define AOFF_mm_arg_end 0x000000a8
#define ASIZ_mm_arg_end 0x00000008
-#define AOFF_mm_env_start 0x000000b8
+#define AOFF_mm_env_start 0x000000b0
#define ASIZ_mm_env_start 0x00000008
-#define AOFF_mm_env_end 0x000000c0
+#define AOFF_mm_env_end 0x000000b8
#define ASIZ_mm_env_end 0x00000008
-#define AOFF_mm_rss 0x000000c8
+#define AOFF_mm_rss 0x000000c0
#define ASIZ_mm_rss 0x00000008
-#define AOFF_mm_total_vm 0x000000d0
+#define AOFF_mm_total_vm 0x000000c8
#define ASIZ_mm_total_vm 0x00000008
-#define AOFF_mm_locked_vm 0x000000d8
+#define AOFF_mm_locked_vm 0x000000d0
#define ASIZ_mm_locked_vm 0x00000008
-#define AOFF_mm_def_flags 0x000000e0
+#define AOFF_mm_def_flags 0x000000d8
#define ASIZ_mm_def_flags 0x00000008
-#define AOFF_mm_cpu_vm_mask 0x000000e8
+#define AOFF_mm_cpu_vm_mask 0x000000e0
#define ASIZ_mm_cpu_vm_mask 0x00000008
-#define AOFF_mm_swap_cnt 0x000000f0
+#define AOFF_mm_swap_cnt 0x000000e8
#define ASIZ_mm_swap_cnt 0x00000008
-#define AOFF_mm_swap_address 0x000000f8
+#define AOFF_mm_swap_address 0x000000f0
#define ASIZ_mm_swap_address 0x00000008
-#define AOFF_mm_segments 0x00000100
+#define AOFF_mm_segments 0x000000f8
#define ASIZ_mm_segments 0x00000008
-#define ASIZ_mm 0x00000108
+#define ASIZ_mm 0x00000100
#define AOFF_thread_ksp 0x00000000
#define ASIZ_thread_ksp 0x00000008
#define AOFF_thread_wstate 0x00000008
-#define ASIZ_thread_wstate 0x00000002
-#define AOFF_thread_cwp 0x0000000a
-#define ASIZ_thread_cwp 0x00000002
-#define AOFF_thread_flags 0x0000000c
-#define ASIZ_thread_flags 0x00000002
-#define AOFF_thread_current_ds 0x0000000e
+#define ASIZ_thread_wstate 0x00000001
+#define AOFF_thread_cwp 0x00000009
+#define ASIZ_thread_cwp 0x00000001
+#define AOFF_thread_flags 0x0000000a
+#define ASIZ_thread_flags 0x00000001
+#define AOFF_thread_current_ds 0x0000000b
#define ASIZ_thread_current_ds 0x00000001
-#define AOFF_thread_w_saved 0x00000010
-#define ASIZ_thread_w_saved 0x00000002
-#define AOFF_thread_new_signal 0x00000012
-#define ASIZ_thread_new_signal 0x00000002
-#define AOFF_thread_ctx 0x00000014
-#define ASIZ_thread_ctx 0x00000002
+#define AOFF_thread_w_saved 0x0000000c
+#define ASIZ_thread_w_saved 0x00000001
+#define AOFF_thread_fpdepth 0x0000000d
+#define ASIZ_thread_fpdepth 0x00000001
+#define AOFF_thread_fpsaved 0x0000000e
+#define ASIZ_thread_fpsaved 0x00000007
+#define AOFF_thread___pad1 0x00000015
+#define ASIZ_thread___pad1 0x00000003
#define AOFF_thread_kregs 0x00000018
#define ASIZ_thread_kregs 0x00000008
#define AOFF_thread_utraps 0x00000020
#define ASIZ_thread_utraps 0x00000008
-#define AOFF_thread_fpdepth 0x00000028
-#define ASIZ_thread_fpdepth 0x00000001
-#define AOFF_thread_fpsaved 0x00000029
-#define ASIZ_thread_fpsaved 0x00000007
-#define AOFF_thread_gsr 0x00000030
+#define AOFF_thread_gsr 0x00000028
#define ASIZ_thread_gsr 0x00000007
-#define AOFF_thread_xfsr 0x00000038
+#define AOFF_thread___pad2 0x0000002f
+#define ASIZ_thread___pad2 0x00000001
+#define AOFF_thread_sig_address 0x00000030
+#define ASIZ_thread_sig_address 0x00000008
+#define AOFF_thread_sig_desc 0x00000038
+#define ASIZ_thread_sig_desc 0x00000008
+#define AOFF_thread_xfsr 0x00000040
#define ASIZ_thread_xfsr 0x00000038
-#define AOFF_thread_reg_window 0x00000070
+#define AOFF_thread___pad3 0x00000078
+#define ASIZ_thread___pad3 0x00000008
+#define AOFF_thread_reg_window 0x00000080
#define ASIZ_thread_reg_window 0x00000380
-#define AOFF_thread_rwbuf_stkptrs 0x000003f0
+#define AOFF_thread_rwbuf_stkptrs 0x00000400
#define ASIZ_thread_rwbuf_stkptrs 0x00000038
-#define AOFF_thread_sig_address 0x00000428
-#define ASIZ_thread_sig_address 0x00000008
-#define AOFF_thread_sig_desc 0x00000430
-#define ASIZ_thread_sig_desc 0x00000008
#define AOFF_thread_user_cntd0 0x00000438
#define ASIZ_thread_user_cntd0 0x00000008
#define AOFF_thread_user_cntd1 0x00000440
#define ASIZ_task_next_task 0x00000008
#define AOFF_task_prev_task 0x00000058
#define ASIZ_task_prev_task 0x00000008
-#define AOFF_task_next_run 0x00000060
-#define ASIZ_task_next_run 0x00000008
-#define AOFF_task_prev_run 0x00000068
-#define ASIZ_task_prev_run 0x00000008
+#define AOFF_task_run_list 0x00000060
+#define ASIZ_task_run_list 0x00000010
#define AOFF_task_binfmt 0x00000070
#define ASIZ_task_binfmt 0x00000008
#define AOFF_task_exit_code 0x00000078
#define ASIZ_task_pidhash_next 0x00000008
#define AOFF_task_pidhash_pprev 0x000000d8
#define ASIZ_task_pidhash_pprev 0x00000008
-#define AOFF_task_tarray_ptr 0x000000e0
-#define ASIZ_task_tarray_ptr 0x00000008
-#define AOFF_task_wait_chldexit 0x000000e8
+#define AOFF_task_wait_chldexit 0x000000e0
#define ASIZ_task_wait_chldexit 0x00000028
-#define AOFF_task_vfork_sem 0x00000110
+#define AOFF_task_vfork_sem 0x00000108
#define ASIZ_task_vfork_sem 0x00000008
-#define AOFF_task_policy 0x00000118
+#define AOFF_task_policy 0x00000110
#define ASIZ_task_policy 0x00000008
-#define AOFF_task_rt_priority 0x00000120
+#define AOFF_task_rt_priority 0x00000118
#define ASIZ_task_rt_priority 0x00000008
-#define AOFF_task_it_real_value 0x00000128
+#define AOFF_task_it_real_value 0x00000120
#define ASIZ_task_it_real_value 0x00000008
-#define AOFF_task_it_prof_value 0x00000130
+#define AOFF_task_it_prof_value 0x00000128
#define ASIZ_task_it_prof_value 0x00000008
-#define AOFF_task_it_virt_value 0x00000138
+#define AOFF_task_it_virt_value 0x00000130
#define ASIZ_task_it_virt_value 0x00000008
-#define AOFF_task_it_real_incr 0x00000140
+#define AOFF_task_it_real_incr 0x00000138
#define ASIZ_task_it_real_incr 0x00000008
-#define AOFF_task_it_prof_incr 0x00000148
+#define AOFF_task_it_prof_incr 0x00000140
#define ASIZ_task_it_prof_incr 0x00000008
-#define AOFF_task_it_virt_incr 0x00000150
+#define AOFF_task_it_virt_incr 0x00000148
#define ASIZ_task_it_virt_incr 0x00000008
-#define AOFF_task_real_timer 0x00000158
+#define AOFF_task_real_timer 0x00000150
#define ASIZ_task_real_timer 0x00000028
-#define AOFF_task_times 0x00000180
+#define AOFF_task_times 0x00000178
#define ASIZ_task_times 0x00000020
-#define AOFF_task_start_time 0x000001a0
+#define AOFF_task_start_time 0x00000198
#define ASIZ_task_start_time 0x00000008
-#define AOFF_task_per_cpu_utime 0x000001a8
+#define AOFF_task_per_cpu_utime 0x000001a0
#define ASIZ_task_per_cpu_utime 0x00000100
-#define AOFF_task_min_flt 0x000003a8
+#define AOFF_task_min_flt 0x000003a0
#define ASIZ_task_min_flt 0x00000008
-#define AOFF_task_maj_flt 0x000003b0
+#define AOFF_task_maj_flt 0x000003a8
#define ASIZ_task_maj_flt 0x00000008
-#define AOFF_task_nswap 0x000003b8
+#define AOFF_task_nswap 0x000003b0
#define ASIZ_task_nswap 0x00000008
-#define AOFF_task_cmin_flt 0x000003c0
+#define AOFF_task_cmin_flt 0x000003b8
#define ASIZ_task_cmin_flt 0x00000008
-#define AOFF_task_cmaj_flt 0x000003c8
+#define AOFF_task_cmaj_flt 0x000003c0
#define ASIZ_task_cmaj_flt 0x00000008
-#define AOFF_task_cnswap 0x000003d0
+#define AOFF_task_cnswap 0x000003c8
#define ASIZ_task_cnswap 0x00000008
-#define AOFF_task_uid 0x000003dc
+#define AOFF_task_uid 0x000003d4
#define ASIZ_task_uid 0x00000004
-#define AOFF_task_euid 0x000003e0
+#define AOFF_task_euid 0x000003d8
#define ASIZ_task_euid 0x00000004
-#define AOFF_task_suid 0x000003e4
+#define AOFF_task_suid 0x000003dc
#define ASIZ_task_suid 0x00000004
-#define AOFF_task_fsuid 0x000003e8
+#define AOFF_task_fsuid 0x000003e0
#define ASIZ_task_fsuid 0x00000004
-#define AOFF_task_gid 0x000003ec
+#define AOFF_task_gid 0x000003e4
#define ASIZ_task_gid 0x00000004
-#define AOFF_task_egid 0x000003f0
+#define AOFF_task_egid 0x000003e8
#define ASIZ_task_egid 0x00000004
-#define AOFF_task_sgid 0x000003f4
+#define AOFF_task_sgid 0x000003ec
#define ASIZ_task_sgid 0x00000004
-#define AOFF_task_fsgid 0x000003f8
+#define AOFF_task_fsgid 0x000003f0
#define ASIZ_task_fsgid 0x00000004
-#define AOFF_task_ngroups 0x000003fc
+#define AOFF_task_ngroups 0x000003f4
#define ASIZ_task_ngroups 0x00000004
-#define AOFF_task_groups 0x00000400
+#define AOFF_task_groups 0x000003f8
#define ASIZ_task_groups 0x00000080
-#define AOFF_task_cap_effective 0x00000480
+#define AOFF_task_cap_effective 0x00000478
#define ASIZ_task_cap_effective 0x00000004
-#define AOFF_task_cap_inheritable 0x00000484
+#define AOFF_task_cap_inheritable 0x0000047c
#define ASIZ_task_cap_inheritable 0x00000004
-#define AOFF_task_cap_permitted 0x00000488
+#define AOFF_task_cap_permitted 0x00000480
#define ASIZ_task_cap_permitted 0x00000004
-#define AOFF_task_user 0x00000490
+#define AOFF_task_user 0x00000488
#define ASIZ_task_user 0x00000008
-#define AOFF_task_rlim 0x00000498
+#define AOFF_task_rlim 0x00000490
#define ASIZ_task_rlim 0x000000a0
-#define AOFF_task_used_math 0x00000538
+#define AOFF_task_used_math 0x00000530
#define ASIZ_task_used_math 0x00000002
-#define AOFF_task_comm 0x0000053a
+#define AOFF_task_comm 0x00000532
#define ASIZ_task_comm 0x00000010
-#define AOFF_task_link_count 0x0000054c
+#define AOFF_task_link_count 0x00000544
#define ASIZ_task_link_count 0x00000004
-#define AOFF_task_tty 0x00000550
+#define AOFF_task_tty 0x00000548
#define ASIZ_task_tty 0x00000008
-#define AOFF_task_semundo 0x00000558
+#define AOFF_task_semundo 0x00000550
#define ASIZ_task_semundo 0x00000008
-#define AOFF_task_semsleeping 0x00000560
+#define AOFF_task_semsleeping 0x00000558
#define ASIZ_task_semsleeping 0x00000008
-#define AOFF_task_tss 0x00000570
-#define ASIZ_task_tss 0x00000460
-#define AOFF_task_fs 0x000009d0
+#define AOFF_task_thread 0x00000560
+#define ASIZ_task_thread 0x00000460
+#define AOFF_task_fs 0x000009c0
#define ASIZ_task_fs 0x00000008
-#define AOFF_task_files 0x000009d8
+#define AOFF_task_files 0x000009c8
#define ASIZ_task_files 0x00000008
-#define AOFF_task_mm 0x000009e0
+#define AOFF_task_mm 0x000009d0
#define ASIZ_task_mm 0x00000008
-#define AOFF_task_sigmask_lock 0x000009e8
+#define AOFF_task_active_mm 0x000009d8
+#define ASIZ_task_active_mm 0x00000008
+#define AOFF_task_sigmask_lock 0x000009e0
#define ASIZ_task_sigmask_lock 0x00000001
-#define AOFF_task_sig 0x000009f0
+#define AOFF_task_sig 0x000009e8
#define ASIZ_task_sig 0x00000008
-#define AOFF_task_signal 0x000009f8
+#define AOFF_task_signal 0x000009f0
#define ASIZ_task_signal 0x00000008
-#define AOFF_task_blocked 0x00000a00
+#define AOFF_task_blocked 0x000009f8
#define ASIZ_task_blocked 0x00000008
-#define AOFF_task_sigqueue 0x00000a08
+#define AOFF_task_sigqueue 0x00000a00
#define ASIZ_task_sigqueue 0x00000008
-#define AOFF_task_sigqueue_tail 0x00000a10
+#define AOFF_task_sigqueue_tail 0x00000a08
#define ASIZ_task_sigqueue_tail 0x00000008
-#define AOFF_task_sas_ss_sp 0x00000a18
+#define AOFF_task_sas_ss_sp 0x00000a10
#define ASIZ_task_sas_ss_sp 0x00000008
-#define AOFF_task_sas_ss_size 0x00000a20
+#define AOFF_task_sas_ss_size 0x00000a18
#define ASIZ_task_sas_ss_size 0x00000008
-#define ASIZ_task 0x00000a30
+#define ASIZ_task 0x00000a20
#define AOFF_mm_mmap 0x00000000
#define ASIZ_mm_mmap 0x00000008
#define AOFF_mm_mmap_avl 0x00000008
#define ASIZ_mm_mmap_cache 0x00000008
#define AOFF_mm_pgd 0x00000018
#define ASIZ_mm_pgd 0x00000008
-#define AOFF_mm_count 0x00000020
-#define ASIZ_mm_count 0x00000004
-#define AOFF_mm_map_count 0x00000024
+#define AOFF_mm_mm_users 0x00000020
+#define ASIZ_mm_mm_users 0x00000004
+#define AOFF_mm_mm_count 0x00000024
+#define ASIZ_mm_mm_count 0x00000004
+#define AOFF_mm_map_count 0x00000028
#define ASIZ_mm_map_count 0x00000004
-#define AOFF_mm_mmap_sem 0x00000028
+#define AOFF_mm_mmap_sem 0x00000030
#define ASIZ_mm_mmap_sem 0x00000038
-#define AOFF_mm_page_table_lock 0x00000060
+#define AOFF_mm_page_table_lock 0x00000068
#define ASIZ_mm_page_table_lock 0x00000001
-#define AOFF_mm_context 0x00000068
+#define AOFF_mm_context 0x00000070
#define ASIZ_mm_context 0x00000008
-#define AOFF_mm_start_code 0x00000070
+#define AOFF_mm_start_code 0x00000078
#define ASIZ_mm_start_code 0x00000008
-#define AOFF_mm_end_code 0x00000078
+#define AOFF_mm_end_code 0x00000080
#define ASIZ_mm_end_code 0x00000008
-#define AOFF_mm_start_data 0x00000080
+#define AOFF_mm_start_data 0x00000088
#define ASIZ_mm_start_data 0x00000008
-#define AOFF_mm_end_data 0x00000088
+#define AOFF_mm_end_data 0x00000090
#define ASIZ_mm_end_data 0x00000008
-#define AOFF_mm_start_brk 0x00000090
+#define AOFF_mm_start_brk 0x00000098
#define ASIZ_mm_start_brk 0x00000008
-#define AOFF_mm_brk 0x00000098
+#define AOFF_mm_brk 0x000000a0
#define ASIZ_mm_brk 0x00000008
-#define AOFF_mm_start_stack 0x000000a0
+#define AOFF_mm_start_stack 0x000000a8
#define ASIZ_mm_start_stack 0x00000008
-#define AOFF_mm_arg_start 0x000000a8
+#define AOFF_mm_arg_start 0x000000b0
#define ASIZ_mm_arg_start 0x00000008
-#define AOFF_mm_arg_end 0x000000b0
+#define AOFF_mm_arg_end 0x000000b8
#define ASIZ_mm_arg_end 0x00000008
-#define AOFF_mm_env_start 0x000000b8
+#define AOFF_mm_env_start 0x000000c0
#define ASIZ_mm_env_start 0x00000008
-#define AOFF_mm_env_end 0x000000c0
+#define AOFF_mm_env_end 0x000000c8
#define ASIZ_mm_env_end 0x00000008
-#define AOFF_mm_rss 0x000000c8
+#define AOFF_mm_rss 0x000000d0
#define ASIZ_mm_rss 0x00000008
-#define AOFF_mm_total_vm 0x000000d0
+#define AOFF_mm_total_vm 0x000000d8
#define ASIZ_mm_total_vm 0x00000008
-#define AOFF_mm_locked_vm 0x000000d8
+#define AOFF_mm_locked_vm 0x000000e0
#define ASIZ_mm_locked_vm 0x00000008
-#define AOFF_mm_def_flags 0x000000e0
+#define AOFF_mm_def_flags 0x000000e8
#define ASIZ_mm_def_flags 0x00000008
-#define AOFF_mm_cpu_vm_mask 0x000000e8
+#define AOFF_mm_cpu_vm_mask 0x000000f0
#define ASIZ_mm_cpu_vm_mask 0x00000008
-#define AOFF_mm_swap_cnt 0x000000f0
+#define AOFF_mm_swap_cnt 0x000000f8
#define ASIZ_mm_swap_cnt 0x00000008
-#define AOFF_mm_swap_address 0x000000f8
+#define AOFF_mm_swap_address 0x00000100
#define ASIZ_mm_swap_address 0x00000008
-#define AOFF_mm_segments 0x00000100
+#define AOFF_mm_segments 0x00000108
#define ASIZ_mm_segments 0x00000008
-#define ASIZ_mm 0x00000108
+#define ASIZ_mm 0x00000110
#define AOFF_thread_ksp 0x00000000
#define ASIZ_thread_ksp 0x00000008
#define AOFF_thread_wstate 0x00000008
-#define ASIZ_thread_wstate 0x00000002
-#define AOFF_thread_cwp 0x0000000a
-#define ASIZ_thread_cwp 0x00000002
-#define AOFF_thread_flags 0x0000000c
-#define ASIZ_thread_flags 0x00000002
-#define AOFF_thread_current_ds 0x0000000e
+#define ASIZ_thread_wstate 0x00000001
+#define AOFF_thread_cwp 0x00000009
+#define ASIZ_thread_cwp 0x00000001
+#define AOFF_thread_flags 0x0000000a
+#define ASIZ_thread_flags 0x00000001
+#define AOFF_thread_current_ds 0x0000000b
#define ASIZ_thread_current_ds 0x00000001
-#define AOFF_thread_w_saved 0x00000010
-#define ASIZ_thread_w_saved 0x00000002
-#define AOFF_thread_new_signal 0x00000012
-#define ASIZ_thread_new_signal 0x00000002
-#define AOFF_thread_ctx 0x00000014
-#define ASIZ_thread_ctx 0x00000002
+#define AOFF_thread_w_saved 0x0000000c
+#define ASIZ_thread_w_saved 0x00000001
+#define AOFF_thread_fpdepth 0x0000000d
+#define ASIZ_thread_fpdepth 0x00000001
+#define AOFF_thread_fpsaved 0x0000000e
+#define ASIZ_thread_fpsaved 0x00000007
+#define AOFF_thread___pad1 0x00000015
+#define ASIZ_thread___pad1 0x00000003
#define AOFF_thread_kregs 0x00000018
#define ASIZ_thread_kregs 0x00000008
#define AOFF_thread_utraps 0x00000020
#define ASIZ_thread_utraps 0x00000008
-#define AOFF_thread_fpdepth 0x00000028
-#define ASIZ_thread_fpdepth 0x00000001
-#define AOFF_thread_fpsaved 0x00000029
-#define ASIZ_thread_fpsaved 0x00000007
-#define AOFF_thread_gsr 0x00000030
+#define AOFF_thread_gsr 0x00000028
#define ASIZ_thread_gsr 0x00000007
-#define AOFF_thread_xfsr 0x00000038
+#define AOFF_thread___pad2 0x0000002f
+#define ASIZ_thread___pad2 0x00000001
+#define AOFF_thread_sig_address 0x00000030
+#define ASIZ_thread_sig_address 0x00000008
+#define AOFF_thread_sig_desc 0x00000038
+#define ASIZ_thread_sig_desc 0x00000008
+#define AOFF_thread_xfsr 0x00000040
#define ASIZ_thread_xfsr 0x00000038
-#define AOFF_thread_reg_window 0x00000070
+#define AOFF_thread___pad3 0x00000078
+#define ASIZ_thread___pad3 0x00000008
+#define AOFF_thread_reg_window 0x00000080
#define ASIZ_thread_reg_window 0x00000380
-#define AOFF_thread_rwbuf_stkptrs 0x000003f0
+#define AOFF_thread_rwbuf_stkptrs 0x00000400
#define ASIZ_thread_rwbuf_stkptrs 0x00000038
-#define AOFF_thread_sig_address 0x00000428
-#define ASIZ_thread_sig_address 0x00000008
-#define AOFF_thread_sig_desc 0x00000430
-#define ASIZ_thread_sig_desc 0x00000008
#define AOFF_thread_user_cntd0 0x00000438
#define ASIZ_thread_user_cntd0 0x00000008
#define AOFF_thread_user_cntd1 0x00000440
#define ASIZ_task_next_task 0x00000008
#define AOFF_task_prev_task 0x00000058
#define ASIZ_task_prev_task 0x00000008
-#define AOFF_task_next_run 0x00000060
-#define ASIZ_task_next_run 0x00000008
-#define AOFF_task_prev_run 0x00000068
-#define ASIZ_task_prev_run 0x00000008
+#define AOFF_task_run_list 0x00000060
+#define ASIZ_task_run_list 0x00000010
#define AOFF_task_binfmt 0x00000070
#define ASIZ_task_binfmt 0x00000008
#define AOFF_task_exit_code 0x00000078
#define ASIZ_task_pidhash_next 0x00000008
#define AOFF_task_pidhash_pprev 0x000000d8
#define ASIZ_task_pidhash_pprev 0x00000008
-#define AOFF_task_tarray_ptr 0x000000e0
-#define ASIZ_task_tarray_ptr 0x00000008
-#define AOFF_task_wait_chldexit 0x000000e8
+#define AOFF_task_wait_chldexit 0x000000e0
#define ASIZ_task_wait_chldexit 0x00000030
-#define AOFF_task_vfork_sem 0x00000118
+#define AOFF_task_vfork_sem 0x00000110
#define ASIZ_task_vfork_sem 0x00000008
-#define AOFF_task_policy 0x00000120
+#define AOFF_task_policy 0x00000118
#define ASIZ_task_policy 0x00000008
-#define AOFF_task_rt_priority 0x00000128
+#define AOFF_task_rt_priority 0x00000120
#define ASIZ_task_rt_priority 0x00000008
-#define AOFF_task_it_real_value 0x00000130
+#define AOFF_task_it_real_value 0x00000128
#define ASIZ_task_it_real_value 0x00000008
-#define AOFF_task_it_prof_value 0x00000138
+#define AOFF_task_it_prof_value 0x00000130
#define ASIZ_task_it_prof_value 0x00000008
-#define AOFF_task_it_virt_value 0x00000140
+#define AOFF_task_it_virt_value 0x00000138
#define ASIZ_task_it_virt_value 0x00000008
-#define AOFF_task_it_real_incr 0x00000148
+#define AOFF_task_it_real_incr 0x00000140
#define ASIZ_task_it_real_incr 0x00000008
-#define AOFF_task_it_prof_incr 0x00000150
+#define AOFF_task_it_prof_incr 0x00000148
#define ASIZ_task_it_prof_incr 0x00000008
-#define AOFF_task_it_virt_incr 0x00000158
+#define AOFF_task_it_virt_incr 0x00000150
#define ASIZ_task_it_virt_incr 0x00000008
-#define AOFF_task_real_timer 0x00000160
+#define AOFF_task_real_timer 0x00000158
#define ASIZ_task_real_timer 0x00000028
-#define AOFF_task_times 0x00000188
+#define AOFF_task_times 0x00000180
#define ASIZ_task_times 0x00000020
-#define AOFF_task_start_time 0x000001a8
+#define AOFF_task_start_time 0x000001a0
#define ASIZ_task_start_time 0x00000008
-#define AOFF_task_per_cpu_utime 0x000001b0
+#define AOFF_task_per_cpu_utime 0x000001a8
#define ASIZ_task_per_cpu_utime 0x00000100
-#define AOFF_task_min_flt 0x000003b0
+#define AOFF_task_min_flt 0x000003a8
#define ASIZ_task_min_flt 0x00000008
-#define AOFF_task_maj_flt 0x000003b8
+#define AOFF_task_maj_flt 0x000003b0
#define ASIZ_task_maj_flt 0x00000008
-#define AOFF_task_nswap 0x000003c0
+#define AOFF_task_nswap 0x000003b8
#define ASIZ_task_nswap 0x00000008
-#define AOFF_task_cmin_flt 0x000003c8
+#define AOFF_task_cmin_flt 0x000003c0
#define ASIZ_task_cmin_flt 0x00000008
-#define AOFF_task_cmaj_flt 0x000003d0
+#define AOFF_task_cmaj_flt 0x000003c8
#define ASIZ_task_cmaj_flt 0x00000008
-#define AOFF_task_cnswap 0x000003d8
+#define AOFF_task_cnswap 0x000003d0
#define ASIZ_task_cnswap 0x00000008
-#define AOFF_task_uid 0x000003e4
+#define AOFF_task_uid 0x000003dc
#define ASIZ_task_uid 0x00000004
-#define AOFF_task_euid 0x000003e8
+#define AOFF_task_euid 0x000003e0
#define ASIZ_task_euid 0x00000004
-#define AOFF_task_suid 0x000003ec
+#define AOFF_task_suid 0x000003e4
#define ASIZ_task_suid 0x00000004
-#define AOFF_task_fsuid 0x000003f0
+#define AOFF_task_fsuid 0x000003e8
#define ASIZ_task_fsuid 0x00000004
-#define AOFF_task_gid 0x000003f4
+#define AOFF_task_gid 0x000003ec
#define ASIZ_task_gid 0x00000004
-#define AOFF_task_egid 0x000003f8
+#define AOFF_task_egid 0x000003f0
#define ASIZ_task_egid 0x00000004
-#define AOFF_task_sgid 0x000003fc
+#define AOFF_task_sgid 0x000003f4
#define ASIZ_task_sgid 0x00000004
-#define AOFF_task_fsgid 0x00000400
+#define AOFF_task_fsgid 0x000003f8
#define ASIZ_task_fsgid 0x00000004
-#define AOFF_task_ngroups 0x00000404
+#define AOFF_task_ngroups 0x000003fc
#define ASIZ_task_ngroups 0x00000004
-#define AOFF_task_groups 0x00000408
+#define AOFF_task_groups 0x00000400
#define ASIZ_task_groups 0x00000080
-#define AOFF_task_cap_effective 0x00000488
+#define AOFF_task_cap_effective 0x00000480
#define ASIZ_task_cap_effective 0x00000004
-#define AOFF_task_cap_inheritable 0x0000048c
+#define AOFF_task_cap_inheritable 0x00000484
#define ASIZ_task_cap_inheritable 0x00000004
-#define AOFF_task_cap_permitted 0x00000490
+#define AOFF_task_cap_permitted 0x00000488
#define ASIZ_task_cap_permitted 0x00000004
-#define AOFF_task_user 0x00000498
+#define AOFF_task_user 0x00000490
#define ASIZ_task_user 0x00000008
-#define AOFF_task_rlim 0x000004a0
+#define AOFF_task_rlim 0x00000498
#define ASIZ_task_rlim 0x000000a0
-#define AOFF_task_used_math 0x00000540
+#define AOFF_task_used_math 0x00000538
#define ASIZ_task_used_math 0x00000002
-#define AOFF_task_comm 0x00000542
+#define AOFF_task_comm 0x0000053a
#define ASIZ_task_comm 0x00000010
-#define AOFF_task_link_count 0x00000554
+#define AOFF_task_link_count 0x0000054c
#define ASIZ_task_link_count 0x00000004
-#define AOFF_task_tty 0x00000558
+#define AOFF_task_tty 0x00000550
#define ASIZ_task_tty 0x00000008
-#define AOFF_task_semundo 0x00000560
+#define AOFF_task_semundo 0x00000558
#define ASIZ_task_semundo 0x00000008
-#define AOFF_task_semsleeping 0x00000568
+#define AOFF_task_semsleeping 0x00000560
#define ASIZ_task_semsleeping 0x00000008
-#define AOFF_task_tss 0x00000570
-#define ASIZ_task_tss 0x00000460
+#define AOFF_task_thread 0x00000570
+#define ASIZ_task_thread 0x00000460
#define AOFF_task_fs 0x000009d0
#define ASIZ_task_fs 0x00000008
#define AOFF_task_files 0x000009d8
#define ASIZ_task_files 0x00000008
#define AOFF_task_mm 0x000009e0
#define ASIZ_task_mm 0x00000008
-#define AOFF_task_sigmask_lock 0x000009e8
+#define AOFF_task_active_mm 0x000009e8
+#define ASIZ_task_active_mm 0x00000008
+#define AOFF_task_sigmask_lock 0x000009f0
#define ASIZ_task_sigmask_lock 0x0000000c
-#define AOFF_task_sig 0x000009f8
+#define AOFF_task_sig 0x00000a00
#define ASIZ_task_sig 0x00000008
-#define AOFF_task_signal 0x00000a00
+#define AOFF_task_signal 0x00000a08
#define ASIZ_task_signal 0x00000008
-#define AOFF_task_blocked 0x00000a08
+#define AOFF_task_blocked 0x00000a10
#define ASIZ_task_blocked 0x00000008
-#define AOFF_task_sigqueue 0x00000a10
+#define AOFF_task_sigqueue 0x00000a18
#define ASIZ_task_sigqueue 0x00000008
-#define AOFF_task_sigqueue_tail 0x00000a18
+#define AOFF_task_sigqueue_tail 0x00000a20
#define ASIZ_task_sigqueue_tail 0x00000008
-#define AOFF_task_sas_ss_sp 0x00000a20
+#define AOFF_task_sas_ss_sp 0x00000a28
#define ASIZ_task_sas_ss_sp 0x00000008
-#define AOFF_task_sas_ss_size 0x00000a28
+#define AOFF_task_sas_ss_size 0x00000a30
#define ASIZ_task_sas_ss_size 0x00000008
-#define ASIZ_task 0x00000a30
+#define ASIZ_task 0x00000a40
#define AOFF_mm_mmap 0x00000000
#define ASIZ_mm_mmap 0x00000008
#define AOFF_mm_mmap_avl 0x00000008
#define ASIZ_mm_mmap_cache 0x00000008
#define AOFF_mm_pgd 0x00000018
#define ASIZ_mm_pgd 0x00000008
-#define AOFF_mm_count 0x00000020
-#define ASIZ_mm_count 0x00000004
-#define AOFF_mm_map_count 0x00000024
+#define AOFF_mm_mm_users 0x00000020
+#define ASIZ_mm_mm_users 0x00000004
+#define AOFF_mm_mm_count 0x00000024
+#define ASIZ_mm_mm_count 0x00000004
+#define AOFF_mm_map_count 0x00000028
#define ASIZ_mm_map_count 0x00000004
-#define AOFF_mm_mmap_sem 0x00000028
+#define AOFF_mm_mmap_sem 0x00000030
#define ASIZ_mm_mmap_sem 0x00000040
-#define AOFF_mm_page_table_lock 0x00000068
+#define AOFF_mm_page_table_lock 0x00000070
#define ASIZ_mm_page_table_lock 0x0000000c
-#define AOFF_mm_context 0x00000078
+#define AOFF_mm_context 0x00000080
#define ASIZ_mm_context 0x00000008
-#define AOFF_mm_start_code 0x00000080
+#define AOFF_mm_start_code 0x00000088
#define ASIZ_mm_start_code 0x00000008
-#define AOFF_mm_end_code 0x00000088
+#define AOFF_mm_end_code 0x00000090
#define ASIZ_mm_end_code 0x00000008
-#define AOFF_mm_start_data 0x00000090
+#define AOFF_mm_start_data 0x00000098
#define ASIZ_mm_start_data 0x00000008
-#define AOFF_mm_end_data 0x00000098
+#define AOFF_mm_end_data 0x000000a0
#define ASIZ_mm_end_data 0x00000008
-#define AOFF_mm_start_brk 0x000000a0
+#define AOFF_mm_start_brk 0x000000a8
#define ASIZ_mm_start_brk 0x00000008
-#define AOFF_mm_brk 0x000000a8
+#define AOFF_mm_brk 0x000000b0
#define ASIZ_mm_brk 0x00000008
-#define AOFF_mm_start_stack 0x000000b0
+#define AOFF_mm_start_stack 0x000000b8
#define ASIZ_mm_start_stack 0x00000008
-#define AOFF_mm_arg_start 0x000000b8
+#define AOFF_mm_arg_start 0x000000c0
#define ASIZ_mm_arg_start 0x00000008
-#define AOFF_mm_arg_end 0x000000c0
+#define AOFF_mm_arg_end 0x000000c8
#define ASIZ_mm_arg_end 0x00000008
-#define AOFF_mm_env_start 0x000000c8
+#define AOFF_mm_env_start 0x000000d0
#define ASIZ_mm_env_start 0x00000008
-#define AOFF_mm_env_end 0x000000d0
+#define AOFF_mm_env_end 0x000000d8
#define ASIZ_mm_env_end 0x00000008
-#define AOFF_mm_rss 0x000000d8
+#define AOFF_mm_rss 0x000000e0
#define ASIZ_mm_rss 0x00000008
-#define AOFF_mm_total_vm 0x000000e0
+#define AOFF_mm_total_vm 0x000000e8
#define ASIZ_mm_total_vm 0x00000008
-#define AOFF_mm_locked_vm 0x000000e8
+#define AOFF_mm_locked_vm 0x000000f0
#define ASIZ_mm_locked_vm 0x00000008
-#define AOFF_mm_def_flags 0x000000f0
+#define AOFF_mm_def_flags 0x000000f8
#define ASIZ_mm_def_flags 0x00000008
-#define AOFF_mm_cpu_vm_mask 0x000000f8
+#define AOFF_mm_cpu_vm_mask 0x00000100
#define ASIZ_mm_cpu_vm_mask 0x00000008
-#define AOFF_mm_swap_cnt 0x00000100
+#define AOFF_mm_swap_cnt 0x00000108
#define ASIZ_mm_swap_cnt 0x00000008
-#define AOFF_mm_swap_address 0x00000108
+#define AOFF_mm_swap_address 0x00000110
#define ASIZ_mm_swap_address 0x00000008
-#define AOFF_mm_segments 0x00000110
+#define AOFF_mm_segments 0x00000118
#define ASIZ_mm_segments 0x00000008
-#define ASIZ_mm 0x00000118
+#define ASIZ_mm 0x00000120
#define AOFF_thread_ksp 0x00000000
#define ASIZ_thread_ksp 0x00000008
#define AOFF_thread_wstate 0x00000008
-#define ASIZ_thread_wstate 0x00000002
-#define AOFF_thread_cwp 0x0000000a
-#define ASIZ_thread_cwp 0x00000002
-#define AOFF_thread_flags 0x0000000c
-#define ASIZ_thread_flags 0x00000002
-#define AOFF_thread_current_ds 0x0000000e
+#define ASIZ_thread_wstate 0x00000001
+#define AOFF_thread_cwp 0x00000009
+#define ASIZ_thread_cwp 0x00000001
+#define AOFF_thread_flags 0x0000000a
+#define ASIZ_thread_flags 0x00000001
+#define AOFF_thread_current_ds 0x0000000b
#define ASIZ_thread_current_ds 0x00000001
-#define AOFF_thread_w_saved 0x00000010
-#define ASIZ_thread_w_saved 0x00000002
-#define AOFF_thread_new_signal 0x00000012
-#define ASIZ_thread_new_signal 0x00000002
-#define AOFF_thread_ctx 0x00000014
-#define ASIZ_thread_ctx 0x00000002
+#define AOFF_thread_w_saved 0x0000000c
+#define ASIZ_thread_w_saved 0x00000001
+#define AOFF_thread_fpdepth 0x0000000d
+#define ASIZ_thread_fpdepth 0x00000001
+#define AOFF_thread_fpsaved 0x0000000e
+#define ASIZ_thread_fpsaved 0x00000007
+#define AOFF_thread___pad1 0x00000015
+#define ASIZ_thread___pad1 0x00000003
#define AOFF_thread_kregs 0x00000018
#define ASIZ_thread_kregs 0x00000008
#define AOFF_thread_utraps 0x00000020
#define ASIZ_thread_utraps 0x00000008
-#define AOFF_thread_fpdepth 0x00000028
-#define ASIZ_thread_fpdepth 0x00000001
-#define AOFF_thread_fpsaved 0x00000029
-#define ASIZ_thread_fpsaved 0x00000007
-#define AOFF_thread_gsr 0x00000030
+#define AOFF_thread_gsr 0x00000028
#define ASIZ_thread_gsr 0x00000007
-#define AOFF_thread_xfsr 0x00000038
+#define AOFF_thread___pad2 0x0000002f
+#define ASIZ_thread___pad2 0x00000001
+#define AOFF_thread_sig_address 0x00000030
+#define ASIZ_thread_sig_address 0x00000008
+#define AOFF_thread_sig_desc 0x00000038
+#define ASIZ_thread_sig_desc 0x00000008
+#define AOFF_thread_xfsr 0x00000040
#define ASIZ_thread_xfsr 0x00000038
-#define AOFF_thread_reg_window 0x00000070
+#define AOFF_thread___pad3 0x00000078
+#define ASIZ_thread___pad3 0x00000008
+#define AOFF_thread_reg_window 0x00000080
#define ASIZ_thread_reg_window 0x00000380
-#define AOFF_thread_rwbuf_stkptrs 0x000003f0
+#define AOFF_thread_rwbuf_stkptrs 0x00000400
#define ASIZ_thread_rwbuf_stkptrs 0x00000038
-#define AOFF_thread_sig_address 0x00000428
-#define ASIZ_thread_sig_address 0x00000008
-#define AOFF_thread_sig_desc 0x00000430
-#define ASIZ_thread_sig_desc 0x00000008
#define AOFF_thread_user_cntd0 0x00000438
#define ASIZ_thread_user_cntd0 0x00000008
#define AOFF_thread_user_cntd1 0x00000440
-/* $Id: checksum.h,v 1.12 1999/05/25 16:53:36 jj Exp $ */
+/* $Id: checksum.h,v 1.13 1999/07/30 09:31:13 davem Exp $ */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
@@ -50,7+50,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
int ret;
- unsigned char cur_ds = current->tss.current_ds.seg;
+ unsigned char cur_ds = current->thread.current_ds.seg;
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_P));
ret = csum_partial_copy_sparc64(src, dst, len, sum);
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (cur_ds));
-/* $Id: elf.h,v 1.19 1999/06/11 13:26:04 jj Exp $ */
+/* $Id: elf.h,v 1.20 1999/07/30 09:31:14 davem Exp $ */
#ifndef __ASM_SPARC64_ELF_H
#define __ASM_SPARC64_ELF_H
@@ -67,17+67,30 @@ typedef struct { #define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) \
-do { \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- current->tss.flags |= SPARC_FLAG_32BIT; \
- else \
- current->tss.flags &= ~SPARC_FLAG_32BIT; \
- \
- if (ibcs2) \
- current->personality = PER_SVR4; \
- else if (current->personality != PER_LINUX32) \
- current->personality = PER_LINUX; \
+#define SET_PERSONALITY(ex, ibcs2) \
+do { unsigned char flags = current->thread.flags; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ flags |= SPARC_FLAG_32BIT; \
+ else \
+ flags &= ~SPARC_FLAG_32BIT; \
+ if (flags != current->thread.flags) { \
+ unsigned long pgd_cache = 0UL; \
+ if (flags & SPARC_FLAG_32BIT) \
+ pgd_cache = \
+ pgd_val(current->mm->pgd[0])<<11UL; \
+ __asm__ __volatile__( \
+ "stxa\t%0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (pgd_cache), \
+ "r" (TSB_REG), \
+ "i" (ASI_DMMU)); \
+ current->thread.flags = flags; \
+ } \
+ \
+ if (ibcs2) \
+ current->personality = PER_SVR4; \
+ else if (current->personality != PER_LINUX32) \
+ current->personality = PER_LINUX; \
} while (0)
#endif
#ifndef _SPARC_INIT_H
#define _SPARC_INIT_H
+typedef int (*initcall_t)(void);
+
+extern initcall_t __initcall_start, __initcall_end;
+
+struct kernel_param {
+ const char *str;
+ int (*setup_func)(char *);
+};
+
+extern struct kernel_param __setup_start, __setup_end;
+
+/* Used for initialization calls.. */
+#define __initcall(fn) \
+ static __attribute__ ((unused,__section__ (".initcall.init"))) initcall_t __initcall_##fn = fn
+
+/* Used for kernel command line parameter setup */
+#define __setup(str, fn) \
+ static __attribute__ ((unused,__section__ (".setup.init"))) struct kernel_param __setup_##fn = { str, fn }
+
+
#define __init __attribute__ ((__section__ (".text.init")))
#define __initdata __attribute__ ((__section__ (".data.init")))
#define __initfunc(__arginit) \
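The __setup() and __initcall() macros above register static descriptors in the new .setup.init and .initcall.init sections, bracketed by the __setup_start/__setup_end and __initcall_start/__initcall_end symbols declared here, so generic startup code can walk them instead of calling each setup routine by hand. A minimal sketch of how a driver could use the two macros; the foo_* names are hypothetical and for illustration only:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int foo_enabled __initdata = 0;

    /* Parse a "foo=" option from the kernel command line. */
    static int __init foo_setup(char *str)
    {
            foo_enabled = (*str != '0');
            return 1;                       /* option consumed */
    }
    __setup("foo=", foo_setup);

    /* Run once during boot, after command line parsing. */
    static int __init foo_init(void)
    {
            if (foo_enabled)
                    printk("foo: enabled\n");
            return 0;
    }
    __initcall(foo_init);

Each invocation emits one entry into its init section; the kernel only has to iterate from __setup_start to __setup_end (and __initcall_start to __initcall_end) at boot.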
-/* $Id: mmu_context.h,v 1.36 1999/05/25 16:53:34 jj Exp $ */
-#ifndef __SPARC64_MMU_CONTEXT_H
-#define __SPARC64_MMU_CONTEXT_H
-
-/* Derived heavily from Linus's Alpha/AXP ASN code... */
-
-#include <asm/system.h>
-#include <asm/spitfire.h>
-#include <asm/spinlock.h>
-
-#define NO_CONTEXT 0
-
-#ifndef __ASSEMBLY__
-
-extern unsigned long tlb_context_cache;
-extern unsigned long mmu_context_bmap[];
-
-#define CTX_VERSION_SHIFT (PAGE_SHIFT - 3)
-#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
-#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
-
-extern void get_new_mmu_context(struct mm_struct *mm);
-
-/* Initialize/destroy the context related info for a new mm_struct
- * instance.
- */
-#define init_new_context(__mm) ((__mm)->context = NO_CONTEXT)
-
-/* Kernel threads like rpciod and nfsd drop their mm, and then use
- * init_mm, when this happens we must make sure the secondary context is
- * updated as well. Otherwise we have disasters relating to
- * set_fs/get_fs usage later on.
- *
- * Also we can only clear the mmu_context_bmap bit when this is
- * the final reference to the address space.
- */
-#define destroy_context(__mm) do { \
- if ((__mm)->context != NO_CONTEXT && \
- atomic_read(&(__mm)->count) == 1) { \
- if (!(((__mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))\
- clear_bit((__mm)->context & ~(CTX_VERSION_MASK), \
- mmu_context_bmap); \
- (__mm)->context = NO_CONTEXT; \
- if(current->mm == (__mm)) { \
- current->tss.ctx = 0; \
- spitfire_set_secondary_context(0); \
- __asm__ __volatile__("flush %g6"); \
- } \
- } \
-} while (0)
-
-/* The caller must flush the current set of user windows
- * to the stack (if necessary) before we get here.
- */
-extern __inline__ void __get_mmu_context(struct task_struct *tsk)
-{
- register unsigned long paddr asm("o5");
- register unsigned long pgd_cache asm("o4");
- struct mm_struct *mm = tsk->mm;
- unsigned long asi;
-
- if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
- !(tsk->flags & PF_EXITING)) {
- unsigned long ctx = tlb_context_cache;
- if((mm->context ^ ctx) & CTX_VERSION_MASK)
- get_new_mmu_context(mm);
- tsk->tss.ctx = mm->context & 0x3ff;
- spitfire_set_secondary_context(mm->context & 0x3ff);
- __asm__ __volatile__("flush %g6");
- if(!(mm->cpu_vm_mask & (1UL<<smp_processor_id()))) {
- spitfire_flush_dtlb_secondary_context();
- spitfire_flush_itlb_secondary_context();
- __asm__ __volatile__("flush %g6");
- }
- asi = tsk->tss.current_ds.seg;
- } else {
- tsk->tss.ctx = 0;
- spitfire_set_secondary_context(0);
- __asm__ __volatile__("flush %g6");
- asi = ASI_P;
- }
- /* Sigh, damned include loops... just poke seg directly. */
- __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (asi));
- paddr = __pa(mm->pgd);
- if((tsk->tss.flags & (SPARC_FLAG_32BIT|SPARC_FLAG_KTHREAD)) ==
- (SPARC_FLAG_32BIT))
- pgd_cache = ((unsigned long) mm->pgd[0]) << 11UL;
- else
- pgd_cache = 0;
- __asm__ __volatile__("
- rdpr %%pstate, %%o2
- andn %%o2, %2, %%o3
- wrpr %%o3, %5, %%pstate
- mov %4, %%g4
- mov %0, %%g7
- stxa %1, [%%g4] %3
- wrpr %%o2, 0x0, %%pstate
- " : /* no outputs */
- : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_IE),
- "i" (ASI_DMMU), "i" (TSB_REG), "i" (PSTATE_MG)
- : "o2", "o3");
-}
-
-/* Now we define this as a do nothing macro, because the only
- * generic user right now is the scheduler, and we handle all
- * the atomicity issues by having switch_to() call the above
- * function itself.
- */
-#define get_mmu_context(x) do { } while(0)
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings. Currently,
- * this is always called for 'current', if that changes put appropriate
- * checks here.
- *
- * We set the cpu_vm_mask first to zero to enforce a tlb flush for
- * the new context above, then we set it to the current cpu so the
- * smp tlb flush routines do not get confused.
- */
-#define activate_context(__tsk) \
-do { flushw_user(); \
- (__tsk)->mm->cpu_vm_mask = 0; \
- __get_mmu_context(__tsk); \
- (__tsk)->mm->cpu_vm_mask = (1UL<<smp_processor_id()); \
-} while(0)
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(__SPARC64_MMU_CONTEXT_H) */
+/* $Id: mmu_context.h,v 1.39 1999/08/02 08:39:57 davem Exp $ */
+#ifndef __SPARC64_MMU_CONTEXT_H
+#define __SPARC64_MMU_CONTEXT_H
+
+/* Derived heavily from Linus's Alpha/AXP ASN code... */
+
+#include <asm/system.h>
+#include <asm/spitfire.h>
+#include <asm/spinlock.h>
+
+#ifndef __ASSEMBLY__
+
+extern spinlock_t ctx_alloc_lock;
+extern unsigned long tlb_context_cache;
+extern unsigned long mmu_context_bmap[];
+
+#define CTX_VERSION_SHIFT (PAGE_SHIFT - 3)
+#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
+#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
+#define CTX_VALID(__ctx) \
+ (!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
+#define CTX_HWBITS(__ctx) ((__ctx) & ~CTX_VERSION_MASK)
+
+extern void get_new_mmu_context(struct mm_struct *mm);
+
+/* Initialize a new mmu context. This is invoked when a new
+ * address space instance (unique or shared) is instantiated.
+ * A fresh mm_struct is cleared out to zeros, so this need not
+ * do anything on Sparc64 since the only thing we care about
+ * is that mm->context is an invalid context (ie. zero).
+ */
+#define init_new_context(__tsk, __mm) do { } while(0)
+
+/* Destroy a dead context. This occurs when mmput drops the
+ * mm_users count to zero, the mmaps have been released, and
+ * all the page tables have been flushed. Our job is to destroy
+ * any remaining processor-specific state, and in the sparc64
+ * case this just means freeing up the mmu context ID held by
+ * this task if valid.
+ */
+#define destroy_context(__mm) \
+do { spin_lock(&ctx_alloc_lock); \
+ if (CTX_VALID((__mm)->context)) { \
+ unsigned long nr = CTX_HWBITS((__mm)->context); \
+ mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
+ } \
+ spin_unlock(&ctx_alloc_lock); \
+} while(0)
+
+/* Reload the two core values used by TLB miss handler
+ * processing on sparc64. They are:
+ * 1) The physical address of mm->pgd, when full page
+ * table walks are necessary, this is where the
+ * search begins.
+ * 2) A "PGD cache". For 32-bit tasks only pgd[0] is
+ * ever used since that maps the entire low 4GB
+ * completely. To speed up TLB miss processing we
+ * make this value available to the handlers. This
+ * decreases the amount of memory traffic incurred.
+ */
+#define reload_tlbmiss_state(__tsk, __mm) \
+do { \
+ register unsigned long paddr asm("o5"); \
+ register unsigned long pgd_cache asm("o4"); \
+ paddr = __pa((__mm)->pgd); \
+ pgd_cache = 0UL; \
+ if ((__tsk)->thread.flags & SPARC_FLAG_32BIT) \
+ pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL; \
+ __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
+ "mov %3, %%g4\n\t" \
+ "mov %0, %%g7\n\t" \
+ "stxa %1, [%%g4] %2\n\t" \
+ "wrpr %%g0, 0x096, %%pstate" \
+ : /* no outputs */ \
+ : "r" (paddr), "r" (pgd_cache),\
+ "i" (ASI_DMMU), "i" (TSB_REG)); \
+} while(0)
+
+/* Set MMU context in the actual hardware. */
+#define load_secondary_context(__mm) \
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t" \
+ "flush %%g6" \
+ : /* No outputs */ \
+ : "r" (CTX_HWBITS((__mm)->context)), \
+ "r" (0x10), "i" (0x58))
+
+/* Clean out potential stale TLB entries due to previous
+ * users of this TLB context. We flush TLB contexts
+ * lazily on sparc64.
+ */
+#define clean_secondary_context() \
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" \
+ "stxa %%g0, [%0] %2\n\t" \
+ "flush %%g6" \
+ : /* No outputs */ \
+ : "r" (0x50), "i" (0x5f), "i" (0x57))
+
+/* Switch the current MM context. */
+static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
+{
+ long dirty;
+
+ spin_lock(&mm->page_table_lock);
+ if (CTX_VALID(mm->context))
+ dirty = 0;
+ else
+ dirty = 1;
+ if (dirty || (old_mm != mm)) {
+ unsigned long vm_mask;
+
+ if (dirty)
+ get_new_mmu_context(mm);
+
+ vm_mask = (1UL << cpu);
+ if (!(mm->cpu_vm_mask & vm_mask)) {
+ mm->cpu_vm_mask |= vm_mask;
+ dirty = 1;
+ }
+
+ load_secondary_context(mm);
+ if (dirty != 0)
+ clean_secondary_context();
+ reload_tlbmiss_state(tsk, mm);
+ }
+ spin_unlock(&mm->page_table_lock);
+}
+
+/* Activate a new MM instance for the current task. */
+static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
+{
+ unsigned long vm_mask;
+
+ spin_lock(&mm->page_table_lock);
+ if (!CTX_VALID(mm->context))
+ get_new_mmu_context(mm);
+ vm_mask = (1UL << smp_processor_id());
+ if (!(mm->cpu_vm_mask & vm_mask))
+ mm->cpu_vm_mask |= vm_mask;
+ spin_unlock(&mm->page_table_lock);
+
+ load_secondary_context(mm);
+ clean_secondary_context();
+ reload_tlbmiss_state(current, mm);
+}
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(__SPARC64_MMU_CONTEXT_H) */
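The rewritten mmu_context.h splits mm->context into a generation number in the upper bits, compared against tlb_context_cache by CTX_VALID(), and the hardware context number in the low bits, extracted by CTX_HWBITS(). A small illustrative fragment of that split and of the bitmap arithmetic used by destroy_context(), assuming PAGE_SHIFT == 13 so CTX_VERSION_SHIFT == 10; the numeric values are made up for the example:

    unsigned long ctx     = mm->context;             /* e.g. 0xc05        */
    unsigned long version = ctx & CTX_VERSION_MASK;  /* generation: 0xc00 */
    unsigned long nr      = CTX_HWBITS(ctx);         /* hw context: 0x005 */

    /* The context is usable only while its generation matches the
     * allocator's current one. */
    int valid = CTX_VALID(ctx);

    /* destroy_context() releases the hardware context number like this: */
    mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));

Because bumping the version in tlb_context_cache invalidates every old context at once, switch_mm() above only allocates a new context and cleans the secondary context's TLB entries when CTX_VALID() fails or when a CPU touches the mm for the first time.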
-/* $Id: page.h,v 1.25 1999/06/23 03:53:15 davem Exp $ */
+/* $Id: page.h,v 1.27 1999/07/31 00:07:25 davem Exp $ */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
#ifndef __ASSEMBLY__
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
-#define PAGE_BUG(page) do { \
- BUG(); } while (0)
+#define BUG() __builtin_trap()
+#define PAGE_BUG(page) BUG()
extern void clear_page(unsigned long page);
extern void copy_page(unsigned long to, unsigned long from);
@@ -85,7+84,7 @@ typedef unsigned long iopgprot_t;
#endif /* (STRICT_MM_TYPECHECKS) */
-#define TASK_UNMAPPED_BASE ((current->tss.flags & SPARC_FLAG_32BIT) ? \
+#define TASK_UNMAPPED_BASE ((current->thread.flags & SPARC_FLAG_32BIT) ? \
(0x0000000070000000UL) : (PAGE_OFFSET))
#endif /* !(__ASSEMBLY__) */
-/* $Id$
+/* $Id: parport.h,v 1.2 1999/07/31 04:48:13 ecd Exp $
* parport.h: sparc64 specific parport initialization and dma.
*
* Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
#undef HAVE_SLOW_DEVICES
-static struct linux_ebus_dma *sparc_ebus_dmas[PARPORT_MAX];
+#define PARPORT_PC_MAX_PORTS PARPORT_MAX
+
+static struct linux_ebus_dma *sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
static __inline__ void
reset_dma(unsigned int dmanr)
@@ -116,6+118,8 @@ get_dma_residue(unsigned int dmanr)
static int __maybe_init parport_pc_init_pci(int irq, int dma);
+static int user_specified __initdata = 0;
+
int __init
parport_pc_init(int *io, int *io_hi, int *irq, int *dma)
{
-/* $Id: pgtable.h,v 1.106 1999/06/27 00:38:33 davem Exp $
+/* $Id: pgtable.h,v 1.109 1999/08/02 08:57:46 jj Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
* is different so we can optimize correctly for 32-bit tasks.
*/
#define REAL_PTRS_PER_PMD (1UL << (PAGE_SHIFT-2))
-#define PTRS_PER_PMD ((const int)((current->tss.flags & SPARC_FLAG_32BIT) ? \
+#define PTRS_PER_PMD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
(REAL_PTRS_PER_PMD >> 2) : (REAL_PTRS_PER_PMD)))
/* We cannot use the top 16G because VPTE table lives there. */
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)
/* Kernel has a separate 44bit address space. */
-#define USER_PTRS_PER_PGD ((const int)((current->tss.flags & SPARC_FLAG_32BIT) ? \
+#define USER_PTRS_PER_PGD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
(1) : (PTRS_PER_PGD)))
#define PTE_TABLE_SIZE 0x2000 /* 1024 entries 8 bytes each */
@@ -167,9+167,12 @@ extern void *sparc_init_alloc(unsigned long *kbrk, unsigned long size); /* Cache and TLB flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
-#define flush_cache_mm(mm) flushw_user()
-#define flush_cache_range(mm, start, end) flushw_user()
-#define flush_cache_page(vma, page) flushw_user()
+#define flush_cache_mm(__mm) \
+ do { if ((__mm) == current->mm) flushw_user(); } while(0)
+#define flush_cache_range(mm, start, end) \
+ flush_cache_mm(mm)
+#define flush_cache_page(vma, page) \
+ flush_cache_mm((vma)->vm_mm)
/* These operations are unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_icache_range(start, end) do { } while (0)
@@ -191,16+194,16 @@ extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned #define flush_cache_all() __flush_cache_all()
#define flush_tlb_all() __flush_tlb_all()
-#define flush_tlb_mm(mm) \
-do { if((mm)->context != NO_CONTEXT) \
- __flush_tlb_mm((mm)->context & 0x3ff, SECONDARY_CONTEXT); \
+#define flush_tlb_mm(__mm) \
+do { if(CTX_VALID((__mm)->context)) \
+ __flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)
-#define flush_tlb_range(mm, start, end) \
-do { if((mm)->context != NO_CONTEXT) { \
+#define flush_tlb_range(__mm, start, end) \
+do { if(CTX_VALID((__mm)->context)) { \
unsigned long __start = (start)&PAGE_MASK; \
unsigned long __end = (end)&PAGE_MASK; \
- __flush_tlb_range((mm)->context & 0x3ff, __start, \
+ __flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
SECONDARY_CONTEXT, __end, PAGE_SIZE, \
(__end - __start)); \
} \
@@ -208,8+211,8 @@ do { if((mm)->context != NO_CONTEXT) { \
#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
- if(__mm->context != NO_CONTEXT) \
- __flush_tlb_page(__mm->context & 0x3ff, (page)&PAGE_MASK, \
+ if(CTX_VALID(__mm->context)) \
+ __flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
SECONDARY_CONTEXT); \
} while(0)
@@ -227,14+230,14 @@ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
extern __inline__ void flush_tlb_mm(struct mm_struct *mm)
{
- if(mm->context != NO_CONTEXT)
+ if (CTX_VALID(mm->context))
smp_flush_tlb_mm(mm);
}
extern __inline__ void flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- if(mm->context != NO_CONTEXT)
+ if (CTX_VALID(mm->context))
smp_flush_tlb_range(mm, start, end);
}
@@ -242,7+245,7 @@ extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long {
struct mm_struct *mm = vma->vm_mm;
- if(mm->context != NO_CONTEXT)
+ if (CTX_VALID(mm->context))
smp_flush_tlb_page(mm, page);
}
@@ -520,28+523,6 @@ extern int do_check_pgt_cache(int, int); #define set_pgdir(address, entry) do { } while(0)
extern pgd_t swapper_pg_dir[1];
-
-extern inline void SET_PAGE_DIR(struct task_struct *tsk, pgd_t *pgdir)
-{
- if(pgdir != swapper_pg_dir && tsk == current) {
- register unsigned long paddr asm("o5");
-
- paddr = __pa(pgdir);
- __asm__ __volatile__ ("
- rdpr %%pstate, %%o4
- wrpr %%o4, %1, %%pstate
- mov %3, %%g4
- mov %0, %%g7
- stxa %%g0, [%%g4] %2
- wrpr %%o4, 0x0, %%pstate
- " : /* No outputs */
- : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE),
- "i" (ASI_DMMU), "i" (TSB_REG)
- : "o4");
- flush_tlb_mm(current->mm);
- }
-}
-
/* Routines for getting a dvma scsi buffer. */
struct mmu_sglist {
char *addr;
-/* $Id: processor.h,v 1.55 1999/05/27 04:52:54 davem Exp $
+/* $Id: processor.h,v 1.56 1999/07/30 09:31:20 davem Exp $
* include/asm-sparc64/processor.h
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -47,66+47,61 @@ typedef struct {
/* The Sparc processor specific thread struct. */
struct thread_struct {
-/*DC1*/ unsigned long ksp __attribute__ ((aligned(16)));
- unsigned short wstate;
- unsigned short cwp;
- unsigned short flags;
+ /* D$ line 1 */
+ unsigned long ksp __attribute__ ((aligned(16)));
+ unsigned char wstate, cwp, flags;
mm_segment_t current_ds;
-
-/*DC2*/ unsigned short w_saved;
- unsigned short new_signal;
- unsigned short ctx;
- struct pt_regs *kregs;
-
-/*DC3*/ unsigned long *utraps;
- unsigned char fpdepth;
+ unsigned char w_saved, fpdepth;
unsigned char fpsaved[7];
+ unsigned char __pad1[3];
+ struct pt_regs *kregs;
+
+ /* D$ line 2 */
+ unsigned long *utraps;
+ unsigned char gsr[7];
+ unsigned char __pad2;
+ unsigned long sig_address;
+ unsigned long sig_desc;
-/*DC4*/ unsigned char gsr[7];
+ /* D$ lines 3 and 4 */
unsigned long xfsr[7];
+ unsigned long __pad3;
- struct reg_window reg_window[NSWINS] __attribute__ ((aligned (16)));
- unsigned long rwbuf_stkptrs[NSWINS] __attribute__ ((aligned (8)));
-
- unsigned long sig_address __attribute__ ((aligned (8)));
- unsigned long sig_desc;
+ struct reg_window reg_window[NSWINS];
+ unsigned long rwbuf_stkptrs[NSWINS];
/* Performance counter state */
u64 *user_cntd0, *user_cntd1;
u64 kernel_cntd0, kernel_cntd1;
u64 pcr_reg;
-
};
#endif /* !(__ASSEMBLY__) */
-#define SPARC_FLAG_KTHREAD 0x010 /* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED 0x020 /* is allowed to do unaligned accesses */
-#define SPARC_FLAG_NEWSIGNALS 0x040 /* task wants new-style signals */
-#define SPARC_FLAG_32BIT 0x080 /* task is older 32-bit binary */
-#define SPARC_FLAG_NEWCHILD 0x100 /* task is just-spawned child process */
-#define SPARC_FLAG_PERFCTR 0x200 /* task has performance counters active */
+#define SPARC_FLAG_UNALIGNED 0x01 /* is allowed to do unaligned accesses */
+#define SPARC_FLAG_NEWSIGNALS 0x02 /* task wants new-style signals */
+#define SPARC_FLAG_32BIT 0x04 /* task is older 32-bit binary */
+#define SPARC_FLAG_NEWCHILD 0x08 /* task is just-spawned child process */
+#define SPARC_FLAG_PERFCTR 0x10 /* task has performance counters active */
#define INIT_MMAP { &init_mm, 0xfffff80000000000, 0xfffff80001000000, \
NULL, PAGE_SHARED , VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-#define INIT_TSS { \
-/* ksp, wstate, cwp, flags, current_ds, */ \
- 0, 0, 0, SPARC_FLAG_KTHREAD, KERNEL_DS, \
-/* w_saved, new_signal, ctx, kregs, */ \
- 0, 0, 0, 0, \
-/* utraps, */ \
- 0, \
-/* fpdepth, fpsaved, gsr, xfsr */ \
- 0, { 0 }, { 0 }, { 0 }, \
-/* reg_window */ \
- { { { 0, }, { 0, } }, }, \
-/* rwbuf_stkptrs */ \
- { 0, 0, 0, 0, 0, 0, 0, }, \
-/* sig_address, sig_desc */ \
- 0, 0, \
+#define INIT_THREAD { \
+/* ksp, wstate, cwp, flags, current_ds, */ \
+ 0, 0, 0, 0, KERNEL_DS, \
+/* w_saved, fpdepth, fpsaved, pad1, kregs, */ \
+ 0, 0, { 0 }, { 0 }, 0, \
+/* utraps, gsr, pad2, sig_address, sig_desc, */ \
+ 0, { 0 }, 0, 0, 0, \
+/* xfsr, pad3, */ \
+ { 0 }, 0, \
+/* reg_window */ \
+ { { { 0, }, { 0, } }, }, \
+/* rwbuf_stkptrs */ \
+ { 0, 0, 0, 0, 0, 0, 0, }, \
/* user_cntd0, user_cntd1, kernel_cntd0, kernel_cntd1, pcr_reg */ \
- 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, \
}
#ifndef __ASSEMBLY__
regs->tpc = ((pc & (~3)) - 4); \
regs->tnpc = regs->tpc + 4; \
regs->y = 0; \
- current->tss.flags &= ~SPARC_FLAG_32BIT; \
- current->tss.wstate = (1 << 3); \
- if (current->tss.utraps) { \
- if (*(current->tss.utraps) < 2) \
- kfree (current->tss.utraps); \
+ current->thread.wstate = (1 << 3); \
+ if (current->thread.utraps) { \
+ if (*(current->thread.utraps) < 2) \
+ kfree (current->thread.utraps); \
else \
- (*(current->tss.utraps))--; \
- current->tss.utraps = NULL; \
+ (*(current->thread.utraps))--; \
+ current->thread.utraps = NULL; \
} \
__asm__ __volatile__( \
"stx %%g0, [%0 + %2 + 0x00]\n\t" \
regs->tpc = ((pc & (~3)) - 4); \
regs->tnpc = regs->tpc + 4; \
regs->y = 0; \
- current->tss.flags |= SPARC_FLAG_32BIT; \
- current->tss.wstate = (2 << 3); \
- if (current->tss.utraps) { \
- if (*(current->tss.utraps) < 2) \
- kfree (current->tss.utraps); \
+ current->thread.wstate = (2 << 3); \
+ if (current->thread.utraps) { \
+ if (*(current->thread.utraps) < 2) \
+ kfree (current->thread.utraps); \
else \
- (*(current->tss.utraps))--; \
- current->tss.utraps = NULL; \
+ (*(current->thread.utraps))--; \
+ current->thread.utraps = NULL; \
} \
__asm__ __volatile__( \
- "stxa %3, [%4] %5\n\t" \
"stx %%g0, [%0 + %2 + 0x00]\n\t" \
"stx %%g0, [%0 + %2 + 0x08]\n\t" \
"stx %%g0, [%0 + %2 + 0x10]\n\t" \
"wrpr %%g0, (2 << 3), %%wstate\n\t" \
: \
: "r" (regs), "r" (sp - REGWIN32_SZ), \
- "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0])), \
- "r" (((unsigned long)current->mm->pgd[0])<<11UL), \
- "r" (TSB_REG), "i" (ASI_DMMU)); \
+ "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
} while(0)
/* Free all resources held by a thread. */
#define release_thread(tsk) do { } while(0)
-#define copy_segments(nr, tsk, mm) do { } while (0)
+#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
#ifdef __KERNEL__
+#define THREAD_SIZE (2*PAGE_SIZE)
/* Allocation and freeing of task_struct and kernel stack. */
#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
-/* $Id: resource.h,v 1.4 1998/11/19 20:01:49 davem Exp $
+/* $Id: resource.h,v 1.5 1999/07/30 09:31:21 davem Exp $
* resource.h: Resource definitions.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
#define RLIM_NLIMITS 10
#ifdef __KERNEL__
-#define INIT_RLIMITS \
-{ \
- {LONG_MAX, LONG_MAX}, {LONG_MAX, LONG_MAX}, \
- {LONG_MAX, LONG_MAX}, {_STK_LIM, LONG_MAX}, \
- { 0, LONG_MAX}, {LONG_MAX, LONG_MAX}, \
- {INR_OPEN, INR_OPEN}, {MAX_TASKS_PER_USER, MAX_TASKS_PER_USER}, \
- {LONG_MAX, LONG_MAX}, {LONG_MAX, LONG_MAX} \
+#define INIT_RLIMITS \
+{ \
+ {LONG_MAX, LONG_MAX}, {LONG_MAX, LONG_MAX}, \
+ {LONG_MAX, LONG_MAX}, {_STK_LIM, LONG_MAX}, \
+ { 0, LONG_MAX}, {LONG_MAX, LONG_MAX}, \
+ {INR_OPEN, INR_OPEN}, {0, 0}, \
+ {LONG_MAX, LONG_MAX}, {LONG_MAX, LONG_MAX} \
}
#endif /* __KERNEL__ */
@@ -57,7+57,7 @@ typedef struct siginfo { clock_t _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
void *_addr; /* faulting insn/memory ref. */
int _trapno; /* TRAP # which caused the signal */
@@ -84,7+84,7 @@ typedef struct siginfo32 { /* kill() */
struct {
__kernel_pid_t32 _pid; /* sender's pid */
- __kernel_uid_t32 _uid; /* sender's uid */
+ unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
@@ -96,19+96,20 @@ typedef struct siginfo32 { /* POSIX.1b signals */
struct {
__kernel_pid_t32 _pid; /* sender's pid */
- __kernel_uid_t32 _uid; /* sender's uid */
+ unsigned int _uid; /* sender's uid */
sigval_t32 _sigval;
} _rt;
/* SIGCHLD */
struct {
__kernel_pid_t32 _pid; /* which child */
- int _status; /* exit code */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
__kernel_clock_t32 _utime;
__kernel_clock_t32 _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
u32 _addr; /* faulting insn/memory ref. */
int _trapno;
@@ -144,6+145,7 @@ typedef struct siginfo32 { * si_code values
* Digital reserves positive values for kernel-generated signals.
*/
+#define SI_NOINFO 32767 /* no information in siginfo_t */
#define SI_USER 0 /* sent by kill, sigsend, raise */
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
#define SI_QUEUE -1 /* sent by sigqueue */
@@ -226,6+228,12 @@ typedef struct siginfo32 { #define NSIGPOLL 6
/*
+ * SIGEMT si_codes
+ */
+#define EMT_TAGOVF 1 /* tag overflow */
+#define NSIGEMT 1
+
+/*
* sigevent definitions
*
* It seems likely that SIGEV_THREAD will have to be handled from
#ifndef _SPARC64_SMP_H
#define _SPARC64_SMP_H
-#include <linux/tasks.h>
+#include <linux/threads.h>
#include <asm/asi.h>
#ifndef __ASSEMBLY__
#ifndef __SMP__
-typedef unsigned char spinlock_t;
-#define SPIN_LOCK_UNLOCKED 0
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } spinlock_t;
+# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#else
+ typedef unsigned char spinlock_t;
+# define SPIN_LOCK_UNLOCKED 0
+#endif
#define spin_lock_init(lock) do { } while(0)
-#define spin_lock(lock) do { } while(0)
+#define spin_lock(lock) (void)(lock) /* Avoid warnings about unused variable */
#define spin_trylock(lock) (1)
#define spin_unlock_wait(lock) do { } while(0)
#define spin_unlock(lock) do { } while(0)
@@ -42,12+47,17 @@ do { barrier(); \
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
-typedef unsigned int rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define read_lock(lock) do { } while(0)
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } rwlock_t;
+# define RW_LOCK_UNLOCKED (rwlock_t) { }
+#else
+ typedef unsigned int rwlock_t;
+# define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#endif
+
+#define read_lock(lock) (void)(lock) /* Avoid warnings about unused variable */
#define read_unlock(lock) do { } while(0)
-#define write_lock(lock) do { } while(0)
+#define write_lock(lock) (void)(lock) /* Likewise */
#define write_unlock(lock) do { } while(0)
#define read_lock_irq(lock) cli()
#define read_unlock_irq(lock) sti()
@@ -372,4+382,4 @@ do { unsigned long flags; \
#endif /* !(__ASSEMBLY__) */
-#endif /* !(__SPARC64_SPIN%0_H) */
+#endif /* !(__SPARC64_SPINLOCK_H) */
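A note on the empty-struct form introduced above: with GCC 2.8 and later a struct with no members occupies zero bytes, so uniprocessor kernels can still declare lock members and initializers in the usual way at no space cost, and spin_lock()/read_lock()/write_lock() now evaluate their argument so a lock variable used only for locking no longer draws an "unused variable" warning. A standalone sketch of the UP case, not kernel code:

typedef struct { } spinlock_t;			/* zero bytes with GCC >= 2.8 */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#define spin_lock(lock)	(void)(lock)		/* evaluates the argument */

struct counter {
	spinlock_t lock;	/* costs no storage in the structure */
	long value;
};

void demo(void)
{
	spinlock_t lock = SPIN_LOCK_UNLOCKED;
	spin_lock(&lock);	/* expands to (void)(&lock): no warning, no code */
}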
-/* $Id: system.h,v 1.50 1999/05/08 03:03:22 davem Exp $ */
+/* $Id: system.h,v 1.52 1999/08/02 08:39:59 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H
@@ -124,6+124,7 @@ extern __inline__ void flushw_user(void)
}
#define flush_user_windows flushw_user
+#define flush_register_windows flushw_all
/* See what happens when you design the chip correctly?
*
@@ -135,20+136,19 @@ extern __inline__ void flushw_user(void)
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
-do { if (current->tss.flags & SPARC_FLAG_PERFCTR) { \
+do { if (current->thread.flags & SPARC_FLAG_PERFCTR) { \
unsigned long __tmp; \
read_pcr(__tmp); \
- current->tss.pcr_reg = __tmp; \
+ current->thread.pcr_reg = __tmp; \
read_pic(__tmp); \
- current->tss.kernel_cntd0 += (unsigned int)(__tmp); \
- current->tss.kernel_cntd1 += ((__tmp) >> 32); \
+ current->thread.kernel_cntd0 += (unsigned int)(__tmp); \
+ current->thread.kernel_cntd1 += ((__tmp) >> 32); \
} \
save_and_clear_fpu(); \
- __asm__ __volatile__( \
- "flushw\n\t" \
- "wrpr %g0, 0x94, %pstate\n\t"); \
- __get_mmu_context(next); \
- (next)->mm->cpu_vm_mask |= (1UL << smp_processor_id()); \
+ /* If you are tempted to conditionalize the following */ \
+ /* so that ASI is only written if it changes, think again. */ \
+ __asm__ __volatile__("wr %%g0, %0, %%asi" \
+ : : "r" (next->thread.current_ds.seg)); \
__asm__ __volatile__( \
"mov %%g6, %%g5\n\t" \
"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -156,15+156,15 @@ do { if (current->tss.flags & SPARC_FLAG_PERFCTR) { \
"stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
"rdpr %%wstate, %%o5\n\t" \
"stx %%o6, [%%g6 + %3]\n\t" \
- "sth %%o5, [%%g6 + %2]\n\t" \
+ "stb %%o5, [%%g6 + %2]\n\t" \
"rdpr %%cwp, %%o5\n\t" \
- "sth %%o5, [%%g6 + %5]\n\t" \
+ "stb %%o5, [%%g6 + %5]\n\t" \
"mov %1, %%g6\n\t" \
- "lduh [%1 + %5], %%g1\n\t" \
+ "ldub [%1 + %5], %%g1\n\t" \
"wrpr %%g1, %%cwp\n\t" \
"ldx [%%g6 + %3], %%o6\n\t" \
- "lduh [%%g6 + %2], %%o5\n\t" \
- "lduh [%%g6 + %4], %%o7\n\t" \
+ "ldub [%%g6 + %2], %%o5\n\t" \
+ "ldub [%%g6 + %4], %%o7\n\t" \
"mov %%g6, %%l2\n\t" \
"wrpr %%o5, 0x0, %%wstate\n\t" \
"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
@@ -172,22+172,23 @@ do { if (current->tss.flags & SPARC_FLAG_PERFCTR) { \
"wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \
- "andcc %%o7, 0x100, %%g0\n\t" \
+ "andcc %%o7, %6, %%g0\n\t" \
"bne,pn %%icc, ret_from_syscall\n\t" \
" mov %%g5, %0\n\t" \
: "=&r" (last) \
: "r" (next), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.wstate)), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.ksp)), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.flags)), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.cwp)) \
+ "i" ((const unsigned long)(&((struct task_struct *)0)->thread.wstate)),\
+ "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)), \
+ "i" ((const unsigned long)(&((struct task_struct *)0)->thread.flags)),\
+ "i" ((const unsigned long)(&((struct task_struct *)0)->thread.cwp)), \
+ "i" (SPARC_FLAG_NEWCHILD) \
: "cc", "g1", "g2", "g3", "g5", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \
"i0", "i1", "i2", "i3", "i4", "i5", \
"o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
/* If you fuck with this, update ret_from_syscall code too. */ \
- if (current->tss.flags & SPARC_FLAG_PERFCTR) { \
- write_pcr(current->tss.pcr_reg); \
+ if (current->thread.flags & SPARC_FLAG_PERFCTR) { \
+ write_pcr(current->thread.pcr_reg); \
reset_pic(); \
} \
} while(0)
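The "i" constraints in the switch_to() body above compute structure offsets with the classic null-pointer cast; as a side note, the same constants could be expressed with offsetof(), sketched here with an invented macro name:

#include <stddef.h>

/* &((struct task_struct *)0)->thread.ksp evaluates, at compile time,
 * to the byte offset of thread.ksp inside task_struct; offsetof()
 * spells the same constant: */
#define THREAD_KSP_OFF	offsetof(struct task_struct, thread.ksp)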
-/* $Id: ttable.h,v 1.11 1999/03/29 12:38:12 jj Exp $ */
+/* $Id: ttable.h,v 1.12 1999/07/30 09:31:24 davem Exp $ */
#ifndef _SPARC64_TTABLE_H
#define _SPARC64_TTABLE_H
nop;nop;nop;
#define TRAP_UTRAP(handler,lvl) \
- ldx [%g6 + AOFF_task_tss + AOFF_thread_utraps], %g1; \
+ ldx [%g6 + AOFF_task_thread + AOFF_thread_utraps], %g1; \
sethi %hi(109f), %g7; \
brz,pn %g1, utrap; \
or %g7, %lo(109f), %g7; \
-/* $Id: uaccess.h,v 1.30 1999/05/25 16:53:32 jj Exp $ */
+/* $Id: uaccess.h,v 1.31 1999/07/30 09:31:24 davem Exp $ */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-#define get_fs() (current->tss.current_ds)
+#define get_fs() (current->thread.current_ds)
#define get_ds() (KERNEL_DS)
#define segment_eq(a,b) ((a).seg == (b).seg)
#define set_fs(val) \
do { \
- current->tss.current_ds = (val); \
+ current->thread.current_ds = (val); \
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
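For context, the way this pair is normally driven has not changed; a generic sketch of the long-standing pattern, not code from this patch: save the current segment, widen it to KERNEL_DS so the copy routines accept kernel pointers, then restore it.

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* on sparc64 this also rewrites %asi, per the hunk above */
	/* ... __copy_from_user()/__copy_to_user() on kernel buffers ... */
	set_fs(old_fs);		/* restore the caller's segment */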
-/* $Id: unistd.h,v 1.28 1999/04/07 17:14:19 davem Exp $ */
+/* $Id: unistd.h,v 1.30 1999/07/31 04:05:24 ecd Exp $ */
#ifndef _SPARC64_UNISTD_H
#define _SPARC64_UNISTD_H
#define __NR_syslog 207 /* Linux Specific */
/* #define __NR_olduname 208 Linux Specific */
/* #define __NR_iopl 209 Linux Specific - i386 specific, unused */
-#define __NR_idle 210 /* Linux Specific */
+/* #define __NR_idle 210 Linux Specific - was sys_idle, now unused */
/* #define __NR_vm86 211 Linux Specific - i386 specific, unused */
#define __NR_waitpid 212 /* Linux Specific */
#define __NR_swapoff 213 /* Linux Specific */
@@ -411,7+411,6 @@ return -1; \
* some others too.
*/
#define __NR__exit __NR_exit
-static __inline__ _syscall0(int,idle)
static __inline__ _syscall0(int,pause)
static __inline__ _syscall0(int,sync)
static __inline__ _syscall0(pid_t,setsid)
@@ -111,6+111,10 @@ static unsigned long memory_end = 0;
int rows, cols;
+#ifdef CONFIG_BLK_DEV_INITRD
+kdev_t real_root_dev;
+#endif
+
int root_mountflags = MS_RDONLY;
char *execute_command = NULL;
@@ -387,7+391,7 @@ static void __init parse_options(char *line)
extern void setup_arch(char **, unsigned long *, unsigned long *);
-extern int cpu_idle(void);
+extern void cpu_idle(void);
#ifndef __SMP__
@@ -515,10+519,14 @@ static int do_linuxrc(void * shell)
return execve(shell, argv, envp_init);
}
-static void __init no_initrd(char *s,int *ints)
+static int __init no_initrd(char *s)
{
mount_initrd = 0;
+ return 1;
}
+
+__setup("noinitrd", no_initrd);
+
#endif
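no_initrd() above follows the newer boot-option convention: the handler receives the option string, returns non-zero once it has consumed it, and registers itself with __setup() rather than being listed in a central table. A sketch of another handler in the same style; the option name and function are invented for illustration:

static int __init example_setup(char *str)
{
	/* str points just past "example=" on the command line;
	 * parse and record the value here. */
	return 1;	/* non-zero: option handled */
}

__setup("example=", example_setup);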
struct task_struct *child_reaper = &init_task;
@@ -321,6+321,7 @@ inline void __mmdrop(struct mm_struct *mm)
{
if (mm == &init_mm) BUG();
pgd_free(mm->pgd);
+ destroy_context(mm);
kmem_cache_free(mm_cachep, mm);
}
@@ -779,6+779,7 @@ still_running_back:
* but prev is set to (the just run) 'last' process by switch_to().
* This might sound slightly confusing but makes tons of sense.
*/
+ prepare_to_switch();
{
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;