Import 2.3.9pre8
author Linus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:25:54 +0000 (15:25 -0500)
committer Linus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:25:54 +0000 (15:25 -0500)
69 files changed:
CREDITS
Documentation/filesystems/ufs.txt
Makefile
arch/alpha/mm/init.c
arch/arm/mm/init.c
arch/i386/mm/init.c
arch/m68k/mm/init.c
arch/mips/mm/init.c
arch/ppc/mm/init.c
arch/sparc/config.in
arch/sparc/kernel/sys_sunos.c
arch/sparc/mm/init.c
arch/sparc64/kernel/process.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/kernel/sys_sparc32.c
arch/sparc64/kernel/sys_sunos32.c
arch/sparc64/mm/init.c
arch/sparc64/solaris/misc.c
drivers/Makefile
drivers/block/ide-proc.c
drivers/block/ide-tape.c
drivers/block/ll_rw_blk.c
drivers/block/raid1.c
drivers/block/raid5.c
drivers/char/sysrq.c
drivers/char/tpqic02.c
drivers/net/yellowfin.c
drivers/scsi/sd.c
drivers/scsi/st.c
drivers/sgi/char/sgiserial.c
drivers/sgi/char/usema.c
drivers/video/sbusfb.c
fs/buffer.c
fs/coda/file.c
fs/ext2/fsync.c
fs/ext2/truncate.c
fs/inode.c
fs/minix/fsync.c
fs/minix/truncate.c
fs/nfsd/vfs.c
fs/proc/openpromfs.c
fs/qnx4/fsync.c
fs/sysv/balloc.c
fs/sysv/fsync.c
fs/sysv/truncate.c
fs/ufs/super.c
fs/ufs/truncate.c
fs/ufs/util.c
include/asm-sparc/page.h
include/asm-sparc/pgtable.h
include/asm-sparc64/page.h
include/asm-sparc64/pgtable.h
include/linux/fs.h
include/linux/mm.h
include/linux/swap.h
include/linux/ufs_fs.h
kernel/acct.c
kernel/ksyms.c
mm/filemap.c
mm/mmap.c
net/ipv4/ip_masq.c
net/ipv4/ip_masq_mfw.c
net/ipv4/ip_masq_mod.c
net/ipv4/ip_masq_portfw.c
net/ipv4/ipconfig.c
net/ipv4/tcp.c
net/ipv4/udp.c
net/sunrpc/xprt.c
net/unix/af_unix.c

index f24ef3e..745fa36 100644
--- a/CREDITS
+++ b/CREDITS
@@ -983,7+983,7 @@ N: Niels Kristian Bech Jensen
 E: nkbj@image.dk
 W: http://www.image.dk/~nkbj
 D: 4.4BSD and NeXTstep filesystem support in the old ufs.
-D: Openstep filesystem support in the new ufs.
+D: Openstep filesystem and NeXTstep CDROM support in the new ufs.
 D: Danish HOWTO, Linux+FreeBSD mini-HOWTO.
 S: Dr. Holsts Vej 34, lejl. 164
 S: DK-8230 Åbyhøj
index a269b2c..dafae24 100644
--- a/Documentation/filesystems/ufs.txt
+++ b/Documentation/filesystems/ufs.txt
@@ -30,6+30,10 @@ ufstype=type_of_ufs
                used in NextStep
                supported as read-only
 
+       nextstep-cd
+               used for NextStep CDROMs (block_size == 2048)
+               supported as read-only
+
        openstep
                used in OpenStep
                supported as read-only
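
Usage note: since the new nextstep-cd type is read-only, such a disc would presumably be mounted along the lines of "mount -t ufs -o ro,ufstype=nextstep-cd /dev/cdrom /mnt"; only the option name comes from the hunk above, the device and mount point are placeholders.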
index ed887ba..ec74c40 100644
--- a/Makefile
+++ b/Makefile
@@ -402,8+402,6 @@ sums:
 dep-files: scripts/mkdep archdep include/linux/version.h
        scripts/mkdep init/*.c > .depend
        scripts/mkdep `find $(FINDHPATH) -follow -name \*.h ! -name modversions.h -print` > .hdepend
-#      set -e; for i in $(SUBDIRS); do $(MAKE) -C $$i fastdep ;done
-# let this be made through the fastdep rule in Rules.make
        $(MAKE) $(patsubst %,_sfdep_%,$(SUBDIRS)) _FASTDEP_ALL_SUB_DIRS="$(SUBDIRS)"
 
 MODVERFILE :=
index 7b70d4a..69f4e03 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -166,7+166,6 @@ show_mem(void)
        printk("%ld pages shared\n",shared);
        printk("%ld pages swap cached\n",cached);
        printk("%ld pages in page table cache\n",pgtable_cache_size);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -359,7+358,7 @@ si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
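
This hunk sets the pattern repeated in every architecture's show_mem()/si_meminfo() below: show_buffers(), which walked the buffer lists with no locking, goes away, and the global buffermem counter is read through atomic_read() because it is now an atomic_t (see the fs/buffer.c hunk near the end). A minimal sketch of the counter conversion, with hypothetical helper names:

    #include <asm/atomic.h>

    atomic_t buffermem = ATOMIC_INIT(0);    /* was: int buffermem = 0; */

    /* hypothetical helpers showing the update and read sides */
    static void charge_buffer_page(int size)
    {
            atomic_add(size, &buffermem);   /* was: buffermem += size; */
    }

    static unsigned long report_bufferram(void)
    {
            return atomic_read(&buffermem); /* was: return buffermem;  */
    }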
index 47a2cfd..8db4e79 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -115,7+115,6 @@ void show_mem(void)
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -266,7+265,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
index bef9ea2..703b8ca 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -169,7+169,6 @@ void show_mem(void)
        printk("%d pages shared\n",shared);
        printk("%d pages swap cached\n",cached);
        printk("%ld pages in page table cache\n",pgtable_cache_size);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -489,7+488,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
index 9a41dc2..91409cd 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -108,7+108,6 @@ void show_mem(void)
     printk("%d pages shared\n",shared);
     printk("%d pages swap cached\n",cached);
     printk("%ld pages in page table cache\n",pgtable_cache_size);
-    show_buffers();
 #ifdef CONFIG_NET
     show_net_buffers();
 #endif
@@ -479,7+478,7 @@ void si_meminfo(struct sysinfo *val)
     val->totalram = 0;
     val->sharedram = 0;
     val->freeram = nr_free_pages << PAGE_SHIFT;
-    val->bufferram = buffermem;
+    val->bufferram = atomic_read(&buffermem);
     while (i-- > 0) {
        if (PageReserved(mem_map+i))
            continue;
index 1e8bd25..81b6e08 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -265,7+265,6 @@ void show_mem(void)
        printk("%d pages swap cached\n",cached);
        printk("%ld pages in page table cache\n",pgtable_cache_size);
        printk("%d free pages\n", free);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -369,7+368,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
index 69e1efb..bd99146 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -1,5+1,5 @@
 /*
- *  $Id: init.c,v 1.169 1999/06/17 19:03:13 cort Exp $
+ *  $Id: init.c,v 1.170 1999/06/29 12:33:51 davem Exp $
  *
  *  PowerPC version 
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -254,7+254,6 @@ void show_mem(void)
        printk("%d pages shared\n",shared);
        printk("%d pages swap cached\n",cached);
        printk("%d pages in page table cache\n",(int)pgtable_cache_size);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -312,7+311,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
index 1f485d4..ebbf508 100644
--- a/arch/sparc/config.in
+++ b/arch/sparc/config.in
@@ -1,4+1,4 @@
-# $Id: config.in,v 1.68 1999/03/14 03:12:42 anton Exp $
+# $Id: config.in,v 1.69 1999/06/25 11:00:20 davem Exp $
 # For a description of the syntax of this configuration file,
 # see the Configure script.
 #
index ed61094..febbd2e 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -1,4+1,4 @@
-/* $Id: sys_sunos.c,v 1.99 1999/06/11 11:40:39 davem Exp $
+/* $Id: sys_sunos.c,v 1.101 1999/06/29 12:33:54 davem Exp $
  * sys_sunos.c: SunOS specific syscall compatibility support.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -196,7+196,7 @@ asmlinkage int sunos_brk(unsigned long brk)
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
-       freepages = buffermem >> PAGE_SHIFT;
+       freepages = atomic_read(&buffermem) >> PAGE_SHIFT;
        freepages += atomic_read(&page_cache_size);
        freepages >>= 1;
        freepages += nr_free_pages;
index dcec30b..01cc783 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,4+1,4 @@
-/*  $Id: init.c,v 1.65 1999/04/09 16:28:03 davem Exp $
+/*  $Id: init.c,v 1.67 1999/06/29 12:33:59 davem Exp $
  *  linux/arch/sparc/mm/init.c
  *
  *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -106,7+106,6 @@ void show_mem(void)
        printk("%ld page tables cached\n",pgtable_cache_size);
        if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
                printk("%ld page dirs cached\n", pgd_cache_size);
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -363,7+362,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        for (page = mem_map, end = mem_map + max_mapnr;
             page < end; page++) {
                if (PageSkip(page)) {
index 1abe767..d163e73 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,4+1,4 @@
-/*  $Id: process.c,v 1.94 1999/05/27 04:49:30 davem Exp $
+/*  $Id: process.c,v 1.95 1999/06/28 08:48:51 davem Exp $
  *  arch/sparc64/kernel/process.c
  *
  *  Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -392,7+392,7 @@ void show_thread(struct thread_struct *tss)
        }
 
        printk("flags:             0x%08x\n", tss->flags);
-       printk("current_ds:        0x%016lx\n", tss->current_ds.seg);
+       printk("current_ds:        0x%x\n", tss->current_ds.seg);
 }
 
 /* Free current thread data structures etc.. */
index 305f37a..18d9d44 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -1,4+1,4 @@
-/* $Id: sparc64_ksyms.c,v 1.58 1999/05/08 03:00:31 davem Exp $
+/* $Id: sparc64_ksyms.c,v 1.59 1999/06/28 11:28:50 davem Exp $
  * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -206,6+206,7 @@ EXPORT_SYMBOL(die_if_kernel);
 
 /* Kernel thread creation. */
 EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(init_mm);
 
 /* prom symbols */
 EXPORT_SYMBOL(idprom);
index a680814..8d1c250 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,4+1,4 @@
-/* $Id: sys_sparc32.c,v 1.109 1999/06/03 07:11:31 davem Exp $
+/* $Id: sys_sparc32.c,v 1.112 1999/06/29 12:34:02 davem Exp $
  * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
  *
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -2328,7+2328,7 @@ static void scm_detach_fds32(struct msghdr *kmsg, struct scm_cookie *scm)
                        break;
                }
                /* Bump the usage count and install the file. */
-               fp[i]->f_count++;
+               atomic_inc(&fp[i]->f_count);
                current->files->fd[new_fd] = fp[i];
        }
 
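struct file's f_count gets the same treatment as b_count and buffermem: it becomes an atomic_t, so taking a reference before publishing a descriptor no longer needs the big kernel lock. A sketch of the discipline, assuming 2.3-era types and a hypothetical helper name:

    #include <linux/sched.h>        /* struct files_struct, 2.3-era layout */
    #include <asm/atomic.h>

    static void install_file(struct files_struct *files, int fd, struct file *file)
    {
            atomic_inc(&file->f_count);     /* bump the usage count first... */
            files->fd[fd] = file;           /* ...then make the fd visible   */
    }
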
index d375dc2..f2a2847 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -1,4+1,4 @@
-/* $Id: sys_sunos32.c,v 1.26 1999/06/09 08:23:54 davem Exp $
+/* $Id: sys_sunos32.c,v 1.28 1999/06/29 12:34:04 davem Exp $
  * sys_sunos32.c: SunOS binary compatability layer on sparc64.
  *
  * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -163,7+163,7 @@ asmlinkage int sunos_brk(u32 baddr)
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
-       freepages = buffermem >> PAGE_SHIFT;
+       freepages = atomic_read(&buffermem) >> PAGE_SHIFT;
        freepages += atomic_read(&page_cache_size);
        freepages >>= 1;
        freepages += nr_free_pages;
index c7eeb06..5e285c1 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1,4+1,4 @@
-/*  $Id: init.c,v 1.129 1999/06/25 10:32:08 davem Exp $
+/*  $Id: init.c,v 1.130 1999/06/29 12:34:06 davem Exp $
  *  arch/sparc64/mm/init.c
  *
  *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
@@ -147,7+147,6 @@ void show_mem(void)
 #ifndef __SMP__
        printk("%d entries in page dir cache\n",pgd_cache_size);
 #endif 
-       show_buffers();
 #ifdef CONFIG_NET
        show_net_buffers();
 #endif
@@ -1456,7+1455,7 @@ void si_meminfo(struct sysinfo *val)
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = ((unsigned long)nr_free_pages) << PAGE_SHIFT;
-       val->bufferram = buffermem;
+       val->bufferram = atomic_read(&buffermem);
        for (page = mem_map, end = mem_map + max_mapnr;
             page < end; page++) {
                if (PageSkip(page)) {
index 4b3a1ce..c3176ac 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -1,4+1,4 @@
-/* $Id: misc.c,v 1.13 1998/10/28 08:11:58 jj Exp $
+/* $Id: misc.c,v 1.14 1999/06/25 11:00:53 davem Exp $
  * misc.c: Miscelaneous syscall emulation for Solaris
  *
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
index 58af288..e516fe2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -9,8+9,9 @@
 
 SUB_DIRS     := block char net misc sound
 MOD_SUB_DIRS := $(SUB_DIRS)
-ALL_SUB_DIRS := $(SUB_DIRS) pci sgi scsi sbus cdrom isdn misc pnp \
-                               macintosh video dio zorro fc4 usb
+ALL_SUB_DIRS := $(SUB_DIRS) pci sgi scsi sbus cdrom isdn pnp i2o \
+                               macintosh video dio zorro fc4 usb \
+                               nubus tc ap1000
 
 ifdef CONFIG_DIO
 SUB_DIRS += dio
index 183cc4d..623ac92 100644
--- a/drivers/block/ide-proc.c
+++ b/drivers/block/ide-proc.c
@@ -724,16+724,17 @@ static void destroy_proc_ide_interfaces(void)
                ide_hwif_t *hwif = &ide_hwifs[h];
                int exist = (hwif->proc != NULL);
 
-/*             if (!hwif->present)
-                       continue;*/
-               if (!hwif->proc)
+#if 0
+               if (!hwif->present)
                        continue;
-               else {
+#endif
+               if (exist) {
                        destroy_proc_ide_drives(hwif);
                        ide_remove_proc_entries(hwif->proc, hwif_entries);
                        remove_proc_entry(hwif->name, proc_ide_root);
                        hwif->proc = NULL;
-               }
+               } else
+                       continue;
        }
 }
 
index 3b2b5af..74c89cf 100644
--- a/drivers/block/ide-tape.c
+++ b/drivers/block/ide-tape.c
@@ -1073,13+1073,13 @@ static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigne
                        return;
                }
 #endif /* IDETAPE_DEBUG_BUGS */
-               count = IDE_MIN (bh->b_size - bh->b_count, bcount);
-               atapi_input_bytes (drive, bh->b_data + bh->b_count, count);
-               bcount -= count; bh->b_count += count;
-               if (bh->b_count == bh->b_size) {
+               count = IDE_MIN (bh->b_size - atomic_read(&bh->b_count), bcount);
+               atapi_input_bytes (drive, bh->b_data + atomic_read(&bh->b_count), count);
+               bcount -= count; atomic_add(count, &bh->b_count);
+               if (atomic_read(&bh->b_count) == bh->b_size) {
                        bh = bh->b_reqnext;
                        if (bh)
-                               bh->b_count = 0;
+                               atomic_set(&bh->b_count, 0);
                }
        }
        pc->bh = bh;
@@ -1104,7+1104,7 @@ static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsign
                        pc->bh = bh = bh->b_reqnext;
                        if (bh) {
                                pc->b_data = bh->b_data;
-                               pc->b_count = bh->b_count;
+                               pc->b_count = atomic_read(&bh->b_count);
                        }
                }
        }
@@ -1126,8+1126,8 @@ static void idetape_update_buffers (idetape_pc_t *pc)
                }
 #endif /* IDETAPE_DEBUG_BUGS */
                count = IDE_MIN (bh->b_size, bcount);
-               bh->b_count = count;
-               if (bh->b_count == bh->b_size)
+               atomic_set(&bh->b_count, count);
+               if (atomic_read(&bh->b_count) == bh->b_size)
                        bh = bh->b_reqnext;
                bcount -= count;
        }
@@ -1351,13+1351,13 @@ static void idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t
                        return;
                }
 #endif /* IDETAPE_DEBUG_BUGS */
-               count = IDE_MIN (bh->b_size - bh->b_count, n);
-               copy_from_user (bh->b_data + bh->b_count, buf, count);
-               n -= count; bh->b_count += count; buf += count;
-               if (bh->b_count == bh->b_size) {
+               count = IDE_MIN (bh->b_size - atomic_read(&bh->b_count), n);
+               copy_from_user (bh->b_data + atomic_read(&bh->b_count), buf, count);
+               n -= count; atomic_add(count, &bh->b_count); buf += count;
+               if (atomic_read(&bh->b_count) == bh->b_size) {
                        bh = bh->b_reqnext;
                        if (bh)
-                               bh->b_count = 0;
+                               atomic_set(&bh->b_count, 0);
                }
        }
        tape->bh = bh;
@@ -1382,7+1382,7 @@ static void idetape_copy_stage_to_user (idetape_tape_t *tape, char *buf, idetape
                        tape->bh = bh = bh->b_reqnext;
                        if (bh) {
                                tape->b_data = bh->b_data;
-                               tape->b_count = bh->b_count;
+                               tape->b_count = atomic_read(&bh->b_count);
                        }
                }
        }
@@ -1394,10+1394,10 @@ static void idetape_init_merge_stage (idetape_tape_t *tape)
        
        tape->bh = bh;
        if (tape->chrdev_direction == idetape_direction_write)
-               bh->b_count = 0;
+               atomic_set(&bh->b_count, 0);
        else {
                tape->b_data = bh->b_data;
-               tape->b_count = bh->b_count;
+               tape->b_count = atomic_read(&bh->b_count);
        }
 }
 
@@ -2131,7+2131,7 @@ static void idetape_create_read_cmd (idetape_tape_t *tape, idetape_pc_t *pc, uns
        pc->c[1] = 1;
        pc->callback = &idetape_rw_callback;
        pc->bh = bh;
-       bh->b_count = 0;
+       atomic_set(&bh->b_count, 0);
        pc->buffer = NULL;
        pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
        if (pc->request_transfer == tape->stage_size)
@@ -2158,7+2158,7 @@ static void idetape_create_write_cmd (idetape_tape_t *tape, idetape_pc_t *pc, un
        set_bit (PC_WRITING, &pc->flags);
        pc->bh = bh;
        pc->b_data = bh->b_data;
-       pc->b_count = bh->b_count;
+       pc->b_count = atomic_read(&bh->b_count);
        pc->buffer = NULL;
        pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
        if (pc->request_transfer == tape->stage_size)
@@ -2587,9+2587,9 @@ static void idetape_pad_zeros (ide_drive_t *drive, int bcount)
                bcount -= count;
                blocks = count / tape->tape_block_size;
                while (count) {
-                       bh->b_count = IDE_MIN (count, bh->b_size);
-                       memset (bh->b_data, 0, bh->b_count);
-                       count -= bh->b_count;
+                       atomic_set(&bh->b_count, IDE_MIN (count, bh->b_size));
+                       memset (bh->b_data, 0, atomic_read(&bh->b_count));
+                       count -= atomic_read(&bh->b_count);
                        bh = bh->b_reqnext;
                }
                idetape_queue_rw_tail (drive, IDETAPE_WRITE_RQ, blocks, tape->merge_stage->bh);
@@ -2616,8+2616,8 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
                if (tape->merge_stage_size % tape->tape_block_size) {
                        blocks++;
                        i = tape->tape_block_size - tape->merge_stage_size % tape->tape_block_size;
-                       memset (tape->bh->b_data + tape->bh->b_count, 0, i);
-                       tape->bh->b_count += i;
+                       memset (tape->bh->b_data + atomic_read(&tape->bh->b_count), 0, i);
+                       atomic_add(i, &tape->bh->b_count);
                }
                (void) idetape_add_chrdev_write_request (drive, blocks);
                tape->merge_stage_size = 0;
index 288d995..ccf8140 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -431,9+431,7 @@ void make_request(int major,int rw, struct buffer_head * bh)
                case WRITE:
                        if (!test_and_clear_bit(BH_Dirty, &bh->b_state))
                                goto end_io;    /* Hmmph! Nothing to write */
-                       lock_kernel();
                        refile_buffer(bh);
-                       unlock_kernel();
                        /*
                         * We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
@@ -612,7+610,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
        for (i = 0; i < nr; i++) {
                if (bh[i]->b_size != correct_size) {
                        printk(KERN_NOTICE "ll_rw_block: device %s: "
-                              "only %d-char blocks implemented (%lu)\n",
+                              "only %d-char blocks implemented (%u)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
index 890584d..7efb784 100644
--- a/drivers/block/raid1.c
+++ b/drivers/block/raid1.c
@@ -310,7+310,7 @@ raid1_make_request (struct md_dev *mddev, int rw, struct buffer_head * bh)
                mirror_bh [i]->b_rdev       = raid_conf->mirrors [i].dev;
                mirror_bh [i]->b_rsector    = bh->b_rsector;
                mirror_bh [i]->b_state      = (1<<BH_Req) | (1<<BH_Dirty);
-               mirror_bh [i]->b_count      = 1;
+               atomic_set(&mirror_bh [i]->b_count, 1);
                mirror_bh [i]->b_size       = bh->b_size;
                mirror_bh [i]->b_data       = bh->b_data;
                mirror_bh [i]->b_list       = BUF_LOCKED;
index cdf0242..3e60f92 100644
--- a/drivers/block/raid5.c
+++ b/drivers/block/raid5.c
@@ -1032,19+1032,24 @@ static void handle_stripe(struct stripe_head *sh)
                                if (sh->bh_new[i])
                                        continue;
                                block = (int) compute_blocknr(sh, i);
-                               bh = find_buffer(MKDEV(MD_MAJOR, minor), block, sh->size);
-                               if (bh && bh->b_count == 0 && buffer_dirty(bh) && !buffer_locked(bh)) {
-                                       PRINTK(("Whee.. sector %lu, index %d (%d) found in the buffer cache!\n", sh->sector, i, block));
-                                       add_stripe_bh(sh, bh, i, WRITE);
-                                       sh->new[i] = 0;
-                                       nr++; nr_write++;
-                                       if (sh->bh_old[i]) {
-                                               nr_cache_overwrite++;
-                                               nr_cache_other--;
-                                       } else if (!operational[i]) {
-                                               nr_failed_overwrite++;
-                                               nr_failed_other--;
+                               bh = get_hash_table(MKDEV(MD_MAJOR, minor), block, sh->size);
+                               if (bh) {
+                                       if (atomic_read(&bh->b_count) == 1 &&
+                                           buffer_dirty(bh) &&
+                                           !buffer_locked(bh)) {
+                                               PRINTK(("Whee.. sector %lu, index %d (%d) found in the buffer cache!\n", sh->sector, i, block));
+                                               add_stripe_bh(sh, bh, i, WRITE);
+                                               sh->new[i] = 0;
+                                               nr++; nr_write++;
+                                               if (sh->bh_old[i]) {
+                                                       nr_cache_overwrite++;
+                                                       nr_cache_other--;
+                                               } else if (!operational[i]) {
+                                                       nr_failed_overwrite++;
+                                                       nr_failed_other--;
+                                               }
                                        }
+                                       atomic_dec(&bh->b_count);
                                }
                        }
                }
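
find_buffer() is gone; raid5 now uses get_hash_table(), which (per the fs/buffer.c rewrite below) returns the buffer with b_count already raised under hash_table_lock. The caller therefore owns a reference, tests b_count == 1 rather than == 0, and must atomic_dec() when done. The new discipline, reduced to a sketch:

    #include <linux/fs.h>

    static void lookup_sketch(kdev_t dev, int block, int size)
    {
            struct buffer_head *bh = get_hash_table(dev, block, size);

            if (!bh)
                    return;
            /* b_count == 1 now means "only our lookup reference exists" */
            if (atomic_read(&bh->b_count) == 1 &&
                buffer_dirty(bh) && !buffer_locked(bh)) {
                    /* ... safe to claim the buffer ... */
            }
            atomic_dec(&bh->b_count);       /* drop the lookup reference */
    }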
index 5a4ad1c..574f1b1 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -155,7+155,7 @@ static void all_files_read_only(void)           /* Kill write permissions of all files
        struct file *file;
 
        for (file = inuse_filps; file; file = file->f_next)
-               if (file->f_dentry && file->f_count && S_ISREG(file->f_dentry->d_inode->i_mode))
+               if (file->f_dentry && atomic_read(&file->f_count) && S_ISREG(file->f_dentry->d_inode->i_mode))
                        file->f_mode &= ~2;
 }
 
index 67b8f3a..edcf6f2 100644
--- a/drivers/char/tpqic02.c
+++ b/drivers/char/tpqic02.c
@@ -2216,7+2216,7 @@ static int qic02_tape_open_no_use_count(struct inode * inode, struct file * filp
     }
     
        /* Only one at a time from here on... */
-    if (filp->f_count>1)       /* filp->f_count==1 for the first open() */
+    if (atomic_read(&filp->f_count)>1)         /* filp->f_count==1 for the first open() */
     {
        return -EBUSY;
     }
index 410121e..8d5b232 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -76,6+76,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
 #include <linux/pci.h>
 #include <asm/processor.h>             /* Processor type for cache alignment. */
 #include <asm/bitops.h>
+#include <asm/unaligned.h>
 #include <asm/io.h>
 
 #include <linux/netdevice.h>
@@ -1054,7+1055,7 @@ static int yellowfin_rx(struct device *dev)
                u16 desc_status = desc->status;
                int data_size = desc->request_cnt - desc->result_cnt;
                u8 *buf_addr = bus_to_virt(desc->addr);
-               s16 frame_status = *(s16*)&(buf_addr[data_size - 2]); /* ?Alpha safe on 885? */
+               s16 frame_status = get_unaligned((s16*)(buf_addr+data_size-2));
 
                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
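
The status word sits two bytes before the end of the received data, an offset with no alignment guarantee, and a raw *(s16 *) load there can trap on Alpha (what the old "?Alpha safe?" comment worried about). get_unaligned() from the newly included <asm/unaligned.h> compiles to byte-wise loads where the architecture requires them. As a sketch:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* illustrative helper: fetch the status word trailing the frame data */
    static s16 frame_status(u8 *buf_addr, int data_size)
    {
            return get_unaligned((s16 *)(buf_addr + data_size - 2));
    }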
index e89eeec..22ab35f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -945,7+945,7 @@ static void requeue_sd_request (Scsi_Cmnd * SCpnt)
                printk("maxsg = %x, counted = %d this_count = %d\n",
                       max_sg, counted, this_count);
                while(bh){
-                   printk("[%p %lx] ", bh->b_data, bh->b_size);
+                   printk("[%p %x] ", bh->b_data, bh->b_size);
                    bh = bh->b_reqnext;
                }
                if(SCpnt->use_sg < 16)
index a5a825d..dc70284 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -890,7+890,7 @@ scsi_tape_flush(struct file * filp)
     kdev_t devt = inode->i_rdev;
     int dev;
 
-    if (filp->f_count > 1)
+    if (atomic_read(&filp->f_count) > 1)
        return 0;
 
     dev = TAPE_NR(devt);
index aaa9e7e..4178891 100644
--- a/drivers/sgi/char/sgiserial.c
+++ b/drivers/sgi/char/sgiserial.c
@@ -408,7+408,6 @@ static _INLINE_ void receive_chars(struct sgi_serial *info, struct pt_regs *regs
                        show_state();
                        return;
                } else if (ch == 2) {
-                       show_buffers();
                        return;
                }
                /* It is a 'keyboard interrupt' ;-) */
index ad27320..e91a944 100644
--- a/drivers/sgi/char/usema.c
+++ b/drivers/sgi/char/usema.c
@@ -54,7+54,7 @@ sgi_usema_attach (usattach_t * attach, struct irix_usema *usema)
                return newfd;
        
        current->files->fd [newfd] = usema->filp;
-       usema->filp->f_count++;
+       atomic_inc(&usema->filp->f_count);
        /* Is that it? */
        printk("UIOCATTACHSEMA: new usema fd is %d", newfd);
        return newfd;
index ce2c89e..7e17bf1 100644
--- a/drivers/video/sbusfb.c
+++ b/drivers/video/sbusfb.c
@@ -364,9+364,29 @@ static int sbusfb_get_var(struct fb_var_screeninfo *var, int con,
      */
 
 static int sbusfb_set_var(struct fb_var_screeninfo *var, int con,
-                       struct fb_info *info)
+                         struct fb_info *info)
 {
-       return -EINVAL;
+       struct display *display;
+       int activate = var->activate;
+
+       if(con >= 0)
+               display = &fb_display[con];
+       else
+               display = info->disp;
+
+       /* simple check for equality until fully implemented -E */
+       if ((activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
+               if (display->var.xres != var->xres ||
+                       display->var.yres != var->yres ||
+                       display->var.xres_virtual != var->xres_virtual ||
+                       display->var.yres_virtual != var->yres_virtual ||
+                       display->var.bits_per_pixel != var->bits_per_pixel ||
+                       display->var.accel_flags != var->accel_flags) {
+                       return -EINVAL;
+               }
+       }
+       return 0;
+
 }
 
     /*
index 6876bc9..f5a4aab 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
  * - RMK
  */
 
+/* Thread it... -DaveM */
+
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/malloc.h>
@@ -57,30+59,40 @@ static char buffersize_index[65] =
 #define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this 
                                             number of unused buffer heads */
 
+/* Anti-deadlock ordering:
+ *     lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock
+ */
+
 /*
- * Hash table mask..
+ * Hash table gook..
  */
-static unsigned long bh_hash_mask = 0;
+static unsigned int bh_hash_mask = 0;
+static unsigned int bh_hash_shift = 0;
+static struct buffer_head **hash_table;
+static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
 
-static int grow_buffers(int size);
+static struct buffer_head *lru_list[NR_LIST];
+static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
+static int nr_buffers_type[NR_LIST] = {0,};
 
-static struct buffer_head ** hash_table;
-static struct buffer_head * lru_list[NR_LIST] = {NULL, };
-static struct buffer_head * free_list[NR_SIZES] = {NULL, };
+static struct buffer_head * unused_list = NULL;
+static int nr_unused_buffer_heads = 0;
+static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
 
-static kmem_cache_t *bh_cachep;
+struct bh_free_head {
+       struct buffer_head *list;
+       spinlock_t lock;
+};
+static struct bh_free_head free_list[NR_SIZES];
 
-static struct buffer_head * unused_list = NULL;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
 
-static int nr_buffers = 0;
-static int nr_buffers_type[NR_LIST] = {0,};
-static int nr_buffer_heads = 0;
-static int nr_unused_buffer_heads = 0;
-static int nr_hashed_buffers = 0;
+static kmem_cache_t *bh_cachep;
+
+static int grow_buffers(int size);
 
 /* This is used by some architectures to estimate available memory. */
-int buffermem = 0;
+atomic_t buffermem = ATOMIC_INIT(0);
 
 /* Here is the parameter block for the bdflush process. If you add or
  * remove any of the parameters, make sure to update kernel/sysctl.c.
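
This is the core of the import: the buffer cache, formerly serialized by the big kernel lock, is now guarded by four separate locks with the documented ordering lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock. Wherever two of them nest (insert_into_queues() and __bforget() below), they are taken left to right and released in reverse, which is what keeps the ordering deadlock-free. A minimal sketch of the rule, assuming buffer.c's file-local lock declarations are in scope and using a hypothetical function name:

    #include <linux/fs.h>

    static void unlink_buffer_everywhere(struct buffer_head *bh)
    {
            spin_lock(&lru_list_lock);          /* outermost lock first */
            write_lock(&hash_table_lock);       /* then the inner one   */
            /* ... __hash_unlink(bh); __remove_from_lru_list(...); ...  */
            write_unlock(&hash_table_lock);     /* release in reverse   */
            spin_unlock(&lru_list_lock);
    }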
@@ -130,7+142,7 @@ void __wait_on_buffer(struct buffer_head * bh)
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
 
-       bh->b_count++;
+       atomic_inc(&bh->b_count);
        add_wait_queue(&bh->b_wait, &wait);
 repeat:
        tsk->state = TASK_UNINTERRUPTIBLE;
@@ -141,7+153,7 @@ repeat:
        }
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&bh->b_wait, &wait);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
 }
 
 /* Call sync_buffers with wait!=0 to ensure that the call does not
@@ -166,17+178,19 @@ static int sync_buffers(kdev_t dev, int wait)
         */
        do {
                retry = 0;
-repeat:
+
                /* We search all lists as a failsafe mechanism, not because we expect
                 * there to be dirty buffers on any of the other lists.
                 */
+repeat:
+               spin_lock(&lru_list_lock);
                bh = lru_list[BUF_DIRTY];
                if (!bh)
                        goto repeat2;
+
                for (i = nr_buffers_type[BUF_DIRTY]*2 ; i-- > 0 ; bh = next) {
-                       if (bh->b_list != BUF_DIRTY)
-                               goto repeat;
                        next = bh->b_next_free;
+
                        if (!lru_list[BUF_DIRTY])
                                break;
                        if (dev && bh->b_dev != dev)
@@ -189,7+203,10 @@ repeat:
                                        retry = 1;
                                        continue;
                                }
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
                                wait_on_buffer (bh);
+                               atomic_dec(&bh->b_count);
                                goto repeat;
                        }
 
@@ -208,30+225,24 @@ repeat:
                        if (!buffer_dirty(bh) || pass >= 2)
                                continue;
 
-                       /* Don't bother about locked buffers.
-                        *
-                        * XXX We checked if it was locked above and there is no
-                        * XXX way we could have slept in between. -DaveM
-                        */
-                       if (buffer_locked(bh))
-                               continue;
-                       bh->b_count++;
-                       next->b_count++;
+                       atomic_inc(&bh->b_count);
                        bh->b_flushtime = 0;
+                       spin_unlock(&lru_list_lock);
                        ll_rw_block(WRITE, 1, &bh);
-                       bh->b_count--;
-                       next->b_count--;
+                       atomic_dec(&bh->b_count);
                        retry = 1;
+                       goto repeat;
                }
 
     repeat2:
                bh = lru_list[BUF_LOCKED];
-               if (!bh)
+               if (!bh) {
+                       spin_unlock(&lru_list_lock);
                        break;
+               }
                for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
-                       if (bh->b_list != BUF_LOCKED)
-                               goto repeat2;
                        next = bh->b_next_free;
+
                        if (!lru_list[BUF_LOCKED])
                                break;
                        if (dev && bh->b_dev != dev)
@@ -244,10+255,15 @@ repeat:
                                        retry = 1;
                                        continue;
                                }
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
                                wait_on_buffer (bh);
+                               spin_lock(&lru_list_lock);
+                               atomic_dec(&bh->b_count);
                                goto repeat2;
                        }
                }
+               spin_unlock(&lru_list_lock);
 
                /* If we are waiting for the sync to succeed, and if any dirty
                 * blocks were written, then repeat; on the second pass, only
@@ -281,17+297,19 @@ void sync_dev(kdev_t dev)
 int fsync_dev(kdev_t dev)
 {
        sync_buffers(dev, 0);
+
+       lock_kernel();
        sync_supers(dev);
        sync_inodes(dev);
        DQUOT_SYNC(dev);
+       unlock_kernel();
+
        return sync_buffers(dev, 1);
 }
 
 asmlinkage int sys_sync(void)
 {
-       lock_kernel();
        fsync_dev(0);
-       unlock_kernel();
        return 0;
 }
 
@@ -395,19+413,28 @@ out:
 
 void invalidate_buffers(kdev_t dev)
 {
-       int i;
        int nlist;
-       struct buffer_head * bh;
 
+       spin_lock(&lru_list_lock);
        for(nlist = 0; nlist < NR_LIST; nlist++) {
+               struct buffer_head * bh;
+               int i;
+       retry:
                bh = lru_list[nlist];
+               if (!bh)
+                       continue;
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
                        if (bh->b_dev != dev)
                                continue;
-                       wait_on_buffer(bh);
-                       if (bh->b_dev != dev)
-                               continue;
-                       if (bh->b_count)
+                       if (buffer_locked(bh)) {
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
+                               wait_on_buffer(bh);
+                               spin_lock(&lru_list_lock);
+                               atomic_dec(&bh->b_count);
+                               goto retry;
+                       }
+                       if (atomic_read(&bh->b_count))
                                continue;
                        bh->b_flushtime = 0;
                        clear_bit(BH_Protected, &bh->b_state);
@@ -416,157+443,119 @@ void invalidate_buffers(kdev_t dev)
                        clear_bit(BH_Req, &bh->b_state);
                }
        }
+       spin_unlock(&lru_list_lock);
 }
 
-#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block)) & bh_hash_mask)
-#define hash(dev,block) hash_table[_hashfn(dev,block)]
+/* After several hours of tedious analysis, the following hash
+ * function won.  Do not mess with it... -DaveM
+ */
+#define _hashfn(dev,block)     \
+       ((((dev)<<(bh_hash_shift - 6)) ^ ((dev)<<(bh_hash_shift - 9))) ^ \
+        (((block)<<(bh_hash_shift - 6)) ^ ((block) >> 13) ^ ((block) << (bh_hash_shift - 12))))
+#define hash(dev,block) hash_table[(_hashfn(dev,block) & bh_hash_mask)]
 
-static void insert_into_hash_list(struct buffer_head * bh)
+static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
 {
-       bh->b_next = NULL;
-       bh->b_pprev = NULL;
-       if (bh->b_dev) {
-               struct buffer_head **bhp = &hash(bh->b_dev, bh->b_blocknr);
-               struct buffer_head *next = *bhp;
-
-               if (next) {
-                       bh->b_next = next;
-                       next->b_pprev = &bh->b_next;
-               }
-               *bhp = bh;
-               bh->b_pprev = bhp;
-               nr_hashed_buffers++;
-       }
+       if ((bh->b_next = *head) != NULL)
+               bh->b_next->b_pprev = &bh->b_next;
+       *head = bh;
+       bh->b_pprev = head;
 }
 
-static void remove_from_hash_queue(struct buffer_head * bh)
+static __inline__ void __hash_unlink(struct buffer_head *bh)
 {
-       struct buffer_head **pprev = bh->b_pprev;
-       if (pprev) {
-               struct buffer_head * next = bh->b_next;
-               if (next) {
-                       next->b_pprev = pprev;
-                       bh->b_next = NULL;
-               }
-               *pprev = next;
-               bh->b_pprev = NULL;
-               nr_hashed_buffers--;
-       }
+       if (bh->b_next)
+               bh->b_next->b_pprev = bh->b_pprev;
+       *(bh->b_pprev) = bh->b_next;
+       bh->b_pprev = NULL;
 }
 
-static void insert_into_lru_list(struct buffer_head * bh)
+static void __insert_into_lru_list(struct buffer_head * bh, int blist)
 {
-       struct buffer_head **bhp = &lru_list[bh->b_list];
-
-       if (bh->b_dev == B_FREE)
-               BUG();
+       struct buffer_head **bhp = &lru_list[blist];
 
        if(!*bhp) {
                *bhp = bh;
                bh->b_prev_free = bh;
        }
-
-       if (bh->b_next_free)
-               panic("VFS: buffer LRU pointers corrupted");
-
        bh->b_next_free = *bhp;
        bh->b_prev_free = (*bhp)->b_prev_free;
        (*bhp)->b_prev_free->b_next_free = bh;
        (*bhp)->b_prev_free = bh;
-
-       nr_buffers++;
-       nr_buffers_type[bh->b_list]++;
+       nr_buffers_type[blist]++;
 }
 
-static void remove_from_lru_list(struct buffer_head * bh)
+static void __remove_from_lru_list(struct buffer_head * bh, int blist)
 {
-       if (!(bh->b_prev_free) || !(bh->b_next_free))
-               return;
-
-       if (bh->b_dev == B_FREE) {
-               printk("LRU list corrupted");
-               *(int*)0 = 0;
+       if (bh->b_prev_free || bh->b_next_free) {
+               bh->b_prev_free->b_next_free = bh->b_next_free;
+               bh->b_next_free->b_prev_free = bh->b_prev_free;
+               if (lru_list[blist] == bh)
+                       lru_list[blist] = bh->b_next_free;
+               if (lru_list[blist] == bh)
+                       lru_list[blist] = NULL;
+               bh->b_next_free = bh->b_prev_free = NULL;
+               nr_buffers_type[blist]--;
        }
-       bh->b_prev_free->b_next_free = bh->b_next_free;
-       bh->b_next_free->b_prev_free = bh->b_prev_free;
-
-       if (lru_list[bh->b_list] == bh)
-                lru_list[bh->b_list] = bh->b_next_free;
-       if (lru_list[bh->b_list] == bh)
-                lru_list[bh->b_list] = NULL;
-       bh->b_next_free = bh->b_prev_free = NULL;
-
-       nr_buffers--;
-       nr_buffers_type[bh->b_list]--;
 }
 
-static void remove_from_free_list(struct buffer_head * bh)
+static void __remove_from_free_list(struct buffer_head * bh, int index)
 {
-       int isize = BUFSIZE_INDEX(bh->b_size);
-       if (!(bh->b_prev_free) || !(bh->b_next_free))
-               panic("VFS: Free block list corrupted");
-       if(bh->b_dev != B_FREE)
-               panic("Free list corrupted");
-       if(!free_list[isize])
-               panic("Free list empty");
        if(bh->b_next_free == bh)
-                free_list[isize] = NULL;
+                free_list[index].list = NULL;
        else {
                bh->b_prev_free->b_next_free = bh->b_next_free;
                bh->b_next_free->b_prev_free = bh->b_prev_free;
-               if (free_list[isize] == bh)
-                        free_list[isize] = bh->b_next_free;
+               if (free_list[index].list == bh)
+                        free_list[index].list = bh->b_next_free;
        }
        bh->b_next_free = bh->b_prev_free = NULL;
 }
 
-static void remove_from_queues(struct buffer_head * bh)
+/* The following two functions must operate atomically
+ * because they control the visibility of a buffer head
+ * to the rest of the kernel.
+ */
+static __inline__ void __remove_from_queues(struct buffer_head *bh)
 {
-       if (bh->b_dev == B_FREE)
-               BUG();
-       remove_from_hash_queue(bh);
-       remove_from_lru_list(bh);
+       write_lock(&hash_table_lock);
+       if (bh->b_pprev)
+               __hash_unlink(bh);
+       __remove_from_lru_list(bh, bh->b_list);
+       write_unlock(&hash_table_lock);
 }
 
-static void put_last_free(struct buffer_head * bh)
+static void insert_into_queues(struct buffer_head *bh)
 {
-       if (bh) {
-               struct buffer_head **bhp = &free_list[BUFSIZE_INDEX(bh->b_size)];
-
-               if (bh->b_count)
-                       BUG();
-
-               bh->b_dev = B_FREE;  /* So it is obvious we are on the free list. */
-
-               /* Add to back of free list. */
-               if(!*bhp) {
-                       *bhp = bh;
-                       bh->b_prev_free = bh;
-               }
-
-               bh->b_next_free = *bhp;
-               bh->b_prev_free = (*bhp)->b_prev_free;
-               (*bhp)->b_prev_free->b_next_free = bh;
-               (*bhp)->b_prev_free = bh;
-       }
+       struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+
+       spin_lock(&lru_list_lock);
+       write_lock(&hash_table_lock);
+       __hash_link(bh, head);
+       __insert_into_lru_list(bh, bh->b_list);
+       write_unlock(&hash_table_lock);
+       spin_unlock(&lru_list_lock);
 }
 
-struct buffer_head * find_buffer(kdev_t dev, int block, int size)
-{              
-       struct buffer_head * next;
+/* This function must only run if there are no other
+ * references _anywhere_ to this buffer head.
+ */
+static void put_last_free(struct buffer_head * bh)
+{
+       struct bh_free_head *head = &free_list[BUFSIZE_INDEX(bh->b_size)];
+       struct buffer_head **bhp = &head->list;
 
-       next = hash(dev,block);
-       for (;;) {
-               struct buffer_head *tmp = next;
-               if (!next)
-                       break;
-               next = tmp->b_next;
-               if (tmp->b_blocknr != block || tmp->b_size != size || tmp->b_dev != dev)
-                       continue;
-               next = tmp;
-               break;
+       spin_lock(&head->lock);
+       bh->b_dev = B_FREE;
+       if(!*bhp) {
+               *bhp = bh;
+               bh->b_prev_free = bh;
        }
-       return next;
+       bh->b_next_free = *bhp;
+       bh->b_prev_free = (*bhp)->b_prev_free;
+       (*bhp)->b_prev_free->b_next_free = bh;
+       (*bhp)->b_prev_free = bh;
+       spin_unlock(&head->lock);
 }
 
 /*
@@ -578,10+567,19 @@ struct buffer_head * find_buffer(kdev_t dev, int block, int size)
  */
 struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
 {
-       struct buffer_head * bh;
-       bh = find_buffer(dev,block,size);
+       struct buffer_head **head = &hash(dev, block);
+       struct buffer_head *bh;
+
+       read_lock(&hash_table_lock);
+       for(bh = *head; bh; bh = bh->b_next)
+               if (bh->b_blocknr == block      &&
+                   bh->b_size    == size       &&
+                   bh->b_dev     == dev)
+                       break;
        if (bh)
-               bh->b_count++;
+               atomic_inc(&bh->b_count);
+       read_unlock(&hash_table_lock);
+
        return bh;
 }
 
@@ -630,6+628,8 @@ void set_blocksize(kdev_t dev, int size)
         * around on the free list, and we can get in a loop if we are not careful.
         */
        for(nlist = 0; nlist < NR_LIST; nlist++) {
+       repeat:
+               spin_lock(&lru_list_lock);
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
                        if(!bh)
@@ -640,21+640,25 @@ void set_blocksize(kdev_t dev, int size)
                                 continue;
                        if (bh->b_size == size)
                                 continue;
-                       bhnext->b_count++;
-                       bh->b_count++;
-                       wait_on_buffer(bh);
-                       bhnext->b_count--;
+                       if (buffer_locked(bh)) {
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
+                               wait_on_buffer(bh);
+                               atomic_dec(&bh->b_count);
+                               goto repeat;
+                       }
                        if (bh->b_dev == dev && bh->b_size != size) {
                                clear_bit(BH_Dirty, &bh->b_state);
                                clear_bit(BH_Uptodate, &bh->b_state);
                                clear_bit(BH_Req, &bh->b_state);
                                bh->b_flushtime = 0;
                        }
-                       if (--bh->b_count)
-                               continue;
-                       remove_from_queues(bh);
-                       put_last_free(bh);
+                       if (atomic_read(&bh->b_count) == 0) {
+                               __remove_from_queues(bh);
+                               put_last_free(bh);
+                       }
                }
+               spin_unlock(&lru_list_lock);
        }
 }
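
set_blocksize() here, and sync_buffers() and invalidate_buffers() above, all share one idiom for sleeping on a locked buffer while scanning a list under lru_list_lock: pin the buffer with a reference, drop the lock, sleep, reacquire, unpin, and rescan from the top, since the list may have changed during the sleep. As a sketch, assuming the file-local lru_list_lock is in scope:

    /* pin-drop-wait-rescan: never sleep while holding lru_list_lock */
    static void wait_on_listed_buffer(struct buffer_head *bh)
    {
            atomic_inc(&bh->b_count);       /* keep bh alive across the sleep */
            spin_unlock(&lru_list_lock);
            wait_on_buffer(bh);
            spin_lock(&lru_list_lock);
            atomic_dec(&bh->b_count);
            /* caller: goto repeat - rescan the list from the head */
    }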
 
@@ -721,10+725,11 @@ static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
         */
        spin_lock_irqsave(&page_uptodate_lock, flags);
        unlock_buffer(bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        tmp = bh->b_this_page;
        while (tmp != bh) {
-               if (tmp->b_count && (tmp->b_end_io == end_buffer_io_async))
+               if (atomic_read(&tmp->b_count) &&
+                   (tmp->b_end_io == end_buffer_io_async))
                        goto still_busy;
                tmp = tmp->b_this_page;
        }
@@ -794,24+799,26 @@ repeat:
        }
 
        isize = BUFSIZE_INDEX(size);
-get_free:
-       bh = free_list[isize];
+       spin_lock(&free_list[isize].lock);
+       bh = free_list[isize].list;
+       if (bh) {
+               __remove_from_free_list(bh, isize);
+               atomic_set(&bh->b_count, 1);
+       }
+       spin_unlock(&free_list[isize].lock);
        if (!bh)
                goto refill;
-       remove_from_free_list(bh);
 
        /* OK, FINALLY we know that this buffer is the only one of its kind,
-        * and that it's unused (b_count=0), unlocked, and clean.
+        * we hold a reference (b_count>0), it is unlocked, and it is clean.
         */
        init_buffer(bh, end_buffer_io_sync, NULL);
        bh->b_dev = dev;
        bh->b_blocknr = block;
-       bh->b_count = 1;
        bh->b_state = 1 << BH_Mapped;
 
        /* Insert the buffer into the regular lists */
-       insert_into_lru_list(bh);
-       insert_into_hash_list(bh);
+       insert_into_queues(bh);
        goto out;
 
        /*
@@ -820,24+827,12 @@ get_free:
         */
 refill:
        refill_freelist(size);
-       if (!find_buffer(dev,block,size))
-               goto get_free;
        goto repeat;
 out:
        return bh;
 }
 
 /*
- * Put a buffer into the appropriate list, without side-effects.
- */
-static void file_buffer(struct buffer_head *bh, int list)
-{
-       remove_from_lru_list(bh);
-       bh->b_list = list;
-       insert_into_lru_list(bh);
-}
-
-/*
  * if a new dirty buffer is created we need to balance bdflush.
  *
  * in the future we might want to make bdflush aware of different
@@ -875,34+870,29 @@ void __mark_buffer_dirty(struct buffer_head *bh, int flag)
        __mark_dirty(bh, flag);
 }
 
-void __atomic_mark_buffer_dirty(struct buffer_head *bh, int flag)
-{
-       lock_kernel();
-       __mark_dirty(bh, flag);
-       unlock_kernel();
-}
-
 /*
  * A buffer may need to be moved from one buffer list to another
  * (e.g. in case it is not shared any more). Handle this.
  */
-void refile_buffer(struct buffer_head * buf)
+static __inline__ void __refile_buffer(struct buffer_head *bh)
 {
-       int dispose;
-
-       if (buf->b_dev == B_FREE) {
-               printk("Attempt to refile free buffer\n");
-               return;
-       }
-
-       dispose = BUF_CLEAN;
-       if (buffer_locked(buf))
+       int dispose = BUF_CLEAN;
+       if (buffer_locked(bh))
                dispose = BUF_LOCKED;
-       if (buffer_dirty(buf))
+       if (buffer_dirty(bh))
                dispose = BUF_DIRTY;
+       if (dispose != bh->b_list) {
+               __remove_from_lru_list(bh, bh->b_list);
+               bh->b_list = dispose;
+               __insert_into_lru_list(bh, dispose);
+       }
+}
 
-       if (dispose != buf->b_list)
-               file_buffer(buf, dispose);
+void refile_buffer(struct buffer_head *bh)
+{
+       spin_lock(&lru_list_lock);
+       __refile_buffer(bh);
+       spin_unlock(&lru_list_lock);
 }
 
 /*
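
refile_buffer() also illustrates the naming convention the rewrite uses throughout: a double-underscore variant (__refile_buffer, __remove_from_lru_list, __put_unused_buffer_head, ...) assumes the caller already holds the relevant lock, while the plain-named function is the locking wrapper. Schematically, with hypothetical names:

    static __inline__ void __frob_buffer(struct buffer_head *bh)
    {
            /* caller must hold lru_list_lock */
            /* ... move bh between the LRU lists ... */
    }

    void frob_buffer(struct buffer_head *bh)
    {
            spin_lock(&lru_list_lock);
            __frob_buffer(bh);
            spin_unlock(&lru_list_lock);
    }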
@@ -912,8+902,8 @@ void __brelse(struct buffer_head * buf)
 {
        touch_buffer(buf);
 
-       if (buf->b_count) {
-               buf->b_count--;
+       if (atomic_read(&buf->b_count)) {
+               atomic_dec(&buf->b_count);
                wake_up(&buffer_wait);
                return;
        }
@@ -928,14+918,22 @@ void __brelse(struct buffer_head * buf)
  */
 void __bforget(struct buffer_head * buf)
 {
-       if (buf->b_count != 1 || buffer_locked(buf)) {
-               __brelse(buf);
-               return;
+       spin_lock(&lru_list_lock);
+       write_lock(&hash_table_lock);
+       if (atomic_read(&buf->b_count) != 1 || buffer_locked(buf)) {
+               touch_buffer(buf);
+               atomic_dec(&buf->b_count);
+               wake_up(&buffer_wait);
+       } else {
+               atomic_set(&buf->b_count, 0);
+               buf->b_state = 0;
+               if (buf->b_pprev)
+                       __hash_unlink(buf);
+               __remove_from_lru_list(buf, buf->b_list);
+               put_last_free(buf);
        }
-       buf->b_count = 0;
-       buf->b_state = 0;
-       remove_from_queues(buf);
-       put_last_free(buf);
+       write_unlock(&hash_table_lock);
+       spin_unlock(&lru_list_lock);
 }
 
 /*
@@ -1025,21+1023,25 @@ struct buffer_head * breada(kdev_t dev, int block, int bufsize,
 /*
  * Note: the caller should wake up the buffer_wait list if needed.
  */
-static void put_unused_buffer_head(struct buffer_head * bh)
+static __inline__ void __put_unused_buffer_head(struct buffer_head * bh)
 {
        if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
-               nr_buffer_heads--;
                kmem_cache_free(bh_cachep, bh);
-               return;
+       } else {
+               bh->b_blocknr = -1;
+               init_waitqueue_head(&bh->b_wait);
+               nr_unused_buffer_heads++;
+               bh->b_next_free = unused_list;
+               bh->b_this_page = NULL;
+               unused_list = bh;
        }
+}
 
-//     memset(bh, 0, sizeof(*bh));
-       bh->b_blocknr = -1;
-       init_waitqueue_head(&bh->b_wait);
-       nr_unused_buffer_heads++;
-       bh->b_next_free = unused_list;
-       bh->b_this_page = NULL;
-       unused_list = bh;
+static void put_unused_buffer_head(struct buffer_head *bh)
+{
+       spin_lock(&unused_list_lock);
+       __put_unused_buffer_head(bh);
+       spin_unlock(&unused_list_lock);
 }
 
 /*
@@ -1051,12+1053,15 @@ static struct buffer_head * get_unused_buffer_head(int async)
 {
        struct buffer_head * bh;
 
+       spin_lock(&unused_list_lock);
        if (nr_unused_buffer_heads > NR_RESERVED) {
                bh = unused_list;
                unused_list = bh->b_next_free;
                nr_unused_buffer_heads--;
+               spin_unlock(&unused_list_lock);
                return bh;
        }
+       spin_unlock(&unused_list_lock);
 
        /* This is critical.  We can't swap out pages to get
         * more buffer heads, because the swap-out may need
@@ -1065,20+1070,23 @@ static struct buffer_head * get_unused_buffer_head(int async)
        if((bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER)) != NULL) {
                memset(bh, 0, sizeof(*bh));
                init_waitqueue_head(&bh->b_wait);
-               nr_buffer_heads++;
                return bh;
        }
 
        /*
         * If we need an async buffer, use the reserved buffer heads.
         */
-       if (async && unused_list) {
-               bh = unused_list;
-               unused_list = bh->b_next_free;
-               nr_unused_buffer_heads--;
-               return bh;
+       if (async) {
+               spin_lock(&unused_list_lock);
+               if (unused_list) {
+                       bh = unused_list;
+                       unused_list = bh->b_next_free;
+                       nr_unused_buffer_heads--;
+                       spin_unlock(&unused_list_lock);
+                       return bh;
+               }
+               spin_unlock(&unused_list_lock);
        }
-
 #if 0
        /*
         * (Pending further analysis ...)
@@ -1090,7+1098,6 @@ static struct buffer_head * get_unused_buffer_head(int async)
           (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
                memset(bh, 0, sizeof(*bh));
                init_waitqueue_head(&bh->b_wait);
-               nr_buffer_heads++;
                return bh;
        }
 #endif
@@ -1127,7+1134,8 @@ try_again:
 
                bh->b_state = 0;
                bh->b_next_free = NULL;
-               bh->b_count = 0;
+               bh->b_pprev = NULL;
+               atomic_set(&bh->b_count, 0);
                bh->b_size = size;
 
                bh->b_data = (char *) (page+offset);
@@ -1197,9+1205,7 @@ static int create_page_buffers(int rw, struct page *page, kdev_t dev, int b[], i
         * They show up in the buffer hash table and are registered in
         * page->buffers.
         */
-       lock_kernel();
        head = create_buffers(page_address(page), size, 1);
-       unlock_kernel();
        if (page->buffers)
                BUG();
        if (!head)
@@ -1248,7+1254,6 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
                BUG();
        if (!page->buffers)
                return 0;
-       lock_kernel();
 
        head = page->buffers;
        bh = head;
@@ -1261,7+1266,7 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
                 */
                if (offset <= curr_off) {
                        if (buffer_mapped(bh)) {
-                               bh->b_count++;
+                               atomic_inc(&bh->b_count);
                                wait_on_buffer(bh);
                                if (bh->b_dev == B_FREE)
                                        BUG();
@@ -1269,7+1274,7 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
                                clear_bit(BH_Uptodate, &bh->b_state);
                                clear_bit(BH_Mapped, &bh->b_state);
                                bh->b_blocknr = 0;
-                               bh->b_count--;
+                               atomic_dec(&bh->b_count);
                        }
                }
                curr_off = next_off;
@@ -1288,10+1293,9 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
         */
        if (!offset) {
                if (!try_to_free_buffers(page))
-                       buffermem += PAGE_CACHE_SIZE;
+                       atomic_add(PAGE_CACHE_SIZE, &buffermem);
        }
 
-       unlock_kernel();
        return 0;
 }
 
@@ -1299,9+1303,7 @@ static void create_empty_buffers(struct page *page, struct inode *inode, unsigne
 {
        struct buffer_head *bh, *head, *tail;
 
-       lock_kernel();
        head = create_buffers(page_address(page), blocksize, 1);
-       unlock_kernel();
        if (page->buffers)
                BUG();
 
@@ -1365,7+1367,7 @@ int block_write_full_page(struct file *file, struct page *page)
                                goto out;
                }
                set_bit(BH_Uptodate, &bh->b_state);
-               atomic_mark_buffer_dirty(bh,0);
+               mark_buffer_dirty(bh,0);
 
                bh = bh->b_this_page;
                block++;
@@ -1458,10+1460,8 @@ int block_write_partial_page(struct file *file, struct page *page, unsigned long
                        if (buffer_new(bh)) {
                                memset(bh->b_data, 0, bh->b_size);
                        } else {
-                               lock_kernel();
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
-                               unlock_kernel();
                                err = -EIO;
                                if (!buffer_uptodate(bh))
                                        goto out;
@@ -1498,11+1498,9 @@ int block_write_partial_page(struct file *file, struct page *page, unsigned long
                 */
                set_bit(BH_Uptodate, &bh->b_state);
                if (!test_and_set_bit(BH_Dirty, &bh->b_state)) {
-                       lock_kernel();
                        __mark_dirty(bh, 0);
                        if (too_many_dirty_buffers)
                                balance_dirty(bh->b_dev);
-                       unlock_kernel();
                }
 
                if (err) {
@@ -1569,7+1567,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
        do {
                block = *(b++);
 
-               if (fresh && (bh->b_count != 0))
+               if (fresh && (atomic_read(&bh->b_count) != 0))
                        BUG();
                if (rw == READ) {
                        if (!fresh)
@@ -1582,7+1580,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
                                        BUG();
                                if (!buffer_uptodate(bh)) {
                                        arr[nr++] = bh;
-                                       bh->b_count++;
+                                       atomic_inc(&bh->b_count);
                                }
                        }
                } else { /* WRITE */
@@ -1597,7+1595,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
                        set_bit(BH_Uptodate, &bh->b_state);
                        set_bit(BH_Dirty, &bh->b_state);
                        arr[nr++] = bh;
-                       bh->b_count++;
+                       atomic_inc(&bh->b_count);
                }
                bh = bh->b_this_page;
        } while (bh != head);
@@ -1663,7+1661,7 @@ int block_read_full_page(struct file * file, struct page * page)
                }
 
                init_buffer(bh, end_buffer_io_async, NULL);
-               bh->b_count++;
+               atomic_inc(&bh->b_count);
                arr[nr] = bh;
                nr++;
        } while (iblock++, (bh = bh->b_this_page) != head);
@@ -1710,8+1708,9 @@ static int grow_buffers(int size)
        }
 
        isize = BUFSIZE_INDEX(size);
-       insert_point = free_list[isize];
 
+       spin_lock(&free_list[isize].lock);
+       insert_point = free_list[isize].list;
        tmp = bh;
        while (1) {
                if (insert_point) {
@@ -1730,9+1729,11 @@ static int grow_buffers(int size)
                        break;
        }
        tmp->b_this_page = bh;
-       free_list[isize] = bh;
+       free_list[isize].list = bh;
+       spin_unlock(&free_list[isize].lock);
+
        mem_map[MAP_NR(page)].buffers = bh;
-       buffermem += PAGE_SIZE;
+       atomic_add(PAGE_SIZE, &buffermem);
        return 1;
 }
 
@@ -1740,7+1741,7 @@ static int grow_buffers(int size)
  * Can the buffer be thrown out?
  */
 #define BUFFER_BUSY_BITS       ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
-#define buffer_busy(bh)                ((bh)->b_count | ((bh)->b_state & BUFFER_BUSY_BITS))
+#define buffer_busy(bh)                (atomic_read(&(bh)->b_count) | ((bh)->b_state & BUFFER_BUSY_BITS))
 
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
@@ -1748,11+1749,20 @@ static int grow_buffers(int size)
  *
  * Wake up bdflush() if this fails - if we're running low on memory due
  * to dirty buffers, we need to flush them out as quickly as possible.
+ *
+ * NOTE: There are quite a number of ways that threads of control can
+ *       obtain a reference to a buffer head within a page.  So we must
+ *      lock out all of these paths to cleanly toss the page.
  */
 int try_to_free_buffers(struct page * page)
 {
        struct buffer_head * tmp, * bh = page->buffers;
+       int index = BUFSIZE_INDEX(bh->b_size);
+       int ret;
 
+       spin_lock(&lru_list_lock);
+       write_lock(&hash_table_lock);
+       spin_lock(&free_list[index].lock);
        tmp = bh;
        do {
                struct buffer_head * p = tmp;
@@ -1762,19+1772,25 @@ int try_to_free_buffers(struct page * page)
                        goto busy_buffer_page;
        } while (tmp != bh);
 
+       spin_lock(&unused_list_lock);
        tmp = bh;
        do {
                struct buffer_head * p = tmp;
                tmp = tmp->b_this_page;
 
-               /* The buffer can be either on the regular queues or on the free list.. */              
-               if (p->b_dev == B_FREE)
-                       remove_from_free_list(p);
-               else
-                       remove_from_queues(p);
-
-               put_unused_buffer_head(p);
+               /* The buffer can be either on the regular
+                * queues or on the free list..
+                */
+               if (p->b_dev == B_FREE) {
+                       __remove_from_free_list(p, index);
+               } else {
+                       if (p->b_pprev)
+                               __hash_unlink(p);
+                       __remove_from_lru_list(p, p->b_list);
+               }
+               __put_unused_buffer_head(p);
        } while (tmp != bh);
+       spin_unlock(&unused_list_lock);
 
        /* Wake up anyone waiting for buffer heads */
        wake_up(&buffer_wait);
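
try_to_free_buffers() now nests its locks in a single fixed order (lru_list_lock, then hash_table_lock, then the per-size free_list lock, then unused_list_lock) and releases them in reverse through one exit label. A compact user-space model of that ordering rule; the helpers are hypothetical stand-ins:

#include <pthread.h>

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t hash_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t free_lock  = PTHREAD_MUTEX_INITIALIZER;

static int page_is_busy(void *page) { (void)page; return 0; }   /* stub */
static void detach_buffers(void *page) { (void)page; }          /* stub */

int try_to_free(void *page)
{
        int ret = 0;

        /* Take every lock that guards a path to the buffers, in a
         * fixed global order, so concurrent lookups cannot deadlock. */
        pthread_mutex_lock(&lru_lock);
        pthread_rwlock_wrlock(&hash_lock);
        pthread_mutex_lock(&free_lock);

        if (page_is_busy(page))
                goto out;               /* single unlock path, like 'out:' above */

        detach_buffers(page);
        ret = 1;
out:
        /* Release in exactly the reverse order of acquisition. */
        pthread_mutex_unlock(&free_lock);
        pthread_rwlock_unlock(&hash_lock);
        pthread_mutex_unlock(&lru_lock);
        return ret;
}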
@@ -1782,55+1798,21 @@ int try_to_free_buffers(struct page * page)
        /* And free the page */
        page->buffers = NULL;
        __free_page(page);
-       return 1;
+       ret = 1;
+out:
+       spin_unlock(&free_list[index].lock);
+       write_unlock(&hash_table_lock);
+       spin_unlock(&lru_list_lock);
+       return ret;
 
 busy_buffer_page:
        /* Uhhuh, start writeback so that we don't end up with all dirty pages */
        too_many_dirty_buffers = 1;
        wakeup_bdflush(0);
-       return 0;
-}
-
-/* ================== Debugging =================== */
-
-void show_buffers(void)
-{
-       struct buffer_head * bh;
-       int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
-       int protected = 0;
-       int nlist;
-       static char *buf_types[NR_LIST] = {"CLEAN","LOCKED","DIRTY"};
-
-       printk("Buffer memory:   %6dkB\n",buffermem>>10);
-       printk("Buffer heads:    %6d\n",nr_buffer_heads);
-       printk("Buffer blocks:   %6d\n",nr_buffers);
-       printk("Buffer hashed:   %6d\n",nr_hashed_buffers);
-
-       for(nlist = 0; nlist < NR_LIST; nlist++) {
-         found = locked = dirty = used = lastused = protected = 0;
-         bh = lru_list[nlist];
-         if(!bh) continue;
-
-         do {
-               found++;
-               if (buffer_locked(bh))
-                       locked++;
-               if (buffer_protected(bh))
-                       protected++;
-               if (buffer_dirty(bh))
-                       dirty++;
-               if (bh->b_count)
-                       used++, lastused = found;
-               bh = bh->b_next_free;
-         } while (bh != lru_list[nlist]);
-         printk("%8s: %d buffers, %d used (last=%d), "
-                "%d locked, %d protected, %d dirty\n",
-                buf_types[nlist], found, used, lastused,
-                locked, protected, dirty);
-       };
+       ret = 0;
+       goto out;
 }
 
-
 /* ===================== Init ======================= */
 
 /*
@@ -1840,31+1822,46 @@ void show_buffers(void)
  */
 void __init buffer_init(unsigned long memory_size)
 {
-       int order;
+       int order, i;
        unsigned int nr_hash;
 
-       /* we need to guess at the right sort of size for a buffer cache.
-          the heuristic from working with large databases and getting
-          fsync times (ext2) manageable, is the following */
-
-       memory_size >>= 22;
-       for (order = 5; (1UL << order) < memory_size; order++);
+       /* The buffer cache hash table is less important these days,
+        * trim it a bit.
+        */
+       memory_size >>= 14;
+       memory_size *= sizeof(struct buffer_head *);
+       for (order = 0; (PAGE_SIZE << order) < memory_size; order++)
+               ;
 
        /* try to allocate something until we get it or we're asking
           for something that is really too small */
 
        do {
-               nr_hash = (1UL << order) * PAGE_SIZE /
-                   sizeof(struct buffer_head *);
+               nr_hash = (PAGE_SIZE << order) / sizeof(struct buffer_head *);
+               bh_hash_mask = (nr_hash - 1);
+               bh_hash_shift = (PAGE_SHIFT + order);
                hash_table = (struct buffer_head **)
                    __get_free_pages(GFP_ATOMIC, order);
-       } while (hash_table == NULL && --order > 4);
-       printk("buffer-cache hash table entries: %d (order: %d, %ld bytes)\n", nr_hash, order, (1UL<<order) * PAGE_SIZE);
-       
+       } while (hash_table == NULL && --order > 0);
+       printk("Buffer-cache hash table entries: %d (order: %d, %ld bytes)\n",
+              nr_hash, order, (1UL<<order) * PAGE_SIZE);
+
        if (!hash_table)
                panic("Failed to allocate buffer hash table\n");
-       memset(hash_table, 0, nr_hash * sizeof(struct buffer_head *));
-       bh_hash_mask = nr_hash-1;
+
+       /* Setup hash chains. */
+       for(i = 0; i < nr_hash; i++)
+               hash_table[i] = NULL;
+
+       /* Setup free lists. */
+       for(i = 0; i < NR_SIZES; i++) {
+               free_list[i].list = NULL;
+               free_list[i].lock = SPIN_LOCK_UNLOCKED;
+       }
+
+       /* Setup lru lists. */
+       for(i = 0; i < NR_LIST; i++)
+               lru_list[i] = NULL;
 
        bh_cachep = kmem_cache_create("buffer_head",
                                      sizeof(struct buffer_head),
@@ -1872,21+1869,6 @@ void __init buffer_init(unsigned long memory_size)
                                      SLAB_HWCACHE_ALIGN, NULL, NULL);
        if(!bh_cachep)
                panic("Cannot create buffer head SLAB cache\n");
-       /*
-        * Allocate the reserved buffer heads.
-        */
-       while (nr_buffer_heads < NR_RESERVED) {
-               struct buffer_head * bh;
-
-               bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
-               if (!bh)
-                       break;
-               put_unused_buffer_head(bh);
-               nr_buffer_heads++;
-       }
-
-       lru_list[BUF_CLEAN] = 0;
-       grow_buffers(BLOCK_SIZE);
 }
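
The rewritten buffer_init() sizes the hash at one slot per 16 KB of memory (memory_size >> 14), converts that to bytes of pointer array, and rounds up to a power-of-two allocation order, from which bh_hash_mask and bh_hash_shift fall out directly. A standalone sketch of the arithmetic, assuming 4 KB pages and a 64 MB machine:

#include <stdio.h>

int main(void)
{
        unsigned long memory_size = 64UL << 20;   /* example: 64 MB */
        unsigned long page_size = 4096, page_shift = 12;
        unsigned long nr_hash, mask, shift;
        int order;

        memory_size >>= 14;                       /* hash slots wanted */
        memory_size *= sizeof(void *);            /* bytes of pointer array */
        for (order = 0; (page_size << order) < memory_size; order++)
                ;

        nr_hash = (page_size << order) / sizeof(void *);
        mask  = nr_hash - 1;                      /* nr_hash is a power of two */
        shift = page_shift + order;

        printf("entries=%lu order=%d mask=%#lx shift=%lu\n",
               nr_hash, order, mask, shift);
        return 0;
}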
 
 
@@ -1922,70+1904,49 @@ void wakeup_bdflush(int wait)
 
 static int sync_old_buffers(void)
 {
-       int i;
-       int ndirty, nwritten;
        int nlist;
-       int ncount;
-       struct buffer_head * bh, *next;
 
+       lock_kernel();
        sync_supers(0);
        sync_inodes(0);
+       unlock_kernel();
 
-       ncount = 0;
-#ifdef DEBUG
-       for(nlist = 0; nlist < NR_LIST; nlist++)
-#else
-       for(nlist = BUF_LOCKED; nlist <= BUF_DIRTY; nlist++)
-#endif
-       {
-               ndirty = 0;
-               nwritten = 0;
+       for(nlist = BUF_LOCKED; nlist <= BUF_DIRTY; nlist++) {
+               struct buffer_head *bh;
        repeat:
-
+               spin_lock(&lru_list_lock);
                bh = lru_list[nlist];
-               if(bh) 
-                        for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
-                                /* We may have stalled while waiting for I/O to complete. */
-                                if(bh->b_list != nlist) goto repeat;
-                                next = bh->b_next_free;
-                                if(!lru_list[nlist]) {
-                                        printk("Dirty list empty %d\n", i);
-                                        break;
-                                }
-                                
-                                /* Clean buffer on dirty list?  Refile it */
-                                if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh)) {
-                                        refile_buffer(bh);
-                                        continue;
-                                }
-                                 
-                                 /* Unlocked buffer on locked list?  Refile it */
-                                 if (nlist == BUF_LOCKED && !buffer_locked(bh)) {
-                                         refile_buffer(bh);
-                                         continue;
-                                 }
+               if(bh) {
+                       struct buffer_head *next;
+                       int i;
+                       for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
+                               next = bh->b_next_free;
+
+                               /* If the buffer is not on the proper list,
+                                * then refile it.
+                                */
+                               if ((nlist == BUF_DIRTY &&
+                                    (!buffer_dirty(bh) && !buffer_locked(bh))) ||
+                                   (nlist == BUF_LOCKED && !buffer_locked(bh))) {
+                                       __refile_buffer(bh);
+                                       continue;
+                               }
                                 
-                                if (buffer_locked(bh) || !buffer_dirty(bh))
-                                         continue;
-                                ndirty++;
-                                nwritten++;
-                                next->b_count++;
-                                bh->b_count++;
-                                bh->b_flushtime = 0;
-#ifdef DEBUG
-                                if(nlist != BUF_DIRTY) ncount++;
-#endif
-                                ll_rw_block(WRITE, 1, &bh);
-                                bh->b_count--;
-                                next->b_count--;
-                        }
+                               if (buffer_locked(bh) || !buffer_dirty(bh))
+                                       continue;
+
+                               /* OK, now we are committed to write it out. */
+                               bh->b_flushtime = 0;
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
+                               ll_rw_block(WRITE, 1, &bh);
+                               atomic_dec(&bh->b_count);
+                               goto repeat;
+                       }
+               }
+               spin_unlock(&lru_list_lock);
        }
        run_task_queue(&tq_disk);
-#ifdef DEBUG
-       if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
-       printk("Wrote %d/%d buffers\n", nwritten, ndirty);
-#endif
-       run_task_queue(&tq_disk);
        return 0;
 }
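
The new sync_old_buffers() shows the scan pattern used throughout this patch: pin the buffer with atomic_inc(&bh->b_count), drop lru_list_lock across the blocking ll_rw_block() call, unpin, and restart the scan because the list may have changed while sleeping. The old code instead pinned `next` with a plain b_count++ under the big lock. A user-space model of the new pattern (types and helpers invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>

struct buf {
        struct buf *next;
        atomic_int b_count;              /* pin count, like bh->b_count */
        int dirty;
};

static struct buf *dirty_list;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_out(struct buf *b) { b->dirty = 0; }  /* ll_rw_block stand-in */

static void sync_dirty(void)
{
repeat:
        pthread_mutex_lock(&lru_lock);
        for (struct buf *b = dirty_list; b != NULL; b = b->next) {
                if (!b->dirty)
                        continue;
                /* Pin the buffer so it cannot be freed, drop the list lock
                 * across the blocking write, then rescan from the top:
                 * the list may have changed while we slept. */
                atomic_fetch_add(&b->b_count, 1);
                pthread_mutex_unlock(&lru_lock);
                write_out(b);
                atomic_fetch_sub(&b->b_count, 1);
                goto repeat;
        }
        pthread_mutex_unlock(&lru_lock);
}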
 
@@ -1999,7+1960,6 @@ asmlinkage int sys_bdflush(int func, long data)
 {
        int i, error = -EPERM;
 
-       lock_kernel();
        if (!capable(CAP_SYS_ADMIN))
                goto out;
 
@@ -2031,7+1991,6 @@ asmlinkage int sys_bdflush(int func, long data)
         */
        error = 0;
 out:
-       unlock_kernel();
        return error;
 }
 
@@ -2053,52+2012,37 @@ int bdflush(void * unused)
        sprintf(current->comm, "kflushd");
        bdflush_tsk = current;
 
-       /*
-        *      As a kernel thread we want to tamper with system buffers
-        *      and other internals and thus be subject to the SMP locking
-        *      rules. (On a uniprocessor box this does nothing).
-        */
-       lock_kernel();
-                
        for (;;) {
                int nlist;
 
                CHECK_EMERGENCY_SYNC
 
-               for(nlist = BUF_LOCKED; nlist <= BUF_DIRTY; nlist++)
-               {
-                       int nr;
-                       int written = 0;
+               for(nlist = BUF_LOCKED; nlist <= BUF_DIRTY; nlist++) {
+                       int nr, major, written = 0;
                        struct buffer_head *next;
-                       int major;
 
                repeat:
+                       spin_lock(&lru_list_lock);
                        next = lru_list[nlist];
                        nr = nr_buffers_type[nlist];
-
                        while (nr-- > 0) {
                                struct buffer_head *bh = next;
-                               /* We may have stalled while waiting for I/O to complete. */
-                               if (next->b_list != nlist)
-                                       goto repeat;
+
                                next = next->b_next_free;
                                        
-                               /* Clean buffer on dirty list?  Refile it */
-                               if (nlist == BUF_DIRTY && !buffer_dirty(bh)) {
-                                       refile_buffer(bh);
-                                       continue;
-                               }
-                                       
-                               /* Unlocked buffer on locked list?  Refile it */
-                               if (nlist == BUF_LOCKED && !buffer_locked(bh)) {
-                                       refile_buffer(bh);
+                               /* If the buffer is not on the correct list,
+                                * then refile it.
+                                */
+                               if ((nlist == BUF_DIRTY &&
+                                    (!buffer_dirty(bh) && !buffer_locked(bh))) ||
+                                   (nlist == BUF_LOCKED && !buffer_locked(bh))) {
+                                       __refile_buffer(bh);
                                        continue;
                                }
 
-                               /*
-                                * If we aren't in panic mode, don't write out too much
-                                * at a time. Also, don't write out buffers we don't really
-                                * have to write out yet..
+                               /* If we aren't in panic mode, don't write out too much
+                                * at a time. Also, don't write out buffers we don't
+                                * really have to write out yet..
                                 */
                                if (!too_many_dirty_buffers) {
                                        if (written > bdf_prm.b_un.ndirty)
@@ -2111,9+2055,6 @@ int bdflush(void * unused)
                                         continue;
 
                                major = MAJOR(bh->b_dev);
-                               if (next)
-                                       next->b_count++;
-                               bh->b_count++;
                                written++;
                                bh->b_flushtime = 0;
 
@@ -2121,18+2062,19 @@ int bdflush(void * unused)
                                 * For the loop major we can try to do asynchronous writes,
                                 * but we have to guarantee that we're making some progress..
                                 */
+                               atomic_inc(&bh->b_count);
+                               spin_unlock(&lru_list_lock);
                                if (major == LOOP_MAJOR && written > 1) {
                                        ll_rw_block(WRITEA, 1, &bh);
                                        if (buffer_dirty(bh))
                                                --written;
                                } else
                                        ll_rw_block(WRITE, 1, &bh);
-
-                               bh->b_count--;
-                               if (next)
-                                       next->b_count--;
                                wake_up(&buffer_wait);
+                               atomic_dec(&bh->b_count);
+                               goto repeat;
                        }
+                       spin_unlock(&lru_list_lock);
                }
                run_task_queue(&tq_disk);
                wake_up(&bdflush_done);
index 54a87e3..7e83ea9 100644 (file)
@@ -256,7+256,7 @@ void coda_prepare_openfile(struct inode *i, struct file *coda_file,
         cont_file->f_pos = coda_file->f_pos;
         cont_file->f_mode = coda_file->f_mode;
         cont_file->f_flags = coda_file->f_flags;
-        cont_file->f_count  = coda_file->f_count;
+        atomic_set(&cont_file->f_count, atomic_read(&coda_file->f_count));
         cont_file->f_owner  = coda_file->f_owner;
        cont_file->f_op = cont_inode->i_op->default_file_ops;
        cont_file->f_dentry = cont_dentry;
index 8ae361e..3969e17 100644 (file)
@@ -56,7+56,7 @@ static int sync_indirect(struct inode * inode, u32 * block, int wait)
                return 0;
        }
        ll_rw_block(WRITE, 1, &bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        return 0;
 }
 
index af6e2e9..d824edb 100644 (file)
@@ -129,7+129,7 @@ static int check_block_empty(struct inode *inode, struct buffer_head *bh,
                if (*(ind++))
                        goto in_use;
 
-       if (bh->b_count == 1) {
+       if (atomic_read(&bh->b_count) == 1) {
                int tmp;
                tmp = le32_to_cpu(*p);
                *p = 0;
@@ -158,7+158,7 @@ out:
 }
 
 #define DATA_BUFFER_USED(bh) \
-       (bh->b_count || buffer_locked(bh))
+       (atomic_read(&bh->b_count) || buffer_locked(bh))
 
 static int trunc_direct (struct inode * inode)
 {
index 1a5d56f..01fc64d 100644 (file)
@@ -231,7+231,7 @@ void write_inode_now(struct inode *inode)
 void clear_inode(struct inode *inode)
 {
        if (inode->i_nrpages)
-               truncate_inode_pages(inode, 0);
+               BUG();
        wait_on_inode(inode);
        if (IS_QUOTAINIT(inode))
                DQUOT_DROP(inode);
@@ -261,6+261,8 @@ static void dispose_list(struct list_head * head)
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_list);
+               if (inode->i_nrpages)
+                       truncate_inode_pages(inode, 0);
                clear_inode(inode);
                count++;
        }
@@ -735,6+737,8 @@ void iput(struct inode *inode)
                                if (op && op->delete_inode) {
                                        void (*delete)(struct inode *) = op->delete_inode;
                                        spin_unlock(&inode_lock);
+                                       if (inode->i_nrpages)
+                                               truncate_inode_pages(inode, 0);
                                        delete(inode);
                                        spin_lock(&inode_lock);
                                }
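
These three hunks move truncate_inode_pages() out of clear_inode() and into the disposal paths; clear_inode() itself now treats leftover pages as a bug. A tiny runnable model of the new invariant, with assert() standing in for BUG() and invented stand-in types:

#include <assert.h>

struct inode_model { int nrpages; };

static void truncate_pages(struct inode_model *i) { i->nrpages = 0; }

static void clear_inode_model(struct inode_model *i)
{
        assert(i->nrpages == 0);        /* models the new BUG() in clear_inode() */
}

/* Disposal paths (dispose_list(), iput()'s delete branch) now flush
 * the page cache themselves before clearing the inode. */
static void drop_inode(struct inode_model *i)
{
        if (i->nrpages)
                truncate_pages(i);
        clear_inode_model(i);
}

int main(void)
{
        struct inode_model i = { 3 };
        drop_inode(&i);
        return 0;
}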
index ef3d15d..2fcdddf 100644 (file)
@@ -53,7+53,7 @@ static int V1_sync_block (struct inode * inode, unsigned short * block, int wait
                return 0;
        }
        ll_rw_block(WRITE, 1, &bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        return 0;
 }
 
@@ -190,7+190,7 @@ static int V2_sync_block (struct inode * inode, unsigned long * block, int wait)
                return 0;
        }
        ll_rw_block(WRITE, 1, &bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        return 0;
 }
 
index 4718e09..6724d00 100644 (file)
  */
 
 #define DATA_BUFFER_USED(bh) \
-       ((bh->b_count > 1) || buffer_locked(bh))
+       (atomic_read(&bh->b_count) || buffer_locked(bh))
 
 /*
  * The functions for minix V1 fs truncation.
@@ -121,7+121,7 @@ repeat:
                if (*(ind++))
                        break;
        if (i >= 512) {
-               if (ind_bh->b_count != 1)
+               if (atomic_read(&ind_bh->b_count) != 1)
                        retry = 1;
                else {
                        tmp = *p;
@@ -166,7+166,7 @@ repeat:
                if (*(dind++))
                        break;
        if (i >= 512) {
-               if (dind_bh->b_count != 1)
+               if (atomic_read(&dind_bh->b_count) != 1)
                        retry = 1;
                else {
                        tmp = *p;
@@ -285,7+285,7 @@ repeat:
                if (*(ind++))
                        break;
        if (i >= 256) {
-               if (ind_bh->b_count != 1)
+               if (atomic_read(&ind_bh->b_count) != 1)
                        retry = 1;
                else {
                        tmp = *p;
@@ -330,7+330,7 @@ repeat:
                if (*(dind++))
                        break;
        if (i >= 256) {
-               if (dind_bh->b_count != 1)
+               if (atomic_read(&dind_bh->b_count) != 1)
                        retry = 1;
                else {
                        tmp = *p;
@@ -376,7+376,7 @@ repeat:
                 if (*(tind++))
                         break;
         if (i >= 256) {
-                if (tind_bh->b_count != 1)
+                if (atomic_read(&tind_bh->b_count) != 1)
                         retry = 1;
                 else {
                         tmp = *p;
index 582b185..63294ae 100644 (file)
@@ -342,7+342,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 
        memset(filp, 0, sizeof(*filp));
        filp->f_op    = inode->i_op->default_file_ops;
-       filp->f_count = 1;
+       atomic_set(&filp->f_count, 1);
        filp->f_flags = wflag? O_WRONLY : O_RDONLY;
        filp->f_mode  = wflag? FMODE_WRITE : FMODE_READ;
        filp->f_dentry = dentry;
@@ -360,7+360,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
                        /* I nearly added put_filp() call here, but this filp
                         * is really on callers stack frame. -DaveM
                         */
-                       filp->f_count--;
+                       atomic_dec(&filp->f_count);
                }
        }
 out_nfserr:
index 72bb194..bbc5483 100644 (file)
@@ -1,4+1,4 @@
-/* $Id: openpromfs.c,v 1.33 1999/04/28 11:57:33 davem Exp $
+/* $Id: openpromfs.c,v 1.35 1999/06/27 00:37:36 davem Exp $
  * openpromfs.c: /proc/openprom handling routines
  *
  * Copyright (C) 1996-1998 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
index 826ee8a..4cea74f 100644 (file)
@@ -55,7+55,7 @@ static int sync_block(struct inode *inode, unsigned short *block, int wait)
                return 0;
        }
        ll_rw_block(WRITE, 1, &bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        return 0;
 }
 
index 375ff6f..f5240a7 100644 (file)
@@ -201,7+201,7 @@ int sysv_new_block(struct super_block * sb)
                unlock_super(sb);
                return 0;
        }
-       if (bh->b_count != 1) {
+       if (atomic_read(&bh->b_count) != 1) {
                printk("sysv_new_block: block already in use\n");
                unlock_super(sb);
                return 0;
index ddffd7d..b0e1138 100644 (file)
@@ -54,7+54,7 @@ static int sync_block (struct inode * inode, u32 *blockp, int convert, int wait)
                return 0;
        }
        ll_rw_block(WRITE, 1, &bh);
-       bh->b_count--;
+       atomic_dec(&bh->b_count);
        return 0;
 }
 
index a8c0e07..db0f725 100644 (file)
  */
 
 #define DATA_BUFFER_USED(bh) \
-       ((bh->b_count > 1) || buffer_locked(bh))
+       (atomic_read(&bh->b_count) || buffer_locked(bh))
 
 /* We throw away any data beyond inode->i_size. */
 
index db3f11f..12937ad 100644 (file)
@@ -271,6+271,8 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
                                ufs_set_opt (*mount_options, UFSTYPE_44BSD);
                        else if (!strcmp (value, "nextstep"))
                                ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP);
+                       else if (!strcmp (value, "nextstep-cd"))
+                               ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP_CD);
                        else if (!strcmp (value, "openstep"))
                                ufs_set_opt (*mount_options, UFSTYPE_OPENSTEP);
                        else if (!strcmp (value, "sunx86"))
@@ -465,7+467,7 @@ struct super_block * ufs_read_super (struct super_block * sb, void * data,
        }
        if (!(sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE)) {
                printk("You didn't specify the type of your ufs filesystem\n\n"
-               "       mount -t ufs -o ufstype=sun|sunx86|44bsd|old|nextstep|openstep ....\n\n"
+               "       mount -t ufs -o ufstype=sun|sunx86|44bsd|old|nextstep|nextstep-cd|openstep ...\n\n"

                ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
                "default is ufstype=old\n");
                ufs_set_opt (sb->u.ufs_sb.s_mount_opt, UFSTYPE_OLD);
@@ -535,6+537,20 @@ struct super_block * ufs_read_super (struct super_block * sb, void * data,
                }
                break;
        
+       case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
+               UFSD(("ufstype=nextstep-cd\n"))
+               uspi->s_fsize = block_size = 2048;
+               uspi->s_fmask = ~(2048 - 1);
+               uspi->s_fshift = 11;
+               uspi->s_sbsize = super_block_size = 2048;
+               uspi->s_sbbase = 0;
+               flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+               if (!(sb->s_flags & MS_RDONLY)) {
+                       printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
+                       sb->s_flags |= MS_RDONLY;
+               }
+               break;
+       
        case UFS_MOUNT_UFSTYPE_OPENSTEP:
                UFSD(("ufstype=openstep\n"))
                uspi->s_fsize = block_size = 1024;
@@ -592,6+608,7 @@ again:
 #endif
 
        if ((((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) 
+         || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) 
          || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) 
          && uspi->s_sbbase < 256) {
                ubh_brelse_uspi(uspi);
@@ -616,8+633,8 @@ magic_found:
                printk("ufs_read_super: fs_bsize %u != {4096, 8192}\n", uspi->s_bsize);
                goto failed;
        }
-       if (uspi->s_fsize != 512 && uspi->s_fsize != 1024) {
-               printk("ufs_read_super: fs_fsize %u != {512, 1024}\n", uspi->s_fsize);
+       if (uspi->s_fsize != 512 && uspi->s_fsize != 1024 && uspi->s_fsize != 2048) {
+               printk("ufs_read_super: fs_fsize %u != {512, 1024, 2048}\n", uspi->s_fsize);
                goto failed;
        }
        if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
index 4649a42..b7214fc 100644 (file)
 #define DIRECT_FRAGMENT howmany (inode->i_size, uspi->s_fsize)
 
 #define DATA_BUFFER_USED(bh) \
-       ((bh->b_count > 1) || buffer_locked(bh))
+       (atomic_read(&bh->b_count) || buffer_locked(bh))
 
 static int ufs_trunc_direct (struct inode * inode)
 {
index 11978a7..e6d5f3a 100644 (file)
@@ -137,8+137,8 @@ unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
        if (!ubh)
                return 0;
        for ( i = 0; i < ubh->count; i++ ) 
-               if ( ubh->bh[i]->b_count > max )
-                       max = ubh->bh[i]->b_count;
+               if ( atomic_read(&ubh->bh[i]->b_count) > max )
+                       max = atomic_read(&ubh->bh[i]->b_count);
        return max;
 }
 
index b014e17..a630c9e 100644 (file)
@@ -1,4+1,4 @@
-/* $Id: page.h,v 1.43 1998/05/11 08:40:11 davem Exp $
+/* $Id: page.h,v 1.44 1999/06/23 03:53:11 davem Exp $
  * page.h:  Various defines and such for MMU operations on the Sparc for
  *          the Linux kernel.
  *
index 05dfda7..877f317 100644 (file)
@@ -1,4+1,4 @@
-/* $Id: pgtable.h,v 1.80 1999/05/27 04:52:40 davem Exp $ */
+/* $Id: pgtable.h,v 1.81 1999/06/27 00:38:28 davem Exp $ */
 #ifndef _SPARC_PGTABLE_H
 #define _SPARC_PGTABLE_H
 
index 1a808c0..34c7e38 100644 (file)
@@ -1,4+1,4 @@
-/* $Id: page.h,v 1.24 1998/10/20 03:09:16 jj Exp $ */
+/* $Id: page.h,v 1.25 1999/06/23 03:53:15 davem Exp $ */
 
 #ifndef _SPARC64_PAGE_H
 #define _SPARC64_PAGE_H
index d3a8ec9..b411d04 100644 (file)
@@ -1,4+1,4 @@
-/* $Id: pgtable.h,v 1.105 1999/05/27 04:52:51 davem Exp $
+/* $Id: pgtable.h,v 1.106 1999/06/27 00:38:33 davem Exp $
  * pgtable.h: SpitFire page table operations.
  *
  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
index 228ede1..2915718 100644 (file)
@@ -198,40+198,36 @@ typedef char buffer_block[BLOCK_SIZE];
  * particularly beneficial on 32-bit processors.
  * 
  * We use the first 16 bytes for the data which is used in searches
- * over the block hash lists (ie. getblk(), find_buffer() and
- * friends).
+ * over the block hash lists (ie. getblk() and friends).
  * 
  * The second 16 bytes we use for lru buffer scans, as used by
  * sync_buffers() and refill_freelist().  -- sct
  */
 struct buffer_head {
        /* First cache line: */
-       struct buffer_head * b_next;    /* Hash queue list */
+       struct buffer_head *b_next;     /* Hash queue list */
        unsigned long b_blocknr;        /* block number */
-       unsigned long b_size;           /* block size */
+       unsigned short b_size;          /* block size */
+       unsigned short b_list;          /* List that this buffer appears */
        kdev_t b_dev;                   /* device (B_FREE = free) */
+
+       atomic_t b_count;               /* users using this block */
        kdev_t b_rdev;                  /* Real device */
-       unsigned long b_rsector;        /* Real buffer location on disk */
-       struct buffer_head * b_this_page;       /* circular list of buffers in one page */
        unsigned long b_state;          /* buffer state bitmap (see above) */
-       struct buffer_head * b_next_free;
-       unsigned int b_count;           /* users using this block */
-
-       /* Non-performance-critical data follows. */
-       char * b_data;                  /* pointer to data block (1024 bytes) */
-       unsigned int b_list;            /* List that this buffer appears */
-       unsigned long b_flushtime;      /* Time when this (dirty) buffer
-                                        * should be written */
-       wait_queue_head_t b_wait;
-       struct buffer_head ** b_pprev;          /* doubly linked list of hash-queue */
-       struct buffer_head * b_prev_free;       /* doubly linked list of buffers */
-       struct buffer_head * b_reqnext;         /* request queue */
+       unsigned long b_flushtime;      /* Time when (dirty) buffer should be written */
 
-       /*
-        * I/O completion
-        */
-       void (*b_end_io)(struct buffer_head *bh, int uptodate);
+       struct buffer_head *b_next_free;/* lru/free list linkage */
+       struct buffer_head *b_prev_free;/* doubly linked list of buffers */
+       struct buffer_head *b_this_page;/* circular list of buffers in one page */
+       struct buffer_head *b_reqnext;  /* request queue */
+
+       struct buffer_head **b_pprev;   /* doubly linked list of hash-queue */
+       char *b_data;                   /* pointer to data block (1024 bytes) */
+       void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */
        void *b_dev_id;
+
+       unsigned long b_rsector;        /* Real buffer location on disk */
+       wait_queue_head_t b_wait;
 };
 
 typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
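
The reshuffled struct groups the fields a hash probe touches (b_next, b_blocknr, b_size, b_list, b_dev) at the very front, per the comment above; on the 32-bit targets of the day they pack into the first 16 bytes. One way to audit such a layout from user space, on a simplified stand-in struct rather than the real kernel definition:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in with the same field order as the hot head of the
 * struct above; NOT the real kernel definition. */
struct bh_model {
        struct bh_model *b_next;        /* hash chain, probed hottest */
        unsigned long b_blocknr;
        unsigned short b_size;
        unsigned short b_list;
        unsigned int b_dev;
        /* colder fields would follow here */
};

int main(void)
{
        size_t end = offsetof(struct bh_model, b_dev) + sizeof(unsigned int);

        printf("b_next    at %zu\n", offsetof(struct bh_model, b_next));
        printf("b_blocknr at %zu\n", offsetof(struct bh_model, b_blocknr));
        printf("b_dev     at %zu\n", offsetof(struct bh_model, b_dev));
        printf("probe data ends at %zu bytes (16 on ILP32, where the\n"
               "comment's first-cache-line math holds)\n", end);
        return 0;
}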
@@ -753,7+749,7 @@ extern struct file *inuse_filps;
 extern int try_to_free_buffers(struct page *);
 extern void refile_buffer(struct buffer_head * buf);
 
-extern int buffermem;
+extern atomic_t buffermem;
 
 #define BUF_CLEAN      0
 #define BUF_LOCKED     1       /* Buffers scheduled for write */
@@ -785,7+781,6 @@ extern inline void mark_buffer_clean(struct buffer_head * bh)
 }
 
 extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh, int flag));
-extern void FASTCALL(__atomic_mark_buffer_dirty(struct buffer_head *bh, int flag));
 
 #define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
 
@@ -795,20+790,6 @@ extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
                __mark_buffer_dirty(bh, flag);
 }
 
-/*
- * SMP-safe version of the above - does synchronization with
- * other users of buffer-cache data structures.
- *
- * since we test-set the dirty bit in a CPU-atomic way we also
- * have optimized the common 'redirtying' case away completely.
- */
-extern inline void atomic_mark_buffer_dirty(struct buffer_head * bh, int flag)
-{
-       if (!atomic_set_buffer_dirty(bh))
-               __atomic_mark_buffer_dirty(bh, flag);
-}
-
-
 extern void balance_dirty(kdev_t);
 extern int check_disk_change(kdev_t);
 extern int invalidate_inodes(struct super_block *);
@@ -875,7+856,6 @@ extern void remove_inode_hash(struct inode *);
 extern struct file * get_empty_filp(void);
 extern struct buffer_head * get_hash_table(kdev_t, int, int);
 extern struct buffer_head * getblk(kdev_t, int, int);
-extern struct buffer_head * find_buffer(kdev_t, int, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
 extern int is_read_only(kdev_t);
 extern void __brelse(struct buffer_head *);
@@ -916,7+896,6 @@ unsigned long generate_cluster(kdev_t, int b[], int);
 unsigned long generate_cluster_swab32(kdev_t, int b[], int);
 extern kdev_t ROOT_DEV;
 
-extern void show_buffers(void);
 extern void mount_root(void);
 
 #ifdef CONFIG_BLK_DEV_INITRD
index 4fdfd31..1d2bf41 100644 (file)
@@ -409,7+409,7 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
 
 extern struct vm_area_struct *find_extend_vma(struct task_struct *tsk, unsigned long addr);
 
-#define buffer_under_min()     ((buffermem >> PAGE_SHIFT) * 100 < \
+#define buffer_under_min()     ((atomic_read(&buffermem) >> PAGE_SHIFT) * 100 < \
                                buffer_mem.min_percent * num_physpages)
 #define pgcache_under_min()    (atomic_read(&page_cache_size) * 100 < \
                                page_cache.min_percent * num_physpages)
index e95a388..fa41779 100644 (file)
@@ -67,7+67,7 @@ extern int nr_free_pages;
 extern atomic_t nr_async_pages;
 extern struct inode swapper_inode;
 extern atomic_t page_cache_size;
-extern int buffermem;
+extern atomic_t buffermem;
 
 /* Incomplete types for prototype declarations: */
 struct task_struct;
index 4e85be8..d938298 100644 (file)
 #define UFS_MOUNT_ONERROR_UMOUNT       0x00000004
 #define UFS_MOUNT_ONERROR_REPAIR       0x00000008
 
-#define UFS_MOUNT_UFSTYPE              0x000003F0
+#define UFS_MOUNT_UFSTYPE              0x000007F0
 #define UFS_MOUNT_UFSTYPE_OLD          0x00000010
 #define UFS_MOUNT_UFSTYPE_44BSD                0x00000020
 #define UFS_MOUNT_UFSTYPE_SUN          0x00000040
 #define UFS_MOUNT_UFSTYPE_NEXTSTEP     0x00000080
-#define UFS_MOUNT_UFSTYPE_OPENSTEP     0x00000100
-#define UFS_MOUNT_UFSTYPE_SUNx86       0x00000200
+#define UFS_MOUNT_UFSTYPE_NEXTSTEP_CD  0x00000100
+#define UFS_MOUNT_UFSTYPE_OPENSTEP     0x00000200
+#define UFS_MOUNT_UFSTYPE_SUNx86       0x00000400
 
 #define ufs_clear_opt(o,opt)   o &= ~UFS_MOUNT_##opt
 #define ufs_set_opt(o,opt)     o |= UFS_MOUNT_##opt
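
Inserting UFS_MOUNT_UFSTYPE_NEXTSTEP_CD at 0x100 pushes OPENSTEP and SUNx86 up one bit each, so the covering mask widens from 0x3F0 to 0x7F0; users select the new type with `mount -t ufs -o ufstype=nextstep-cd ...`. A one-line sanity check of the renumbering:

#include <assert.h>

#define UFSTYPE_MASK        0x000007F0  /* widened from 0x000003F0 */
#define UFSTYPE_OLD         0x00000010
#define UFSTYPE_44BSD       0x00000020
#define UFSTYPE_SUN         0x00000040
#define UFSTYPE_NEXTSTEP    0x00000080
#define UFSTYPE_NEXTSTEP_CD 0x00000100  /* new bit, inserted here */
#define UFSTYPE_OPENSTEP    0x00000200  /* was 0x100 */
#define UFSTYPE_SUNx86      0x00000400  /* was 0x200 */

int main(void)
{
        unsigned all = UFSTYPE_OLD | UFSTYPE_44BSD | UFSTYPE_SUN |
                       UFSTYPE_NEXTSTEP | UFSTYPE_NEXTSTEP_CD |
                       UFSTYPE_OPENSTEP | UFSTYPE_SUNx86;

        assert(all == UFSTYPE_MASK);    /* mask covers every type bit */
        return 0;
}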
index c0f447e..63ee871 100644 (file)
@@ -276,7+276,7 @@ static int do_acct_process(long exitcode, struct file *file)
         */
        if (!file)
                return 0;
-       file->f_count++;
+       atomic_inc(&file->f_count);
        if (!check_free_space(file)) {
                fput(file);
                return 0;
index a5b5b01..1c0ba5f 100644 (file)
@@ -135,6+135,7 @@ EXPORT_SYMBOL(d_instantiate);
 EXPORT_SYMBOL(d_alloc);
 EXPORT_SYMBOL(d_lookup);
 EXPORT_SYMBOL(d_path);
+EXPORT_SYMBOL(__mark_buffer_dirty);
 EXPORT_SYMBOL(__mark_inode_dirty);
 EXPORT_SYMBOL(get_empty_filp);
 EXPORT_SYMBOL(init_private_file);
@@ -165,6+166,7 @@ EXPORT_SYMBOL(__wait_on_buffer);
 EXPORT_SYMBOL(add_blkdev_randomness);
 EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_partial_page);
 EXPORT_SYMBOL(block_flushpage);
 EXPORT_SYMBOL(generic_file_read);
 EXPORT_SYMBOL(generic_file_write);
@@ -223,8+225,8 @@ EXPORT_SYMBOL(resetup_one_dev);
 EXPORT_SYMBOL(unplug_device);
 EXPORT_SYMBOL(make_request);
 EXPORT_SYMBOL(tq_disk);
-EXPORT_SYMBOL(find_buffer);
 EXPORT_SYMBOL(init_buffer);
+EXPORT_SYMBOL(refile_buffer);
 EXPORT_SYMBOL(max_sectors);
 EXPORT_SYMBOL(max_readahead);
 
index e29d741..f2cb29a 100644 (file)
@@ -296,7+296,7 @@ int shrink_mmap(int priority, int gfp_mask)
                        spin_unlock(&pagecache_lock);
                        if (!try_to_free_buffers(page))
                                goto unlock_continue;
-                       buffermem -= mem;
+                       atomic_sub(mem, &buffermem);
                        spin_lock(&pagecache_lock);
                }
 
@@ -359,6+359,7 @@ inside:
                if (page->offset == offset)
                        break;
        }
+       set_bit(PG_referenced, &page->flags);
 not_found:
        return page;
 }
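
Setting PG_referenced on a page-cache hash hit gives the page one round of grace from the page ager, a second-chance scheme. A toy model of the idea (field and helper names invented):

#include <stdbool.h>
#include <stddef.h>

struct page_model { bool referenced; };

/* A successful hash lookup marks the page referenced... */
static struct page_model *find_page(struct page_model *p, bool hit)
{
        if (!hit)
                return NULL;
        p->referenced = true;
        return p;
}

/* ...so the ager gives it one extra pass before reclaiming it. */
static bool may_reclaim(struct page_model *p)
{
        if (p->referenced) {
                p->referenced = false;  /* spend the second chance */
                return false;
        }
        return true;
}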
index 360087a..c9d07a2 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -62,7+62,7 @@ int vm_enough_memory(long pages)
        if (sysctl_overcommit_memory)
            return 1;
 
-       free = buffermem >> PAGE_SHIFT;
+       free = atomic_read(&buffermem) >> PAGE_SHIFT;
        free += atomic_read(&page_cache_size);
        free += nr_free_pages;
        free += nr_swap_pages;
index 28c3093..91f572f 100644 (file)
@@ -4,7+4,7 @@
  *
  *     Copyright (c) 1994 Pauline Middelink
  *
- *     $Id: ip_masq.c,v 1.34 1999/03/17 01:53:51 davem Exp $
+ *     $Id: ip_masq.c,v 1.35 1999/06/29 12:35:46 davem Exp $
  *
  *
  *     See ip_fw.c for original log
index db6d66d..8c428a6 100644 (file)
@@ -3,7+3,7 @@
  *
  *     Does (reverse-masq) forwarding based on skb->fwmark value
  *
- *     $Id: ip_masq_mfw.c,v 1.4 1999/05/13 23:25:07 davem Exp $
+ *     $Id: ip_masq_mfw.c,v 1.5 1999/06/29 12:35:49 davem Exp $
  *
  * Author:     Juan Jose Ciarlante   <jjciarla@raiz.uncu.edu.ar>
  *               based on Steven Clarke's portfw
index 654ab9f..65fb50a 100644 (file)
@@ -4,7+4,7 @@
  *
  * Author:     Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
  *
- *     $Id: ip_masq_mod.c,v 1.5 1998/08/29 23:51:09 davem Exp $
+ *     $Id: ip_masq_mod.c,v 1.6 1999/06/29 12:35:51 davem Exp $
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
index 91e1b72..90b2688 100644 (file)
@@ -2,7+2,7 @@
  *             IP_MASQ_PORTFW masquerading module
  *
  *
- *     $Id: ip_masq_portfw.c,v 1.3 1998/12/08 05:42:12 davem Exp $
+ *     $Id: ip_masq_portfw.c,v 1.4 1999/06/29 12:35:53 davem Exp $
  *
  * Author:     Steven Clarke <steven.clarke@monmouth.demon.co.uk>
  *
index 51e27ad..52dde7a 100644 (file)
@@ -1,5+1,5 @@
 /*
- *  $Id: ipconfig.c,v 1.22 1999/06/09 10:10:57 davem Exp $
+ *  $Id: ipconfig.c,v 1.23 1999/06/28 11:35:07 davem Exp $
  *
  *  Automatic Configuration of IP -- use BOOTP or RARP or user-supplied
  *  information to configure own IP address and routes.
@@ -666,7+666,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct device *dev, struct
            b->vendor_area[2] == 83 &&
            b->vendor_area[3] == 99) {
                u8 *ext = &b->vendor_area[4];
-               u8 *end = (u8 *) b + len;
+                u8 *end = (u8 *) b + ntohs(b->iph.tot_len);
                while (ext < end && *ext != 0xff) {
                        if (*ext == 0)          /* Padding */
                                ext++;
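
The bootp fix bounds the vendor-option walk by the packet's own IP total length, converted from network byte order with ntohs(), instead of trusting the caller-supplied len. A self-contained sketch of such a bounds-checked TLV walk (function and parameter names are invented):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* Walk type-length-value options up to the packet's on-wire length. */
static size_t count_options(const uint8_t *pkt, uint16_t tot_len_be,
                            size_t opts_off)
{
        const uint8_t *ext = pkt + opts_off;
        const uint8_t *end = pkt + ntohs(tot_len_be);   /* not caller's len */
        size_t n = 0;

        while (ext < end && *ext != 0xff) {     /* 0xff ends the area */
                if (*ext == 0) {                /* padding byte */
                        ext++;
                        continue;
                }
                if (ext + 2 > end || ext + 2 + ext[1] > end)
                        break;                  /* truncated option */
                n++;
                ext += 2 + ext[1];              /* tag, length, payload */
        }
        return n;
}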
index 4a1bdde..b52a299 100644 (file)
@@ -5,7+5,7 @@
  *
  *             Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:    $Id: tcp.c,v 1.144 1999/05/27 01:03:37 davem Exp $
+ * Version:    $Id: tcp.c,v 1.145 1999/06/29 12:35:56 davem Exp $
  *
  * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
index d43ebe0..516304d 100644 (file)
@@ -5,7+5,7 @@
  *
  *             The User Datagram Protocol (UDP).
  *
- * Version:    $Id: udp.c,v 1.69 1999/06/09 11:15:31 davem Exp $
+ * Version:    $Id: udp.c,v 1.70 1999/06/13 05:55:16 davem Exp $
  *
  * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -654,9+654,9 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
                struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
                if (msg->msg_namelen < sizeof(*usin))
                        return(-EINVAL);
-               if (usin->sin_family != AF_INET) {
+               if (usin->sin_family != AF_INET)
                        return -EINVAL;
-               }
+
                ufh.daddr = usin->sin_addr.s_addr;
                ufh.uh.dest = usin->sin_port;
                if (ufh.uh.dest == 0)
index aa1bfa8..ae10c33 100644 (file)
@@ -1456,7+1456,7 @@ xprt_create(struct file *file, struct sockaddr_in *ap, struct rpc_timeout *to)
        proto = (sock->type == SOCK_DGRAM)? IPPROTO_UDP : IPPROTO_TCP;
        if ((xprt = xprt_setup(sock, proto, ap, to)) != NULL) {
                xprt->file = file;
-               file->f_count++;
+               atomic_inc(&file->f_count);
        }
 
        return xprt;
index 3d0f25f..1e6ff87 100644 (file)
@@ -8,7+8,7 @@
  *             as published by the Free Software Foundation; either version
  *             2 of the License, or (at your option) any later version.
  *
- * Version:    $Id: af_unix.c,v 1.78 1999/05/27 00:38:41 davem Exp $
+ * Version:    $Id: af_unix.c,v 1.79 1999/06/29 12:36:07 davem Exp $
  *
  * Fixes:
  *             Linus Torvalds  :       Assorted bug cures.