/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include "mace.h"

static struct net_device *mace_devs = NULL;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
    volatile struct mace *mace;
    volatile struct dbdma_regs *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct net_device_stats stats;
    struct timer_list tx_timeout;
    int timeout_active;
    struct net_device *next_mace;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int bitrev(int);
static int mace_probe(void);
static void mace_probe1(struct device_node *mace);
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char dummy_buf[RX_BUFLEN+2];

/* Bit-reverse one byte of an ethernet hardware address. */
static int
bitrev(int b)
{
    int d = 0, i;

    for (i = 0; i < 8; ++i, b >>= 1)
        d = (d << 1) | (b & 1);
    return d;
}

static int __init mace_probe(void)
{
    struct device_node *mace;

    for (mace = find_devices("mace"); mace != NULL; mace = mace->next)
        mace_probe1(mace);
    return mace_devs? 0: -ENODEV;
}

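/*
 * Probe one "mace" device node: map the chip and its two DBDMA channels,
 * pick up the MAC address from the firmware properties, and register
 * the network device.
 */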
static void __init mace_probe1(struct device_node *mace)
{
    int j, rev;
    struct net_device *dev;
    struct mace_data *mp;
    unsigned char *addr;

    if (mace->n_addrs != 3 || mace->n_intrs != 3) {
        printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
               mace->full_name);
        return;
    }

    addr = get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
        addr = get_property(mace, "local-mac-address", NULL);
        if (addr == NULL) {
            printk(KERN_ERR "Can't get mac-address for MACE %s\n",
                   mace->full_name);
            return;
        }
    }

    dev = init_etherdev(0, PRIV_BYTES);
    if (!dev)
        return;
    SET_MODULE_OWNER(dev);

    mp = (struct mace_data *) dev->priv;
    dev->base_addr = mace->addrs[0].address;
    mp->mace = (volatile struct mace *)
        ioremap(mace->addrs[0].address, 0x1000);
    dev->irq = mace->intrs[0].line;

    printk(KERN_INFO "%s: MACE at", dev->name);
    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
        dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
        printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
    }
    printk(", chip revision %d.%d\n",
           in_8(&mp->mace->chipid_hi), in_8(&mp->mace->chipid_lo));

    mp = (struct mace_data *) dev->priv;
    mp->maccc = ENXMT | ENRCV;
    mp->tx_dma = (volatile struct dbdma_regs *)
        ioremap(mace->addrs[1].address, 0x1000);
    mp->tx_dma_intr = mace->intrs[1].line;
    mp->rx_dma = (volatile struct dbdma_regs *)
        ioremap(mace->addrs[2].address, 0x1000);
    mp->rx_dma_intr = mace->intrs[2].line;

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset(&mp->stats, 0, sizeof(mp->stats));
    memset((char *) mp->tx_cmds, 0,
           (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    mp->timeout_active = 0;

    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->get_stats = mace_stats;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;

    mace_reset(dev);

    if (request_irq(dev->irq, mace_interrupt, 0, "MACE", dev))
        printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
    if (request_irq(mace->intrs[1].line, mace_txdma_intr, 0, "MACE-txdma",
                    dev))
        printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
    if (request_irq(mace->intrs[2].line, mace_rxdma_intr, 0, "MACE-rxdma",
                    dev))
        printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);

    mp->next_mace = mace_devs;
    mace_devs = dev;
}

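/*
 * Reset a DBDMA channel: clear its RUN, PAUSE, FLUSH and WAKE bits and
 * then give the channel a little while to actually stop.
 */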
static void dbdma_reset(volatile struct dbdma_regs *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
        if (ld_le32(&dma->control) & RUN)
            udelay(1);
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
        out_8(&mb->biucc, SWRST);
        if (in_8(&mb->biucc) & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "mace: cannot reset chip!\n");
        return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    out_8(&mb->iac, ADDRCHG | LOGADDR);
    while ((in_8(&mb->iac) & ADDRCHG) != 0)
        ;
    for (i = 0; i < 8; ++i) {
        out_8(&mb->ladrf, 0);
    }
    /* done changing address */
    out_8(&mb->iac, 0);

    out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    volatile struct mace *mb = ((struct mace_data *) dev->priv)->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    out_8(&mb->iac, ADDRCHG | PHYADDR);
    while ((in_8(&mb->iac) & ADDRCHG) != 0)
        ;
    for (i = 0; i < 6; ++i)
        out_8(&mb->padr, dev->dev_addr[i] = p[i]);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    unsigned long flags;

    save_flags(flags); cli();

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    restore_flags(flags);
    return 0;
}

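/*
 * Open the interface: reset the chip, allocate receive buffers, build the
 * receive and transmit DBDMA command rings, start receive DMA, and turn
 * the transmitter and receiver back on.
 */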
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
        skb = dev_alloc_skb(RX_BUFLEN + 2);
        if (skb == 0) {
            data = dummy_buf;
        } else {
            skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
            data = skb->data;
        }
        mp->rx_bufs[i] = skb;
        st_le16(&cp->req_count, RX_BUFLEN);
        st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
        st_le32(&cp->phy_addr, virt_to_bus(data));
        cp->xfer_status = 0;
        ++cp;
    }
    mp->rx_bufs[i] = 0;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    netif_start_queue(dev);

    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
        if (mp->rx_bufs[i] != 0) {
            dev_kfree_skb(mp->rx_bufs[i]);
            mp->rx_bufs[i] = 0;
        }
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
    }
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_regs *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);	/* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    unsigned long flags;

    save_flags(flags);
    cli();
    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
    restore_flags(flags);
}

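/*
 * Queue a frame for transmission: claim the next slot in the tx ring,
 * fill in its DBDMA output command, and kick the transmit DMA channel
 * if it isn't already busy.
 */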
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    save_flags(flags); cli();
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        restore_flags(flags);
        return 1;		/* can't take it at the moment */
    }
    restore_flags(flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    save_flags(flags); cli();
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    restore_flags(flags);

    return 0;
}

static struct net_device_stats *mace_stats(struct net_device *dev)
{
    struct mace_data *p = (struct mace_data *) dev->priv;

    return &p->stats;
}

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define CRC_POLY	0xedb88320

static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    int i, j, k, b;
    unsigned long crc;

    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct dev_mc_list *dmi = dev->mc_list;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
            for (i = 0; i < dev->mc_count; i++) {
                crc = ~0;
                for (j = 0; j < 6; ++j) {
                    b = dmi->dmi_addr[j];
                    for (k = 0; k < 8; ++k) {
                        if ((crc ^ b) & 1)
                            crc = (crc >> 1) ^ CRC_POLY;
                        else
                            crc >>= 1;
                        b >>= 1;
                    }
                }
                j = crc >> 26;	/* bit number in multicast_filter */
                multicast_filter[j >> 3] |= 1 << (j & 7);
                dmi = dmi->next;
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif

        out_8(&mb->iac, ADDRCHG | LOGADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
        for (i = 0; i < 8; ++i) {
            out_8(&mb->ladrf, multicast_filter[i]);
        }
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
}

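/*
 * Fold the miscellaneous error counts reported by the chip (missed
 * packets, runt packets, heartbeat errors, babble and jabber) into
 * the interface statistics.
 */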
static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
    volatile struct mace *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
        mp->stats.rx_missed_errors += 256;
    mp->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTPCO)
        mp->stats.rx_length_errors += 256;
    mp->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
        ++mp->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

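/*
 * Interrupt handler for the MACE chip itself: reap completed transmit
 * frames, recover from aborted or runt transmissions, and hand the next
 * queued frame(s) to the transmit DMA channel.
 */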
static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    /* static int mace_last_fs, mace_last_xcount; */

    intr = in_8(&mb->ir);		/* read interrupt register */
    in_8(&mb->xmtrc);			/* get retries */
    mace_handle_misc_intrs(mp, intr);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr);
        if (mp->tx_bad_runt) {
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = ld_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            /*
             * XXX mace likes to hang the machine after a xmtfs error.
             * This is hard to reproduce, resetting *may* help
             */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = ld_le16(&cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++mp->stats.tx_errors;
            if (fs & LCAR)
                ++mp->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++mp->stats.tx_aborted_errors;
        } else {
            mp->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++mp->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
}

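/*
 * Transmit watchdog: if a frame has been pending for more than TX_TIMEOUT
 * jiffies, reset the chip and both DMA channels, drop the stuck frame and
 * restart whatever is still queued.
 */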
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace *mb = mp->mace;
    volatile struct dbdma_regs *td = mp->tx_dma;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    save_flags(flags);
    cli();
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir));

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++mp->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    restore_flags(flags);
}

static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
}

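/*
 * Receive DMA interrupt: walk the receive ring, pass completed frames up
 * to the network stack, then refill the ring with fresh buffers and
 * restart the DMA channel.
 */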
static void mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;

    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = ld_le16(&cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill
                && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
        nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            ++mp->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++mp->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++mp->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++mp->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++mp->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
                    nb -= 4;
                else	/* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                mp->rx_bufs[i] = 0;
                mp->stats.rx_bytes += skb->len;
                ++mp->stats.rx_packets;
            }
        } else {
            ++mp->stats.rx_errors;
            ++mp->stats.rx_length_errors;
        }

        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            skb = dev_alloc_skb(RX_BUFLEN + 2);
            if (skb != 0) {
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        st_le16(&cp->req_count, RX_BUFLEN);
        data = skb? skb->data: dummy_buf;
        st_le32(&cp->phy_addr, virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
        if ((ld_le32(&rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");

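/*
 * Module unload: walk the list of probed devices, free their interrupts
 * and unregister them.
 */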
static void __exit mace_cleanup(void)
{
    struct net_device *dev;
    struct mace_data *mp;

    while ((dev = mace_devs) != 0) {
        mp = (struct mace_data *) mace_devs->priv;
        mace_devs = mp->next_mace;

        free_irq(dev->irq, dev);
        free_irq(mp->tx_dma_intr, dev);
        free_irq(mp->rx_dma_intr, dev);

        unregister_netdev(dev);
        kfree(dev);
    }
}

module_init(mace_probe);
module_exit(mace_cleanup);