Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / drivers / net / mace.c
blob213b9b964394c306b6abe76df9af7807059bff67
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs. Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
8 #include <linux/module.h>
9 #include <linux/version.h>
10 #include <linux/kernel.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/delay.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/init.h>
17 #include <asm/prom.h>
18 #include <asm/dbdma.h>
19 #include <asm/io.h>
20 #include <asm/pgtable.h>
21 #include"mace.h"
23 static struct net_device *mace_devs = NULL;
#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
35 struct mace_data {
36 volatilestruct mace *mace;
37 volatilestruct dbdma_regs *tx_dma;
38 int tx_dma_intr;
39 volatilestruct dbdma_regs *rx_dma;
40 int rx_dma_intr;
41 volatilestruct dbdma_cmd *tx_cmds;/* xmit dma command list */
42 volatilestruct dbdma_cmd *rx_cmds;/* recv dma command list */
43 struct sk_buff *rx_bufs[N_RX_RING];
44 int rx_fill;
45 int rx_empty;
46 struct sk_buff *tx_bufs[N_TX_RING];
47 int tx_fill;
48 int tx_empty;
49 unsigned char maccc;
50 unsigned char tx_fullup;
51 unsigned char tx_active;
52 unsigned char tx_bad_runt;
53 struct net_device_stats stats;
54 struct timer_list tx_timeout;
55 int timeout_active;
56 struct net_device *next_mace;
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
static int bitrev(int);
static int mace_probe(void);
static void mace_probe1(struct device_node *mace);
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);
88 * If we can't get a skbuff when we need it, we use this area for DMA.
90 static unsigned char dummy_buf[RX_BUFLEN+2];
/* Bit-reverse one byte of an ethernet hardware address. */
static inline int
bitrev(int b)
{
    int d = 0, i;

    /* shift the low bit of b into d, 8 times */
    for (i = 0; i < 8; ++i, b >>= 1)
	d = (d << 1) | (b & 1);
    return d;
}
103 static int __init mace_probe(void)
105 struct device_node *mace;
107 for(mace =find_devices("mace"); mace != NULL; mace = mace->next)
108 mace_probe1(mace);
109 return0;
112 static void __init mace_probe1(struct device_node *mace)
114 int j, rev;
115 struct net_device *dev;
116 struct mace_data *mp;
117 unsigned char*addr;
119 if(mace->n_addrs !=3|| mace->n_intrs !=3) {
120 printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
121 mace->full_name);
122 return;
125 addr =get_property(mace,"mac-address", NULL);
126 if(addr == NULL) {
127 addr =get_property(mace,"local-mac-address", NULL);
128 if(addr == NULL) {
129 printk(KERN_ERR "Can't get mac-address for MACE %s\n",
130 mace->full_name);
131 return;
135 dev =init_etherdev(0, PRIV_BYTES);
136 if(!dev)
137 return;
138 SET_MODULE_OWNER(dev);
140 mp = dev->priv;
141 dev->base_addr = mace->addrs[0].address;
142 mp->mace = (volatilestruct mace *)
143 ioremap(mace->addrs[0].address,0x1000);
144 dev->irq = mace->intrs[0].line;
146 printk(KERN_INFO "%s: MACE at", dev->name);
147 rev = addr[0] ==0&& addr[1] ==0xA0;
148 for(j =0; j <6; ++j) {
149 dev->dev_addr[j] = rev?bitrev(addr[j]): addr[j];
150 printk("%c%.2x", (j?':':' '), dev->dev_addr[j]);
152 printk(", chip revision %d.%d\n",
153 in_8(&mp->mace->chipid_hi),in_8(&mp->mace->chipid_lo));
155 mp = (struct mace_data *) dev->priv;
156 mp->maccc = ENXMT | ENRCV;
157 mp->tx_dma = (volatilestruct dbdma_regs *)
158 ioremap(mace->addrs[1].address,0x1000);
159 mp->tx_dma_intr = mace->intrs[1].line;
160 mp->rx_dma = (volatilestruct dbdma_regs *)
161 ioremap(mace->addrs[2].address,0x1000);
162 mp->rx_dma_intr = mace->intrs[2].line;
164 mp->tx_cmds = (volatilestruct dbdma_cmd *)DBDMA_ALIGN(mp +1);
165 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING +1;
167 memset(&mp->stats,0,sizeof(mp->stats));
168 memset((char*) mp->tx_cmds,0,
169 (NCMDS_TX*N_TX_RING + N_RX_RING +2) *sizeof(struct dbdma_cmd));
170 init_timer(&mp->tx_timeout);
171 mp->timeout_active =0;
173 dev->open = mace_open;
174 dev->stop = mace_close;
175 dev->hard_start_xmit = mace_xmit_start;
176 dev->get_stats = mace_stats;
177 dev->set_multicast_list = mace_set_multicast;
178 dev->set_mac_address = mace_set_address;
180 ether_setup(dev);
182 mace_reset(dev);
184 if(request_irq(dev->irq, mace_interrupt,0,"MACE", dev))
185 printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
186 if(request_irq(mace->intrs[1].line, mace_txdma_intr,0,"MACE-txdma",
187 dev))
188 printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
189 if(request_irq(mace->intrs[2].line, mace_rxdma_intr,0,"MACE-rxdma",
190 dev))
191 printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);
193 mp->next_mace = mace_devs;
194 mace_devs = dev;
197 static voiddbdma_reset(volatilestruct dbdma_regs *dma)
199 int i;
201 out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) <<16);
204 * Yes this looks peculiar, but apparently it needs to be this
205 * way on some machines.
207 for(i =200; i >0; --i)
208 if(ld_le32(&dma->control) & RUN)
209 udelay(1);
212 static voidmace_reset(struct net_device *dev)
214 struct mace_data *mp = (struct mace_data *) dev->priv;
215 volatilestruct mace *mb = mp->mace;
216 int i;
218 /* soft-reset the chip */
219 i =200;
220 while(--i) {
221 out_8(&mb->biucc, SWRST);
222 if(in_8(&mb->biucc) & SWRST) {
223 udelay(10);
224 continue;
226 break;
228 if(!i) {
229 printk(KERN_ERR "mace: cannot reset chip!\n");
230 return;
233 out_8(&mb->imr,0xff);/* disable all intrs for now */
234 i =in_8(&mb->ir);
235 out_8(&mb->maccc,0);/* turn off tx, rx */
237 out_8(&mb->biucc, XMTSP_64);
238 out_8(&mb->utr, RTRD);
239 out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
240 out_8(&mb->xmtfc, AUTO_PAD_XMIT);/* auto-pad short frames */
241 out_8(&mb->rcvfc,0);
243 /* load up the hardware address */
244 __mace_set_address(dev, dev->dev_addr);
246 /* clear the multicast filter */
247 out_8(&mb->iac, ADDRCHG | LOGADDR);
248 while((in_8(&mb->iac) & ADDRCHG) !=0)
250 for(i =0; i <8; ++i) {
251 out_8(&mb->ladrf,0);
253 /* done changing address */
254 out_8(&mb->iac,0);
256 out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
259 static void__mace_set_address(struct net_device *dev,void*addr)
261 volatilestruct mace *mb = ((struct mace_data *) dev->priv)->mace;
262 unsigned char*p = addr;
263 int i;
265 /* load up the hardware address */
266 out_8(&mb->iac, ADDRCHG | PHYADDR);
267 while((in_8(&mb->iac) & ADDRCHG) !=0)
269 for(i =0; i <6; ++i)
270 out_8(&mb->padr, dev->dev_addr[i] = p[i]);
273 static intmace_set_address(struct net_device *dev,void*addr)
275 struct mace_data *mp = (struct mace_data *) dev->priv;
276 volatilestruct mace *mb = mp->mace;
277 unsigned long flags;
279 save_flags(flags);cli();
281 __mace_set_address(dev, addr);
283 out_8(&mb->iac,0);
284 /* note: setting ADDRCHG clears ENRCV */
285 out_8(&mb->maccc, mp->maccc);
287 restore_flags(flags);
288 return0;
291 static intmace_open(struct net_device *dev)
293 struct mace_data *mp = (struct mace_data *) dev->priv;
294 volatilestruct mace *mb = mp->mace;
295 volatilestruct dbdma_regs *rd = mp->rx_dma;
296 volatilestruct dbdma_regs *td = mp->tx_dma;
297 volatilestruct dbdma_cmd *cp;
298 int i;
299 struct sk_buff *skb;
300 unsigned char*data;
302 /* reset the chip */
303 mace_reset(dev);
305 /* initialize list of sk_buffs for receiving and set up recv dma */
306 mace_clean_rings(mp);
307 memset((char*)mp->rx_cmds,0, N_RX_RING *sizeof(struct dbdma_cmd));
308 cp = mp->rx_cmds;
309 for(i =0; i < N_RX_RING -1; ++i) {
310 skb =dev_alloc_skb(RX_BUFLEN +2);
311 if(skb ==0) {
312 data = dummy_buf;
313 }else{
314 skb_reserve(skb,2);/* so IP header lands on 4-byte bdry */
315 data = skb->data;
317 mp->rx_bufs[i] = skb;
318 st_le16(&cp->req_count, RX_BUFLEN);
319 st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
320 st_le32(&cp->phy_addr,virt_to_bus(data));
321 cp->xfer_status =0;
322 ++cp;
324 mp->rx_bufs[i] =0;
325 st_le16(&cp->command, DBDMA_STOP);
326 mp->rx_fill = i;
327 mp->rx_empty =0;
329 /* Put a branch back to the beginning of the receive command list */
330 ++cp;
331 st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
332 st_le32(&cp->cmd_dep,virt_to_bus(mp->rx_cmds));
334 /* start rx dma */
335 out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) <<16);/* clear run bit */
336 out_le32(&rd->cmdptr,virt_to_bus(mp->rx_cmds));
337 out_le32(&rd->control, (RUN <<16) | RUN);
339 /* put a branch at the end of the tx command list */
340 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
341 st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
342 st_le32(&cp->cmd_dep,virt_to_bus(mp->tx_cmds));
344 /* reset tx dma */
345 out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) <<16);
346 out_le32(&td->cmdptr,virt_to_bus(mp->tx_cmds));
347 mp->tx_fill =0;
348 mp->tx_empty =0;
349 mp->tx_fullup =0;
350 mp->tx_active =0;
351 mp->tx_bad_runt =0;
353 /* turn it on! */
354 out_8(&mb->maccc, mp->maccc);
355 /* enable all interrupts except receive interrupts */
356 out_8(&mb->imr, RCVINT);
358 return0;
361 staticinlinevoidmace_clean_rings(struct mace_data *mp)
363 int i;
365 /* free some skb's */
366 for(i =0; i < N_RX_RING; ++i) {
367 if(mp->rx_bufs[i] !=0) {
368 dev_kfree_skb(mp->rx_bufs[i]);
369 mp->rx_bufs[i] =0;
372 for(i = mp->tx_empty; i != mp->tx_fill; ) {
373 dev_kfree_skb(mp->tx_bufs[i]);
374 if(++i >= N_TX_RING)
375 i =0;
379 static intmace_close(struct net_device *dev)
381 struct mace_data *mp = (struct mace_data *) dev->priv;
382 volatilestruct mace *mb = mp->mace;
383 volatilestruct dbdma_regs *rd = mp->rx_dma;
384 volatilestruct dbdma_regs *td = mp->tx_dma;
386 /* disable rx and tx */
387 out_8(&mb->maccc,0);
388 out_8(&mb->imr,0xff);/* disable all intrs */
390 /* disable rx and tx dma */
391 st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) <<16);/* clear run bit */
392 st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) <<16);/* clear run bit */
394 mace_clean_rings(mp);
396 return0;
399 staticinlinevoidmace_set_timeout(struct net_device *dev)
401 struct mace_data *mp = (struct mace_data *) dev->priv;
402 unsigned long flags;
404 save_flags(flags);
405 cli();
406 if(mp->timeout_active)
407 del_timer(&mp->tx_timeout);
408 mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
409 mp->tx_timeout.function = mace_tx_timeout;
410 mp->tx_timeout.data = (unsigned long) dev;
411 add_timer(&mp->tx_timeout);
412 mp->timeout_active =1;
413 restore_flags(flags);
416 static intmace_xmit_start(struct sk_buff *skb,struct net_device *dev)
418 struct mace_data *mp = (struct mace_data *) dev->priv;
419 volatilestruct dbdma_regs *td = mp->tx_dma;
420 volatilestruct dbdma_cmd *cp, *np;
421 unsigned long flags;
422 int fill, next, len;
424 /* see if there's a free slot in the tx ring */
425 save_flags(flags);cli();
426 fill = mp->tx_fill;
427 next = fill +1;
428 if(next >= N_TX_RING)
429 next =0;
430 if(next == mp->tx_empty) {
431 netif_stop_queue(dev);
432 mp->tx_fullup =1;
433 restore_flags(flags);
434 return1;/* can't take it at the moment */
436 restore_flags(flags);
438 /* partially fill in the dma command block */
439 len = skb->len;
440 if(len > ETH_FRAME_LEN) {
441 printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
442 len = ETH_FRAME_LEN;
444 mp->tx_bufs[fill] = skb;
445 cp = mp->tx_cmds + NCMDS_TX * fill;
446 st_le16(&cp->req_count, len);
447 st_le32(&cp->phy_addr,virt_to_bus(skb->data));
449 np = mp->tx_cmds + NCMDS_TX * next;
450 out_le16(&np->command, DBDMA_STOP);
452 /* poke the tx dma channel */
453 save_flags(flags);
454 cli();
455 mp->tx_fill = next;
456 if(!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
457 out_le16(&cp->xfer_status,0);
458 out_le16(&cp->command, OUTPUT_LAST);
459 out_le32(&td->control, ((RUN|WAKE) <<16) + (RUN|WAKE));
460 ++mp->tx_active;
461 mace_set_timeout(dev);
463 if(++next >= N_TX_RING)
464 next =0;
465 if(next == mp->tx_empty)
466 netif_stop_queue(dev);
467 restore_flags(flags);
469 return0;
472 static struct net_device_stats *mace_stats(struct net_device *dev)
474 struct mace_data *p = (struct mace_data *) dev->priv;
476 return&p->stats;
/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define CRC_POLY	0xedb88320
484 static voidmace_set_multicast(struct net_device *dev)
486 struct mace_data *mp = (struct mace_data *) dev->priv;
487 volatilestruct mace *mb = mp->mace;
488 int i, j, k, b;
489 unsigned long crc;
491 mp->maccc &= ~PROM;
492 if(dev->flags & IFF_PROMISC) {
493 mp->maccc |= PROM;
494 }else{
495 unsigned char multicast_filter[8];
496 struct dev_mc_list *dmi = dev->mc_list;
498 if(dev->flags & IFF_ALLMULTI) {
499 for(i =0; i <8; i++)
500 multicast_filter[i] =0xff;
501 }else{
502 for(i =0; i <8; i++)
503 multicast_filter[i] =0;
504 for(i =0; i < dev->mc_count; i++) {
505 crc = ~0;
506 for(j =0; j <6; ++j) {
507 b = dmi->dmi_addr[j];
508 for(k =0; k <8; ++k) {
509 if((crc ^ b) &1)
510 crc = (crc >>1) ^ CRC_POLY;
511 else
512 crc >>=1;
513 b >>=1;
516 j = crc >>26;/* bit number in multicast_filter */
517 multicast_filter[j >>3] |=1<< (j &7);
518 dmi = dmi->next;
521 #if 0
522 printk("Multicast filter :");
523 for(i =0; i <8; i++)
524 printk("%02x ", multicast_filter[i]);
525 printk("\n");
526 #endif
528 out_8(&mb->iac, ADDRCHG | LOGADDR);
529 while((in_8(&mb->iac) & ADDRCHG) !=0)
531 for(i =0; i <8; ++i) {
532 out_8(&mb->ladrf, multicast_filter[i]);
535 /* reset maccc */
536 out_8(&mb->maccc, mp->maccc);
539 static voidmace_handle_misc_intrs(struct mace_data *mp,int intr)
541 volatilestruct mace *mb = mp->mace;
542 static int mace_babbles, mace_jabbers;
544 if(intr & MPCO)
545 mp->stats.rx_missed_errors +=256;
546 mp->stats.rx_missed_errors +=in_8(&mb->mpc);/* reading clears it */
547 if(intr & RNTPCO)
548 mp->stats.rx_length_errors +=256;
549 mp->stats.rx_length_errors +=in_8(&mb->rntpc);/* reading clears it */
550 if(intr & CERR)
551 ++mp->stats.tx_heartbeat_errors;
552 if(intr & BABBLE)
553 if(mace_babbles++ <4)
554 printk(KERN_DEBUG "mace: babbling transmitter\n");
555 if(intr & JABBER)
556 if(mace_jabbers++ <4)
557 printk(KERN_DEBUG "mace: jabbering transceiver\n");
560 static voidmace_interrupt(int irq,void*dev_id,struct pt_regs *regs)
562 struct net_device *dev = (struct net_device *) dev_id;
563 struct mace_data *mp = (struct mace_data *) dev->priv;
564 volatilestruct mace *mb = mp->mace;
565 volatilestruct dbdma_regs *td = mp->tx_dma;
566 volatilestruct dbdma_cmd *cp;
567 int intr, fs, i, stat, x;
568 int xcount, dstat;
569 /* static int mace_last_fs, mace_last_xcount; */
571 intr =in_8(&mb->ir);/* read interrupt register */
572 in_8(&mb->xmtrc);/* get retries */
573 mace_handle_misc_intrs(mp, intr);
575 i = mp->tx_empty;
576 while(in_8(&mb->pr) & XMTSV) {
577 del_timer(&mp->tx_timeout);
578 mp->timeout_active =0;
580 * Clear any interrupt indication associated with this status
581 * word. This appears to unlatch any error indication from
582 * the DMA controller.
584 intr =in_8(&mb->ir);
585 if(intr !=0)
586 mace_handle_misc_intrs(mp, intr);
587 if(mp->tx_bad_runt) {
588 fs =in_8(&mb->xmtfs);
589 mp->tx_bad_runt =0;
590 out_8(&mb->xmtfc, AUTO_PAD_XMIT);
591 continue;
593 dstat =ld_le32(&td->status);
594 /* stop DMA controller */
595 out_le32(&td->control, RUN <<16);
597 * xcount is the number of complete frames which have been
598 * written to the fifo but for which status has not been read.
600 xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
601 if(xcount ==0|| (dstat & DEAD)) {
603 * If a packet was aborted before the DMA controller has
604 * finished transferring it, it seems that there are 2 bytes
605 * which are stuck in some buffer somewhere. These will get
606 * transmitted as soon as we read the frame status (which
607 * reenables the transmit data transfer request). Turning
608 * off the DMA controller and/or resetting the MACE doesn't
609 * help. So we disable auto-padding and FCS transmission
610 * so the two bytes will only be a runt packet which should
611 * be ignored by other stations.
613 out_8(&mb->xmtfc, DXMTFCS);
615 fs =in_8(&mb->xmtfs);
616 if((fs & XMTSV) ==0) {
617 printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
618 fs, xcount, dstat);
619 mace_reset(dev);
621 * XXX mace likes to hang the machine after a xmtfs error.
622 * This is hard to reproduce, reseting *may* help
625 cp = mp->tx_cmds + NCMDS_TX * i;
626 stat =ld_le16(&cp->xfer_status);
627 if((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount ==0) {
629 * Check whether there were in fact 2 bytes written to
630 * the transmit FIFO.
632 udelay(1);
633 x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
634 if(x !=0) {
635 /* there were two bytes with an end-of-packet indication */
636 mp->tx_bad_runt =1;
637 mace_set_timeout(dev);
638 }else{
640 * Either there weren't the two bytes buffered up, or they
641 * didn't have an end-of-packet indication.
642 * We flush the transmit FIFO just in case (by setting the
643 * XMTFWU bit with the transmitter disabled).
645 out_8(&mb->maccc,in_8(&mb->maccc) & ~ENXMT);
646 out_8(&mb->fifocc,in_8(&mb->fifocc) | XMTFWU);
647 udelay(1);
648 out_8(&mb->maccc,in_8(&mb->maccc) | ENXMT);
649 out_8(&mb->xmtfc, AUTO_PAD_XMIT);
652 /* dma should have finished */
653 if(i == mp->tx_fill) {
654 printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
655 fs, xcount, dstat);
656 continue;
658 /* Update stats */
659 if(fs & (UFLO|LCOL|LCAR|RTRY)) {
660 ++mp->stats.tx_errors;
661 if(fs & LCAR)
662 ++mp->stats.tx_carrier_errors;
663 if(fs & (UFLO|LCOL|RTRY))
664 ++mp->stats.tx_aborted_errors;
665 }else{
666 mp->stats.tx_bytes += mp->tx_bufs[i]->len;
667 ++mp->stats.tx_packets;
669 dev_kfree_skb_irq(mp->tx_bufs[i]);
670 --mp->tx_active;
671 if(++i >= N_TX_RING)
672 i =0;
673 #if 0
674 mace_last_fs = fs;
675 mace_last_xcount = xcount;
676 #endif
679 if(i != mp->tx_empty) {
680 mp->tx_fullup =0;
681 netif_wake_queue(dev);
683 mp->tx_empty = i;
684 i += mp->tx_active;
685 if(i >= N_TX_RING)
686 i -= N_TX_RING;
687 if(!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
689 /* set up the next one */
690 cp = mp->tx_cmds + NCMDS_TX * i;
691 out_le16(&cp->xfer_status,0);
692 out_le16(&cp->command, OUTPUT_LAST);
693 ++mp->tx_active;
694 if(++i >= N_TX_RING)
695 i =0;
696 }while(i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
697 out_le32(&td->control, ((RUN|WAKE) <<16) + (RUN|WAKE));
698 mace_set_timeout(dev);
702 static voidmace_tx_timeout(unsigned long data)
704 struct net_device *dev = (struct net_device *) data;
705 struct mace_data *mp = (struct mace_data *) dev->priv;
706 volatilestruct mace *mb = mp->mace;
707 volatilestruct dbdma_regs *td = mp->tx_dma;
708 volatilestruct dbdma_regs *rd = mp->rx_dma;
709 volatilestruct dbdma_cmd *cp;
710 unsigned long flags;
711 int i;
713 save_flags(flags);
714 cli();
715 mp->timeout_active =0;
716 if(mp->tx_active ==0&& !mp->tx_bad_runt)
717 goto out;
719 /* update various counters */
720 mace_handle_misc_intrs(mp,in_8(&mb->ir));
722 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
724 /* turn off both tx and rx and reset the chip */
725 out_8(&mb->maccc,0);
726 printk(KERN_ERR "mace: transmit timeout - resetting\n");
727 dbdma_reset(td);
728 mace_reset(dev);
730 /* restart rx dma */
731 cp =bus_to_virt(ld_le32(&rd->cmdptr));
732 dbdma_reset(rd);
733 out_le16(&cp->xfer_status,0);
734 out_le32(&rd->cmdptr,virt_to_bus(cp));
735 out_le32(&rd->control, (RUN <<16) | RUN);
737 /* fix up the transmit side */
738 i = mp->tx_empty;
739 mp->tx_active =0;
740 ++mp->stats.tx_errors;
741 if(mp->tx_bad_runt) {
742 mp->tx_bad_runt =0;
743 }else if(i != mp->tx_fill) {
744 dev_kfree_skb(mp->tx_bufs[i]);
745 if(++i >= N_TX_RING)
746 i =0;
747 mp->tx_empty = i;
749 mp->tx_fullup =0;
750 netif_wake_queue(dev);
751 if(i != mp->tx_fill) {
752 cp = mp->tx_cmds + NCMDS_TX * i;
753 out_le16(&cp->xfer_status,0);
754 out_le16(&cp->command, OUTPUT_LAST);
755 out_le32(&td->cmdptr,virt_to_bus(cp));
756 out_le32(&td->control, (RUN <<16) | RUN);
757 ++mp->tx_active;
758 mace_set_timeout(dev);
761 /* turn it back on */
762 out_8(&mb->imr, RCVINT);
763 out_8(&mb->maccc, mp->maccc);
765 out:
766 restore_flags(flags);
/* The tx DBDMA interrupt is unused; tx completion is handled in
 * mace_interrupt() from the chip's own interrupt. */
static void mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
}
773 static voidmace_rxdma_intr(int irq,void*dev_id,struct pt_regs *regs)
775 struct net_device *dev = (struct net_device *) dev_id;
776 struct mace_data *mp = (struct mace_data *) dev->priv;
777 volatilestruct dbdma_regs *rd = mp->rx_dma;
778 volatilestruct dbdma_cmd *cp, *np;
779 int i, nb, stat, next;
780 struct sk_buff *skb;
781 unsigned frame_status;
782 static int mace_lost_status;
783 unsigned char*data;
785 for(i = mp->rx_empty; i != mp->rx_fill; ) {
786 cp = mp->rx_cmds + i;
787 stat =ld_le16(&cp->xfer_status);
788 if((stat & ACTIVE) ==0) {
789 next = i +1;
790 if(next >= N_RX_RING)
791 next =0;
792 np = mp->rx_cmds + next;
793 if(next != mp->rx_fill
794 && (ld_le16(&np->xfer_status) & ACTIVE) !=0) {
795 printk(KERN_DEBUG "mace: lost a status word\n");
796 ++mace_lost_status;
797 }else
798 break;
800 nb =ld_le16(&cp->req_count) -ld_le16(&cp->res_count);
801 out_le16(&cp->command, DBDMA_STOP);
802 /* got a packet, have a look at it */
803 skb = mp->rx_bufs[i];
804 if(skb ==0) {
805 ++mp->stats.rx_dropped;
806 }else if(nb >8) {
807 data = skb->data;
808 frame_status = (data[nb-3] <<8) + data[nb-4];
809 if(frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
810 ++mp->stats.rx_errors;
811 if(frame_status & RS_OFLO)
812 ++mp->stats.rx_over_errors;
813 if(frame_status & RS_FRAMERR)
814 ++mp->stats.rx_frame_errors;
815 if(frame_status & RS_FCSERR)
816 ++mp->stats.rx_crc_errors;
817 }else{
818 /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
819 * FCS on frames with 802.3 headers. This means that Ethernet
820 * frames have 8 extra octets at the end, while 802.3 frames
821 * have only 4. We need to correctly account for this. */
822 if(*(unsigned short*)(data+12) <1536)/* 802.3 header */
823 nb -=4;
824 else/* Ethernet header; mace includes FCS */
825 nb -=8;
826 skb_put(skb, nb);
827 skb->dev = dev;
828 skb->protocol =eth_type_trans(skb, dev);
829 netif_rx(skb);
830 mp->rx_bufs[i] =0;
831 mp->stats.rx_bytes += skb->len;
832 ++mp->stats.rx_packets;
834 }else{
835 ++mp->stats.rx_errors;
836 ++mp->stats.rx_length_errors;
839 /* advance to next */
840 if(++i >= N_RX_RING)
841 i =0;
843 mp->rx_empty = i;
845 i = mp->rx_fill;
846 for(;;) {
847 next = i +1;
848 if(next >= N_RX_RING)
849 next =0;
850 if(next == mp->rx_empty)
851 break;
852 cp = mp->rx_cmds + i;
853 skb = mp->rx_bufs[i];
854 if(skb ==0) {
855 skb =dev_alloc_skb(RX_BUFLEN +2);
856 if(skb !=0) {
857 skb_reserve(skb,2);
858 mp->rx_bufs[i] = skb;
861 st_le16(&cp->req_count, RX_BUFLEN);
862 data = skb? skb->data: dummy_buf;
863 st_le32(&cp->phy_addr,virt_to_bus(data));
864 out_le16(&cp->xfer_status,0);
865 out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
866 #if 0
867 if((ld_le32(&rd->status) & ACTIVE) !=0) {
868 out_le32(&rd->control, (PAUSE <<16) | PAUSE);
869 while((in_le32(&rd->status) & ACTIVE) !=0)
872 #endif
873 i = next;
875 if(i != mp->rx_fill) {
876 out_le32(&rd->control, ((RUN|WAKE) <<16) | (RUN|WAKE));
877 mp->rx_fill = i;
881 MODULE_AUTHOR("Paul Mackerras");
882 MODULE_DESCRIPTION("PowerMac MACE driver.");
884 static void __exit mace_cleanup(void)
886 struct net_device *dev;
887 struct mace_data *mp;
889 while((dev = mace_devs) !=0) {
890 mp = (struct mace_data *) mace_devs->priv;
891 mace_devs = mp->next_mace;
893 free_irq(dev->irq, dev);
894 free_irq(mp->tx_dma_intr, dev);
895 free_irq(mp->rx_dma_intr, dev);
897 unregister_netdev(dev);
898 kfree(dev);
module_init(mace_probe);
module_exit(mace_cleanup);
close