Import 2.1.127pre3
[davej-history.git] / drivers / net / bmac.c
blob2ced2f1db7bbb93b4a03b89c381f3aa2404212d0
1 /*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
6 */
7 #include <linux/config.h>
8 #include <linux/kernel.h>
9 #include <linux/netdevice.h>
10 #include <linux/etherdevice.h>
11 #include <linux/delay.h>
12 #include <linux/string.h>
13 #include <linux/timer.h>
14 #include <linux/proc_fs.h>
15 #include <asm/prom.h>
16 #include <asm/dbdma.h>
17 #include <asm/io.h>
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20 #include <asm/feature.h>
21 #include"bmac.h"
/* Align a pointer down / up to a PAGE_SIZE boundary. */
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY	0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* Debug tracing: compiled out by default. */
#define XXDEBUG(args)
48 struct bmac_data {
49 /* volatile struct bmac *bmac; */
50 struct sk_buff_head *queue;
51 volatilestruct dbdma_regs *tx_dma;
52 int tx_dma_intr;
53 volatilestruct dbdma_regs *rx_dma;
54 int rx_dma_intr;
55 volatilestruct dbdma_cmd *tx_cmds;/* xmit dma command list */
56 volatilestruct dbdma_cmd *rx_cmds;/* recv dma command list */
57 struct sk_buff *rx_bufs[N_RX_RING];
58 int rx_fill;
59 int rx_empty;
60 struct sk_buff *tx_bufs[N_TX_RING];
61 char*tx_double[N_TX_RING];/* yuck--double buffering */
62 int tx_fill;
63 int tx_empty;
64 unsigned char tx_fullup;
65 struct net_device_stats stats;
66 struct timer_list tx_timeout;
67 int timeout_active;
68 int reset_and_enabled;
69 int rx_allocated;
70 int tx_allocated;
71 unsigned short hash_use_count[64];
72 unsigned short hash_table_mask[4];
/* Name/offset pair used by the /proc register dump. */
typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;
80 #define N_REG_ENTRIES 31
82 bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
83 {"MEMADD", MEMADD},
84 {"MEMDATAHI", MEMDATAHI},
85 {"MEMDATALO", MEMDATALO},
86 {"TXPNTR", TXPNTR},
87 {"RXPNTR", RXPNTR},
88 {"IPG1", IPG1},
89 {"IPG2", IPG2},
90 {"ALIMIT", ALIMIT},
91 {"SLOT", SLOT},
92 {"PALEN", PALEN},
93 {"PAPAT", PAPAT},
94 {"TXSFD", TXSFD},
95 {"JAM", JAM},
96 {"TXCFG", TXCFG},
97 {"TXMAX", TXMAX},
98 {"TXMIN", TXMIN},
99 {"PAREG", PAREG},
100 {"DCNT", DCNT},
101 {"NCCNT", NCCNT},
102 {"NTCNT", NTCNT},
103 {"EXCNT", EXCNT},
104 {"LTCNT", LTCNT},
105 {"TXSM", TXSM},
106 {"RXCFG", RXCFG},
107 {"RXMAX", RXMAX},
108 {"RXMIN", RXMIN},
109 {"FRCNT", FRCNT},
110 {"AECNT", AECNT},
111 {"FECNT", FECNT},
112 {"RXSM", RXSM},
113 {"RXCV", RXCV}
116 struct device *bmac_devs = NULL;
117 static int is_bmac_plus;
119 #if 0
121 * If we can't get a skbuff when we need it, we use this area for DMA.
123 static unsigned char dummy_buf[RX_BUFLEN];
124 #endif
127 * Number of bytes of private data per BMAC: allow enough for
128 * the rx and tx dma commands plus a branch dma command each,
129 * and another 16 bytes to allow us to align the dma command
130 * buffers on a 16 byte boundary.
132 #define PRIV_BYTES (sizeof(struct bmac_data) \
133 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
134 + sizeof(struct sk_buff_head))
136 static unsigned charbitrev(unsigned char b);
137 static intbmac_open(struct device *dev);
138 static intbmac_close(struct device *dev);
139 static intbmac_transmit_packet(struct sk_buff *skb,struct device *dev);
140 static struct net_device_stats *bmac_stats(struct device *dev);
141 static voidbmac_set_multicast(struct device *dev);
142 static intbmac_reset_and_enable(struct device *dev,int enable);
143 static voidbmac_start_chip(struct device *dev);
144 static intbmac_init_chip(struct device *dev);
145 static voidbmac_init_registers(struct device *dev);
146 static voidbmac_reset_chip(struct device *dev);
147 static intbmac_set_address(struct device *dev,void*addr);
148 static voidbmac_misc_intr(int irq,void*dev_id,struct pt_regs *regs);
149 static voidbmac_txdma_intr(int irq,void*dev_id,struct pt_regs *regs);
150 static voidbmac_rxdma_intr(int irq,void*dev_id,struct pt_regs *regs);
151 static voidbmac_set_timeout(struct device *dev);
152 static voidbmac_tx_timeout(unsigned long data);
153 static intbmac_proc_info(char*buffer,char**start, off_t offset,int length,int dummy);
154 static intbmac_output(struct sk_buff *skb,struct device *dev);
155 static voidbmac_start(struct device *dev);
/*
 * DBDMA control writes use a mask in the top half and values in the
 * bottom half: SET turns bits on, CLEAR turns them off.
 */
#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)
/* Little-endian (byte-reversed) 32-bit store to a DBDMA register. */
static __inline__ void
dbdma_st32(volatile unsigned long *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r"(x), "r"(a) : "memory");
}
/* Little-endian (byte-reversed) 32-bit load from a DBDMA register. */
static __inline__ unsigned long
dbdma_ld32(volatile unsigned long *a)
{
	unsigned long val;

	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (a));
	return val;
}
175 void
176 dbdma_stop(volatilestruct dbdma_regs *dmap)
178 dbdma_st32((volatileunsigned long*)&dmap->control,DBDMA_CLEAR(RUN) |DBDMA_SET(FLUSH));
179 eieio();
181 while(dbdma_ld32((volatileunsigned long*)&dmap->status) & (ACTIVE|FLUSH))
182 eieio();
185 static void
186 dbdma_continue(volatilestruct dbdma_regs *dmap)
188 dbdma_st32((volatileunsigned long*)&dmap->control,
189 DBDMA_SET(RUN|WAKE) |DBDMA_CLEAR(PAUSE|DEAD));
190 eieio();
193 static void
194 dbdma_reset(volatilestruct dbdma_regs *dmap)
196 dbdma_st32((volatileunsigned long*)&dmap->control,
197 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
198 eieio();
199 while(dbdma_ld32((volatileunsigned long*)&dmap->status) & RUN)eieio();
202 static void
203 dbdma_setcmd(volatilestruct dbdma_cmd *cp,
204 unsigned short cmd,unsigned count,unsigned long addr,
205 unsigned long cmd_dep)
207 out_le16(&cp->command, cmd);
208 out_le16(&cp->req_count, count);
209 out_le32(&cp->phy_addr, addr);
210 out_le32(&cp->cmd_dep, cmd_dep);
211 out_le16(&cp->xfer_status,0);
212 out_le16(&cp->res_count,0);
215 static __inline__
216 voidbmwrite(struct device *dev,unsigned long reg_offset,unsigned data )
218 out_le16((void*)dev->base_addr + reg_offset, data);
222 static __inline__
223 volatileunsigned shortbmread(struct device *dev,unsigned long reg_offset )
225 returnin_le16((void*)dev->base_addr + reg_offset);
228 static void
229 bmac_reset_chip(struct device *dev)
231 struct bmac_data *bp = (struct bmac_data *) dev->priv;
232 volatilestruct dbdma_regs *rd = bp->rx_dma;
233 volatilestruct dbdma_regs *td = bp->tx_dma;
235 dbdma_reset(rd);
236 dbdma_reset(td);
238 feature_set(FEATURE_BMac_IO_enable);
239 udelay(10000);
240 feature_set(FEATURE_BMac_reset);
241 udelay(10000);
242 feature_clear(FEATURE_BMac_reset);
243 udelay(10000);
246 #define MIFDELAY udelay(500)
248 static unsigned int
249 bmac_mif_readbits(struct device *dev,int nb)
251 unsigned int val =0;
253 while(--nb >=0) {
254 bmwrite(dev, MIFCSR,0);
255 MIFDELAY;
256 if(bmread(dev, MIFCSR) &8)
257 val |=1<< nb;
258 bmwrite(dev, MIFCSR,1);
259 MIFDELAY;
261 bmwrite(dev, MIFCSR,0);
262 MIFDELAY;
263 bmwrite(dev, MIFCSR,1);
264 MIFDELAY;
265 return val;
268 static void
269 bmac_mif_writebits(struct device *dev,unsigned int val,int nb)
271 int b;
273 while(--nb >=0) {
274 b = (val & (1<< nb))?6:4;
275 bmwrite(dev, MIFCSR, b);
276 MIFDELAY;
277 bmwrite(dev, MIFCSR, b|1);
278 MIFDELAY;
282 static unsigned int
283 bmac_mif_read(struct device *dev,unsigned int addr)
285 unsigned int val;
287 bmwrite(dev, MIFCSR,4);
288 MIFDELAY;
289 bmac_mif_writebits(dev, ~0U,32);
290 bmac_mif_writebits(dev,6,4);
291 bmac_mif_writebits(dev, addr,10);
292 bmwrite(dev, MIFCSR,2);
293 MIFDELAY;
294 bmwrite(dev, MIFCSR,1);
295 MIFDELAY;
296 val =bmac_mif_readbits(dev,17);
297 bmwrite(dev, MIFCSR,4);
298 MIFDELAY;
299 printk(KERN_DEBUG "bmac_mif_read(%x) -> %x\n", addr, val);
300 return val;
303 static void
304 bmac_mif_write(struct device *dev,unsigned int addr,unsigned int val)
306 bmwrite(dev, MIFCSR,4);
307 MIFDELAY;
308 bmac_mif_writebits(dev, ~0U,32);
309 bmac_mif_writebits(dev,5,4);
310 bmac_mif_writebits(dev, addr,10);
311 bmac_mif_writebits(dev,2,2);
312 bmac_mif_writebits(dev, val,16);
313 bmac_mif_writebits(dev,3,2);
316 static void
317 bmac_init_registers(struct device *dev)
319 struct bmac_data *bp = (struct bmac_data *) dev->priv;
320 volatileunsigned short regValue;
321 unsigned short*pWord16;
322 int i;
324 /* XXDEBUG(("bmac: enter init_registers\n")); */
326 bmwrite(dev, RXRST, RxResetValue);
327 bmwrite(dev, TXRST, TxResetBit);
329 i =100;
331 --i;
332 udelay(10000);
333 regValue =bmread(dev, TXRST);/* wait for reset to clear..acknowledge */
334 }while((regValue & TxResetBit) && i >0);
336 if(!is_bmac_plus) {
337 regValue =bmread(dev, XCVRIF);
338 regValue |= ClkBit | SerialMode | COLActiveLow;
339 bmwrite(dev, XCVRIF, regValue);
340 udelay(10000);
343 bmwrite(dev, RSEED, (unsigned short)0x1968);
345 regValue =bmread(dev, XIFC);
346 regValue |= TxOutputEnable;
347 bmwrite(dev, XIFC, regValue);
349 bmread(dev, PAREG);
351 /* set collision counters to 0 */
352 bmwrite(dev, NCCNT,0);
353 bmwrite(dev, NTCNT,0);
354 bmwrite(dev, EXCNT,0);
355 bmwrite(dev, LTCNT,0);
357 /* set rx counters to 0 */
358 bmwrite(dev, FRCNT,0);
359 bmwrite(dev, LECNT,0);
360 bmwrite(dev, AECNT,0);
361 bmwrite(dev, FECNT,0);
362 bmwrite(dev, RXCV,0);
364 /* set tx fifo information */
365 bmwrite(dev, TXTH,4);/* 4 octets before tx starts */
367 bmwrite(dev, TXFIFOCSR,0);/* first disable txFIFO */
368 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
370 /* set rx fifo information */
371 bmwrite(dev, RXFIFOCSR,0);/* first disable rxFIFO */
372 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
374 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
375 bmread(dev, STATUS);/* read it just to clear it */
377 bmwrite(dev, INTDISABLE, EnableNormal);
379 /* zero out the chip Hash Filter registers */
380 for(i=0; i<4; i++) bp->hash_table_mask[i] =0;
381 bmwrite(dev, BHASH3, bp->hash_table_mask[0]);/* bits 15 - 0 */
382 bmwrite(dev, BHASH2, bp->hash_table_mask[1]);/* bits 31 - 16 */
383 bmwrite(dev, BHASH1, bp->hash_table_mask[2]);/* bits 47 - 32 */
384 bmwrite(dev, BHASH0, bp->hash_table_mask[3]);/* bits 63 - 48 */
386 pWord16 = (unsigned short*)dev->dev_addr;
387 bmwrite(dev, MADD0, *pWord16++);
388 bmwrite(dev, MADD1, *pWord16++);
389 bmwrite(dev, MADD2, *pWord16);
392 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
394 return;
#if 0
/* Currently unused interrupt mask helpers. */
static void
bmac_disable_interrupts(struct device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
412 static void
413 bmac_start_chip(struct device *dev)
415 struct bmac_data *bp = (struct bmac_data *) dev->priv;
416 volatilestruct dbdma_regs *rd = bp->rx_dma;
417 unsigned short oldConfig;
419 /* enable rx dma channel */
420 dbdma_continue(rd);
422 oldConfig =bmread(dev, TXCFG);
423 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
425 /* turn on rx plus any other bits already on (promiscuous possibly) */
426 oldConfig =bmread(dev, RXCFG);
427 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
428 udelay(20000);
431 static int
432 bmac_init_chip(struct device *dev)
434 if(is_bmac_plus &&bmac_mif_read(dev,2) ==0x7810) {
435 if(bmac_mif_read(dev,4) ==0xa1) {
436 bmac_mif_write(dev,0,0x1000);
437 }else{
438 bmac_mif_write(dev,4,0xa1);
439 bmac_mif_write(dev,0,0x1200);
441 /* XXX debugging */
442 bmac_mif_read(dev,0);
443 bmac_mif_read(dev,4);
445 bmac_init_registers(dev);
446 return1;
449 static intbmac_set_address(struct device *dev,void*addr)
451 unsigned char*p = addr;
452 unsigned short*pWord16;
453 unsigned long flags;
454 int i;
456 XXDEBUG(("bmac: enter set_address\n"));
457 save_flags(flags);cli();
459 for(i =0; i <6; ++i) {
460 dev->dev_addr[i] = p[i];
462 /* load up the hardware address */
463 pWord16 = (unsigned short*)dev->dev_addr;
464 bmwrite(dev, MADD0, *pWord16++);
465 bmwrite(dev, MADD1, *pWord16++);
466 bmwrite(dev, MADD2, *pWord16);
468 restore_flags(flags);
469 XXDEBUG(("bmac: exit set_address\n"));
470 return0;
473 staticinlinevoidbmac_set_timeout(struct device *dev)
475 struct bmac_data *bp = (struct bmac_data *) dev->priv;
476 unsigned long flags;
478 save_flags(flags);
479 cli();
480 if(bp->timeout_active)
481 del_timer(&bp->tx_timeout);
482 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
483 bp->tx_timeout.function = bmac_tx_timeout;
484 bp->tx_timeout.data = (unsigned long) dev;
485 add_timer(&bp->tx_timeout);
486 bp->timeout_active =1;
487 restore_flags(flags);
490 static void
491 bmac_construct_xmt(struct sk_buff *skb,volatilestruct dbdma_cmd *cp,
492 char*doubleBuf)
494 void*vaddr, *page_break;
495 unsigned long baddr;
496 unsigned long len;
498 len = skb->len;
499 vaddr = skb->data;
500 baddr =virt_to_bus(vaddr);
501 page_break =round_page(vaddr);
502 if(trunc_page(vaddr) !=trunc_page(vaddr+len) &&
503 (unsigned long)round_page(baddr) !=virt_to_bus(page_break)) {
504 baddr =virt_to_bus(doubleBuf);
505 XXDEBUG(("bmac: double buffering, double=%#08x, skb->data=%#08x, len=%d\n", doubleBuf, skb->data, len));
506 }else
507 flush_page_to_ram((unsigned long)vaddr);
509 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr,0);
512 static void
513 bmac_construct_rxbuff(unsigned char*addr,volatilestruct dbdma_cmd *cp)
515 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,virt_to_bus(addr),0);
/* Bit-reverse one byte of an ethernet hardware address. */
static unsigned char
bitrev(unsigned char b)
{
	int result = 0, bit;

	for (bit = 0; bit < 8; ++bit) {
		result = (result << 1) | (b & 1);
		b >>= 1;
	}
	return result;
}
530 static int
531 bmac_init_tx_ring(struct bmac_data *bp)
533 int i;
534 volatilestruct dbdma_regs *td = bp->tx_dma;
535 char*addr;
537 if(!bp->tx_allocated) {
538 /* zero out tx cmds, alloc space for double buffering */
539 addr = (char*)kmalloc(ETHERMTU * N_TX_RING, GFP_DMA);
540 if(addr == NULL)return0;
541 for(i =0; i < N_TX_RING; i++, addr += ETHERMTU) bp->tx_double[i] = addr;
542 bp->tx_allocated =1;
544 memset((char*)bp->tx_cmds,0, (N_TX_RING+1) *sizeof(struct dbdma_cmd));
546 bp->tx_empty =0;
547 bp->tx_fill =0;
548 bp->tx_fullup =0;
550 /* put a branch at the end of the tx command list */
551 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
552 (DBDMA_NOP | BR_ALWAYS),0,0,virt_to_bus(bp->tx_cmds));
554 /* reset tx dma */
555 dbdma_reset(td);
556 out_le32(&td->wait_sel,0x00200020);
557 out_le32(&td->cmdptr,virt_to_bus(bp->tx_cmds));
559 return1;
563 static int
564 bmac_init_rx_ring(struct bmac_data *bp)
566 volatilestruct dbdma_regs *rd = bp->rx_dma;
567 int i;
569 /* initialize list of sk_buffs for receiving and set up recv dma */
570 if(!bp->rx_allocated) {
571 for(i =0; i < N_RX_RING; i++) {
572 bp->rx_bufs[i] =dev_alloc_skb(RX_BUFLEN+2);
573 if(bp->rx_bufs[i] == NULL)return0;
574 skb_reserve(bp->rx_bufs[i],2);
576 bp->rx_allocated =1;
579 memset((char*)bp->rx_cmds,0, (N_RX_RING+1) *sizeof(struct dbdma_cmd));
580 for(i =0; i < N_RX_RING; i++)
581 bmac_construct_rxbuff(bp->rx_bufs[i]->data, &bp->rx_cmds[i]);
583 bp->rx_empty =0;
584 bp->rx_fill = i;
586 /* Put a branch back to the beginning of the receive command list */
587 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
588 (DBDMA_NOP | BR_ALWAYS),0,0,virt_to_bus(bp->rx_cmds));
590 /* start rx dma */
591 dbdma_reset(rd);
592 out_le32(&rd->cmdptr,virt_to_bus(bp->rx_cmds));
594 return1;
598 static intbmac_transmit_packet(struct sk_buff *skb,struct device *dev)
600 struct bmac_data *bp = (struct bmac_data *) dev->priv;
601 volatilestruct dbdma_regs *td = bp->tx_dma;
602 int i;
604 /* see if there's a free slot in the tx ring */
605 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
606 /* bp->tx_empty, bp->tx_fill)); */
607 i = bp->tx_fill +1;
608 if(i >= N_TX_RING) i =0;
609 if(i == bp->tx_empty) {
610 dev->tbusy =1;
611 bp->tx_fullup =1;
612 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
613 return-1;/* can't take it at the moment */
616 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP,0,0,0);
618 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill], bp->tx_double[bp->tx_fill]);
620 bp->tx_bufs[bp->tx_fill] = skb;
621 bp->tx_fill = i;
623 dbdma_continue(td);
625 return0;
628 static int rxintcount =0;
630 static voidbmac_rxdma_intr(int irq,void*dev_id,struct pt_regs *regs)
632 struct device *dev = (struct device *) dev_id;
633 struct bmac_data *bp = (struct bmac_data *) dev->priv;
634 volatilestruct dbdma_regs *rd = bp->rx_dma;
635 volatilestruct dbdma_cmd *cp;
636 int i, nb, stat;
637 struct sk_buff *skb;
638 unsigned int residual;
639 int last;
640 unsigned long flags;
642 save_flags(flags);cli();
644 if(++rxintcount <10) {
645 XXDEBUG(("bmac_rxdma_intr\n"));
648 last = -1;
649 i = bp->rx_empty;
651 while(1) {
652 cp = &bp->rx_cmds[i];
653 stat =ld_le16(&cp->xfer_status);
654 residual =ld_le16(&cp->res_count);
655 if((stat & ACTIVE) ==0)break;
656 nb = RX_BUFLEN - residual -2;
657 if(nb < (ETHERMINPACKET - ETHERCRC)) {
658 skb = NULL;
659 bp->stats.rx_length_errors++;
660 bp->stats.rx_errors++;
661 }else skb = bp->rx_bufs[i];
662 if(skb != NULL) {
663 nb -= ETHERCRC;
664 skb_put(skb, nb);
665 skb->dev = dev;
666 skb->protocol =eth_type_trans(skb, dev);
667 netif_rx(skb);
668 bp->rx_bufs[i] =dev_alloc_skb(RX_BUFLEN+2);
669 skb_reserve(bp->rx_bufs[i],2);
670 bmac_construct_rxbuff(bp->rx_bufs[i]->data, &bp->rx_cmds[i]);
671 ++bp->stats.rx_packets;
672 }else{
673 ++bp->stats.rx_dropped;
675 st_le16(&cp->res_count,0);
676 st_le16(&cp->xfer_status,0);
677 last = i;
678 if(++i >= N_RX_RING) i =0;
681 if(last != -1) {
682 bp->rx_fill = last;
683 bp->rx_empty = i;
686 restore_flags(flags);
688 dbdma_continue(rd);
690 if(rxintcount <10) {
691 XXDEBUG(("bmac_rxdma_intr done\n"));
695 static int txintcount =0;
697 static voidbmac_txdma_intr(int irq,void*dev_id,struct pt_regs *regs)
699 struct device *dev = (struct device *) dev_id;
700 struct bmac_data *bp = (struct bmac_data *) dev->priv;
701 volatilestruct dbdma_cmd *cp;
702 int stat;
703 unsigned long flags;
705 save_flags(flags);cli();
707 if(txintcount++ <10) {
708 XXDEBUG(("bmac_txdma_intr\n"));
711 /* del_timer(&bp->tx_timeout); */
712 /* bp->timeout_active = 0; */
714 while(1) {
715 cp = &bp->tx_cmds[bp->tx_empty];
716 stat =ld_le16(&cp->xfer_status);
717 if(txintcount <10) {
718 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
720 if(!(stat & ACTIVE))break;
722 if(bp->tx_bufs[bp->tx_empty]) {
723 ++bp->stats.tx_packets;
724 dev_kfree_skb(bp->tx_bufs[bp->tx_empty]);
726 bp->tx_bufs[bp->tx_empty] = NULL;
727 bp->tx_fullup =0;
728 dev->tbusy =0;
729 /* XXDEBUG(("bmac_intr: cleared tbusy, empty=%d fill=%d\n", */
730 /* i, bp->tx_fill)); */
731 mark_bh(NET_BH);
732 if(++bp->tx_empty >= N_TX_RING) bp->tx_empty =0;
733 if(bp->tx_empty == bp->tx_fill)break;
736 restore_flags(flags);
738 if(txintcount <10) {
739 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
742 bmac_start(dev);
745 static struct net_device_stats *bmac_stats(struct device *dev)
747 struct bmac_data *p = (struct bmac_data *) dev->priv;
749 return&p->stats;
752 #ifndef SUNHME_MULTICAST
753 /* Real fast bit-reversal algorithm, 6-bit values */
754 static int reverse6[64] = {
755 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
756 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
757 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
758 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
759 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
760 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
761 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
762 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
765 static unsigned int
766 crc416(unsigned int curval,unsigned short nxtval)
768 registerunsigned int counter, cur = curval, next = nxtval;
769 registerint high_crc_set, low_data_set;
771 /* Swap bytes */
772 next = ((next &0x00FF) <<8) | (next >>8);
774 /* Compute bit-by-bit */
775 for(counter =0; counter <16; ++counter) {
776 /* is high CRC bit set? */
777 if((cur &0x80000000) ==0) high_crc_set =0;
778 else high_crc_set =1;
780 cur = cur <<1;
782 if((next &0x0001) ==0) low_data_set =0;
783 else low_data_set =1;
785 next = next >>1;
787 /* do the XOR */
788 if(high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
790 return cur;
/* CRC of a 48-bit ethernet address, fed in as three 16-bit words. */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

	return (newcrc);
}
807 * Add requested mcast addr to BMac's hash table filter.
811 static void
812 bmac_addhash(struct bmac_data *bp,unsigned char*addr)
814 unsigned int crc;
815 unsigned short mask;
817 if(!(*addr))return;
818 crc =bmac_crc((unsigned short*)addr) &0x3f;/* Big-endian alert! */
819 crc = reverse6[crc];/* Hyperfast bit-reversing algorithm */
820 if(bp->hash_use_count[crc]++)return;/* This bit is already set */
821 mask = crc %16;
822 mask = (unsigned char)1<< mask;
823 bp->hash_use_count[crc/16] |= mask;
826 static void
827 bmac_removehash(struct bmac_data *bp,unsigned char*addr)
829 unsigned int crc;
830 unsigned char mask;
832 /* Now, delete the address from the filter copy, as indicated */
833 crc =bmac_crc((unsigned short*)addr) &0x3f;/* Big-endian alert! */
834 crc = reverse6[crc];/* Hyperfast bit-reversing algorithm */
835 if(bp->hash_use_count[crc] ==0)return;/* That bit wasn't in use! */
836 if(--bp->hash_use_count[crc])return;/* That bit is still in use */
837 mask = crc %16;
838 mask = ((unsigned char)1<< mask) ^0xffff;/* To turn off bit */
839 bp->hash_table_mask[crc/16] &= mask;
843 * Sync the adapter with the software copy of the multicast mask
844 * (logical address filter).
847 static void
848 bmac_rx_off(struct device *dev)
850 unsigned short rx_cfg;
852 rx_cfg =bmread(dev, RXCFG);
853 rx_cfg &= ~RxMACEnable;
854 bmwrite(dev, RXCFG, rx_cfg);
856 rx_cfg =bmread(dev, RXCFG);
857 }while(rx_cfg & RxMACEnable);
860 unsigned short
861 bmac_rx_on(struct device *dev,int hash_enable,int promisc_enable)
863 unsigned short rx_cfg;
865 rx_cfg =bmread(dev, RXCFG);
866 rx_cfg |= RxMACEnable;
867 if(hash_enable) rx_cfg |= RxHashFilterEnable;
868 else rx_cfg &= ~RxHashFilterEnable;
869 if(promisc_enable) rx_cfg |= RxPromiscEnable;
870 else rx_cfg &= ~RxPromiscEnable;
871 bmwrite(dev, RXRST, RxResetValue);
872 bmwrite(dev, RXFIFOCSR,0);/* first disable rxFIFO */
873 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
874 bmwrite(dev, RXCFG, rx_cfg );
875 return rx_cfg;
878 static void
879 bmac_update_hash_table_mask(struct device *dev,struct bmac_data *bp)
881 bmwrite(dev, BHASH3, bp->hash_table_mask[0]);/* bits 15 - 0 */
882 bmwrite(dev, BHASH2, bp->hash_table_mask[1]);/* bits 31 - 16 */
883 bmwrite(dev, BHASH1, bp->hash_table_mask[2]);/* bits 47 - 32 */
884 bmwrite(dev, BHASH0, bp->hash_table_mask[3]);/* bits 63 - 48 */
#if 0
/* Unused single-address add/remove helpers. */
static void
bmac_add_multi(struct device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC) ? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC) ? 1 : 0);
}
#endif
911 /* Set or clear the multicast filter for this adaptor.
912 num_addrs == -1 Promiscuous mode, receive all packets
913 num_addrs == 0 Normal mode, clear multicast list
914 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
915 best-effort filtering.
917 static voidbmac_set_multicast(struct device *dev)
919 struct dev_mc_list *dmi;
920 struct bmac_data *bp = (struct bmac_data *) dev->priv;
921 int num_addrs = dev->mc_count;
922 unsigned short rx_cfg;
923 int i;
925 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
927 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count >64)) {
928 for(i=0; i<4; i++) bp->hash_table_mask[i] =0xffff;
929 bmac_update_hash_table_mask(dev, bp);
930 rx_cfg =bmac_rx_on(dev,1,0);
931 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
932 }else if((dev->flags & IFF_PROMISC) || (num_addrs <0)) {
933 rx_cfg =bmread(dev, RXCFG);
934 rx_cfg |= RxPromiscEnable;
935 bmwrite(dev, RXCFG, rx_cfg);
936 rx_cfg =bmac_rx_on(dev,0,1);
937 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
938 }else{
939 for(i=0; i<4; i++) bp->hash_table_mask[i] =0;
940 for(i=0; i<64; i++) bp->hash_use_count[i] =0;
941 if(num_addrs ==0) {
942 rx_cfg =bmac_rx_on(dev,0,0);
943 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
944 }else{
945 for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
946 bmac_addhash(bp, dmi->dmi_addr);
947 bmac_update_hash_table_mask(dev, bp);
948 rx_cfg =bmac_rx_on(dev,1,0);
949 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
952 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
954 #else/* ifdef SUNHME_MULTICAST */
956 /* The version of set_multicast below was lifted from sunhme.c */
958 #define CRC_POLYNOMIAL_BE 0x04c11db7UL/* Ethernet CRC, big endian */
959 #define CRC_POLYNOMIAL_LE 0xedb88320UL/* Ethernet CRC, little endian */
961 static voidbmac_set_multicast(struct device *dev)
963 struct dev_mc_list *dmi = dev->mc_list;
964 char*addrs;
965 int i, j, bit, byte;
966 unsigned short rx_cfg;
967 u32 crc, poly = CRC_POLYNOMIAL_LE;
969 /* Let the transmits drain. */
970 /* while(dev->tbusy) schedule(); */
972 /* Lock out others. */
973 /* set_bit(0, (void *) &dev->tbusy); */
975 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count >64)) {
976 bmwrite(dev, BHASH0,0xffff);
977 bmwrite(dev, BHASH1,0xffff);
978 bmwrite(dev, BHASH2,0xffff);
979 bmwrite(dev, BHASH3,0xffff);
980 }else if(dev->flags & IFF_PROMISC) {
981 rx_cfg =bmread(dev, RXCFG);
982 rx_cfg |= RxPromiscEnable;
983 bmwrite(dev, RXCFG, rx_cfg);
984 }else{
985 u16 hash_table[4];
987 rx_cfg =bmread(dev, RXCFG);
988 rx_cfg &= ~RxPromiscEnable;
989 bmwrite(dev, RXCFG, rx_cfg);
991 for(i =0; i <4; i++) hash_table[i] =0;
993 for(i =0; i < dev->mc_count; i++) {
994 addrs = dmi->dmi_addr;
995 dmi = dmi->next;
997 if(!(*addrs &1))
998 continue;
1000 crc =0xffffffffU;
1001 for(byte =0; byte <6; byte++) {
1002 for(bit = *addrs++, j =0; j <8; j++, bit >>=1) {
1003 int test;
1005 test = ((bit ^ crc) &0x01);
1006 crc >>=1;
1007 if(test)
1008 crc = crc ^ poly;
1011 crc >>=26;
1012 hash_table[crc >>4] |=1<< (crc &0xf);
1014 bmwrite(dev, BHASH0, hash_table[0]);
1015 bmwrite(dev, BHASH1, hash_table[1]);
1016 bmwrite(dev, BHASH2, hash_table[2]);
1017 bmwrite(dev, BHASH3, hash_table[3]);
1020 /* Let us get going again. */
1021 /* dev->tbusy = 0; */
1023 #endif/* SUNHME_MULTICAST */
1025 static int miscintcount =0;
1027 static voidbmac_misc_intr(int irq,void*dev_id,struct pt_regs *regs)
1029 struct device *dev = (struct device *) dev_id;
1030 struct bmac_data *bp = (struct bmac_data *)dev->priv;
1031 unsigned int status =bmread(dev, STATUS);
1032 if(miscintcount++ <10) {
1033 XXDEBUG(("bmac_misc_intr\n"));
1035 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1036 /* bmac_txdma_intr_inner(irq, dev_id, regs); */
1037 /* if (status & FrameReceived) bp->stats.rx_dropped++; */
1038 if(status & RxErrorMask) bp->stats.rx_errors++;
1039 if(status & RxCRCCntExp) bp->stats.rx_crc_errors++;
1040 if(status & RxLenCntExp) bp->stats.rx_length_errors++;
1041 if(status & RxOverFlow) bp->stats.rx_over_errors++;
1042 if(status & RxAlignCntExp) bp->stats.rx_frame_errors++;
1044 /* if (status & FrameSent) bp->stats.tx_dropped++; */
1045 if(status & TxErrorMask) bp->stats.tx_errors++;
1046 if(status & TxUnderrun) bp->stats.tx_fifo_errors++;
1047 if(status & TxNormalCollExp) bp->stats.collisions++;
/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
1066 static unsigned char
1067 bmac_clock_out_bit(struct device *dev)
1069 unsigned short data;
1070 unsigned short val;
1072 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1073 udelay(DelayValue);
1075 data =bmread(dev, SROMCSR);
1076 udelay(DelayValue);
1077 val = (data >> SD0ShiftCount) &1;
1079 bmwrite(dev, SROMCSR, ChipSelect);
1080 udelay(DelayValue);
1082 return val;
1085 static void
1086 bmac_clock_in_bit(struct device *dev,unsigned int val)
1088 unsigned short data;
1090 if(val !=0&& val !=1)return;
1092 data = (val << SDIShiftCount);
1093 bmwrite(dev, SROMCSR, data | ChipSelect );
1094 udelay(DelayValue);
1096 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1097 udelay(DelayValue);
1099 bmwrite(dev, SROMCSR, data | ChipSelect);
1100 udelay(DelayValue);
1103 static void
1104 reset_and_select_srom(struct device *dev)
1106 /* first reset */
1107 bmwrite(dev, SROMCSR,0);
1108 udelay(DelayValue);
1110 /* send it the read command (110) */
1111 bmac_clock_in_bit(dev,1);
1112 bmac_clock_in_bit(dev,1);
1113 bmac_clock_in_bit(dev,0);
1116 static unsigned short
1117 read_srom(struct device *dev,unsigned int addr,unsigned int addr_len)
1119 unsigned short data, val;
1120 int i;
1122 /* send out the address we want to read from */
1123 for(i =0; i < addr_len; i++) {
1124 val = addr >> (addr_len-i-1);
1125 bmac_clock_in_bit(dev, val &1);
1128 /* Now read in the 16-bit data */
1129 data =0;
1130 for(i =0; i <16; i++) {
1131 val =bmac_clock_out_bit(dev);
1132 data <<=1;
1133 data |= val;
1135 bmwrite(dev, SROMCSR,0);
1137 return data;
1141 * It looks like Cogent and SMC use different methods for calculating
1142 * checksums. What a pain..
1145 static int
1146 bmac_verify_checksum(struct device *dev)
1148 unsigned short data, storedCS;
1150 reset_and_select_srom(dev);
1151 data =read_srom(dev,3, SROMAddressBits);
1152 storedCS = ((data >>8) &0x0ff) | ((data <<8) &0xff00);
1154 return0;
1158 static void
1159 bmac_get_station_address(struct device *dev,unsigned char*ea)
1161 int i;
1162 unsigned short data;
1164 for(i =0; i <6; i++)
1166 reset_and_select_srom(dev);
1167 data =read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1168 ea[2*i] =bitrev(data &0x0ff);
1169 ea[2*i+1] =bitrev((data >>8) &0x0ff);
1173 static intbmac_reset_and_enable(struct device *dev,int enable)
1175 struct bmac_data *bp = dev->priv;
1176 unsigned long flags;
1178 save_flags(flags);cli();
1179 bp->reset_and_enabled =0;
1180 bmac_reset_chip(dev);
1181 if(enable) {
1182 if(!bmac_init_tx_ring(bp) || !bmac_init_rx_ring(bp))return0;
1183 if(!bmac_init_chip(dev))return0;
1184 bmac_start_chip(dev);
1185 bmwrite(dev, INTDISABLE, EnableNormal);
1186 bp->reset_and_enabled =1;
1187 /* { */
1188 /* unsigned char random_packet[100]; */
1189 /* unsigned int i; */
1190 /* struct sk_buff *skb = dev_alloc_skb(RX_BUFLEN+2); */
1191 /* unsigned char *data = skb_put(skb, sizeof(random_packet)); */
1192 /* XXDEBUG(("transmitting random packet\n")); */
1193 /* for (i = 0; i < sizeof(random_packet); i++) data[i] = i; */
1194 /* bmac_transmit_packet(skb, dev); */
1195 /* XXDEBUG(("done transmitting random packet\n")); */
1196 /* } */
1198 restore_flags(flags);
1199 return1;
1203 bmac_probe(struct device *dev)
1205 int j, rev;
1206 struct bmac_data *bp;
1207 struct device_node *bmacs;
1208 unsigned char*addr;
1209 static struct device_node *all_bmacs = NULL, *next_bmac;
1211 if(all_bmacs == NULL) {
1212 all_bmacs =find_devices("bmac");
1213 is_bmac_plus =0;
1214 if(all_bmacs == NULL) {
1215 all_bmacs =find_compatible_devices("network","bmac+");
1216 if(all_bmacs)
1217 is_bmac_plus =1;
1219 next_bmac = all_bmacs;
1221 bmacs = next_bmac;
1222 if(bmacs == NULL)return-ENODEV;
1223 next_bmac = bmacs->next;
1225 bmac_devs = dev;/* KLUDGE!! */
1227 if(bmacs->n_addrs !=3|| bmacs->n_intrs !=3) {
1228 printk(KERN_ERR "can't use BMAC %s: expect 3 addrs and 3 intrs\n",
1229 bmacs->full_name);
1230 return-EINVAL;
1233 if(dev == NULL) {
1234 dev =init_etherdev(NULL, PRIV_BYTES);
1235 bmac_devs = dev;/*KLUDGE!!*/
1236 }else{
1237 /* XXX this doesn't look right (but it's never used :-) */
1238 dev->priv =kmalloc(PRIV_BYTES, GFP_KERNEL);
1239 if(dev->priv ==0)return-ENOMEM;
1242 dev->base_addr = bmacs->addrs[0].address;
1243 dev->irq = bmacs->intrs[0].line;
1245 bmwrite(dev, INTDISABLE, DisableAll);
1247 addr =get_property(bmacs,"mac-address", NULL);
1248 if(addr == NULL) {
1249 addr =get_property(bmacs,"local-mac-address", NULL);
1250 if(addr == NULL) {
1251 printk(KERN_ERR "Can't get mac-address for BMAC at %lx\n",
1252 dev->base_addr);
1253 return-EAGAIN;
1257 printk(KERN_INFO "%s: BMAC at", dev->name);
1258 rev = addr[0] ==0&& addr[1] ==0xA0;
1259 for(j =0; j <6; ++j) {
1260 dev->dev_addr[j] = rev?bitrev(addr[j]): addr[j];
1261 printk("%c%.2x", (j?':':' '), dev->dev_addr[j]);
1263 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1264 printk("\n");
1266 dev->open = bmac_open;
1267 dev->stop = bmac_close;
1268 dev->hard_start_xmit = bmac_output;
1269 dev->get_stats = bmac_stats;
1270 dev->set_multicast_list = bmac_set_multicast;
1271 dev->set_mac_address = bmac_set_address;
1273 bmac_get_station_address(dev, addr);
1274 if(bmac_verify_checksum(dev) !=0)return-EINVAL;
1276 ether_setup(dev);
1278 bp = (struct bmac_data *) dev->priv;
1279 memset(bp,0,sizeof(struct bmac_data));
1280 bp->tx_dma = (volatilestruct dbdma_regs *) bmacs->addrs[1].address;
1281 bp->tx_dma_intr = bmacs->intrs[1].line;
1282 bp->rx_dma = (volatilestruct dbdma_regs *) bmacs->addrs[2].address;
1283 bp->rx_dma_intr = bmacs->intrs[2].line;
1285 bp->tx_cmds = (volatilestruct dbdma_cmd *)DBDMA_ALIGN(bp +1);
1286 bp->rx_cmds = bp->tx_cmds + N_TX_RING +1;
1288 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING +1);
1289 skb_queue_head_init(bp->queue);
1291 memset(&bp->stats,0,sizeof(bp->stats));
1292 memset((char*) bp->tx_cmds,0,
1293 (N_TX_RING + N_RX_RING +2) *sizeof(struct dbdma_cmd));
1294 /* init_timer(&bp->tx_timeout); */
1295 /* bp->timeout_active = 0; */
1297 if(request_irq(dev->irq, bmac_misc_intr,0,"BMAC-misc", dev)) {
1298 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1299 return-EAGAIN;
1301 if(request_irq(bmacs->intrs[1].line, bmac_txdma_intr,0,"BMAC-txdma",
1302 dev)) {
1303 printk(KERN_ERR "BMAC: can't get irq %d\n", bmacs->intrs[1].line);
1304 return-EAGAIN;
1306 if(request_irq(bmacs->intrs[2].line, bmac_rxdma_intr,0,"BMAC-rxdma",
1307 dev)) {
1308 printk(KERN_ERR "BMAC: can't get irq %d\n", bmacs->intrs[2].line);
1309 return-EAGAIN;
1312 if(!bmac_reset_and_enable(dev,0))return-ENOMEM;
1314 #ifdef CONFIG_PROC_FS
1315 proc_net_register(&(struct proc_dir_entry) {
1316 PROC_NET_BMAC,4,"bmac",
1317 S_IFREG | S_IRUGO,1,0,0,
1318 0, &proc_net_inode_operations,
1319 bmac_proc_info
1321 #endif
1323 return0;
1326 static intbmac_open(struct device *dev)
1328 /* XXDEBUG(("bmac: enter open\n")); */
1329 /* reset the chip */
1330 bmac_reset_and_enable(dev,1);
1332 dev->flags |= IFF_UP | IFF_RUNNING;
1334 return0;
1337 static intbmac_close(struct device *dev)
1339 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1340 volatilestruct dbdma_regs *rd = bp->rx_dma;
1341 volatilestruct dbdma_regs *td = bp->tx_dma;
1342 unsigned short config;
1343 int i;
1345 dev->flags &= ~(IFF_UP | IFF_RUNNING);
1347 /* disable rx and tx */
1348 config =bmread(dev, RXCFG);
1349 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1351 config =bmread(dev, TXCFG);
1352 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1354 bmwrite(dev, INTDISABLE, DisableAll);/* disable all intrs */
1356 /* disable rx and tx dma */
1357 st_le32(&rd->control,DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));/* clear run bit */
1358 st_le32(&td->control,DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));/* clear run bit */
1360 /* free some skb's */
1361 XXDEBUG(("bmac: free rx bufs\n"));
1362 for(i=0; i<N_RX_RING; i++) {
1363 if(bp->rx_bufs[i] != NULL) {
1364 dev_kfree_skb(bp->rx_bufs[i]);
1365 bp->rx_bufs[i] = NULL;
1368 bp->rx_allocated =0;
1369 XXDEBUG(("bmac: free doubles\n"));/*MEMORY LEAK BELOW!!! FIX!!! */
1370 if(bp->tx_double[0] != NULL)kfree(bp->tx_double[0]);
1371 XXDEBUG(("bmac: free tx bufs\n"));
1372 for(i =0; i<N_TX_RING; i++) {
1373 if(bp->tx_bufs[i] != NULL) {
1374 dev_kfree_skb(bp->tx_bufs[i]);
1375 bp->tx_bufs[i] = NULL;
1378 bp->tx_allocated =0;
1379 bp->reset_and_enabled =0;
1380 XXDEBUG(("bmac: all bufs freed\n"));
1382 return0;
1385 static void
1386 bmac_start(struct device *dev)
1388 struct bmac_data *bp = dev->priv;
1389 int i;
1390 struct sk_buff *skb;
1391 unsigned long flags;
1393 save_flags(flags);cli();
1394 while(1) {
1395 i = bp->tx_fill +1;
1396 if(i >= N_TX_RING) i =0;
1397 if(i == bp->tx_empty)break;
1398 skb =skb_dequeue(bp->queue);
1399 if(skb == NULL)break;
1400 bmac_transmit_packet(skb, dev);
1402 restore_flags(flags);
1405 static int
1406 bmac_output(struct sk_buff *skb,struct device *dev)
1408 struct bmac_data *bp = dev->priv;
1409 skb_queue_tail(bp->queue, skb);
1410 bmac_start(dev);
1411 return0;
1414 static voidbmac_tx_timeout(unsigned long data)
1416 struct device *dev = (struct device *) data;
1417 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1418 volatilestruct dbdma_regs *td = bp->tx_dma;
1419 volatilestruct dbdma_regs *rd = bp->rx_dma;
1420 volatilestruct dbdma_cmd *cp;
1421 unsigned long flags;
1422 unsigned short config, oldConfig;
1423 int i;
1425 XXDEBUG(("bmac: tx_timeout called\n"));
1426 save_flags(flags);cli();
1427 bp->timeout_active =0;
1429 /* update various counters */
1430 /* bmac_handle_misc_intrs(bp, 0); */
1432 cp = &bp->tx_cmds[bp->tx_empty];
1433 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
1434 /* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
1435 /* mb->pr, mb->xmtfs, mb->fifofc)); */
1437 /* turn off both tx and rx and reset the chip */
1438 config =bmread(dev, RXCFG);
1439 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1440 config =bmread(dev, TXCFG);
1441 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1442 out_le32(&td->control,DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1443 printk(KERN_ERR "bmac: transmit timeout - resetting\n");
1444 bmac_reset_chip(dev);
1446 /* restart rx dma */
1447 cp =bus_to_virt(ld_le32(&rd->cmdptr));
1448 out_le32(&rd->control,DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1449 out_le16(&cp->xfer_status,0);
1450 out_le32(&rd->cmdptr,virt_to_bus(cp));
1451 out_le32(&rd->control,DBDMA_SET(RUN|WAKE));
1453 /* fix up the transmit side */
1454 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
1455 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
1456 i = bp->tx_empty;
1457 ++bp->stats.tx_errors;
1458 if(i != bp->tx_fill) {
1459 dev_kfree_skb(bp->tx_bufs[i]);
1460 bp->tx_bufs[i] = NULL;
1461 if(++i >= N_TX_RING) i =0;
1462 bp->tx_empty = i;
1464 bp->tx_fullup =0;
1465 dev->tbusy =0;
1466 mark_bh(NET_BH);
1467 XXDEBUG((KERN_DEBUG "bmac: clearing tbusy\n"));
1468 if(i != bp->tx_fill) {
1469 cp = &bp->tx_cmds[i];
1470 out_le16(&cp->xfer_status,0);
1471 out_le16(&cp->command, OUTPUT_LAST);
1472 out_le32(&td->cmdptr,virt_to_bus(cp));
1473 out_le32(&td->control,DBDMA_SET(RUN));
1474 /* bmac_set_timeout(dev); */
1475 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
1478 /* turn it back on */
1479 oldConfig =bmread(dev, RXCFG);
1480 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
1481 oldConfig =bmread(dev, TXCFG);
1482 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
1484 restore_flags(flags);
#if 0
/* Debug helper (compiled out): dump @count DBDMA commands as raw
   little-endian words. */
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
	int n;
	int *wp;

	for (n = 0; n < count; n++) {
		wp = (int *)(cp + n);
		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(wp + 0),
		       ld_le32(wp + 1),
		       ld_le32(wp + 2),
		       ld_le32(wp + 3));
	}
}
#endif
1505 static int
1506 bmac_proc_info(char*buffer,char**start, off_t offset,int length,int dummy)
1508 int len =0;
1509 off_t pos =0;
1510 off_t begin =0;
1511 int i;
1513 if(bmac_devs == NULL)return(-ENOSYS);
1515 len +=sprintf(buffer,"BMAC counters & registers\n");
1517 for(i =0; i<N_REG_ENTRIES; i++) {
1518 len +=sprintf(buffer + len,"%s: %#08x\n",
1519 reg_entries[i].name,
1520 bmread(bmac_devs, reg_entries[i].reg_offset));
1521 pos = begin + len;
1523 if(pos < offset) {
1524 len =0;
1525 begin = pos;
1528 if(pos > offset+length)break;
1531 *start = buffer + (offset - begin);
1532 len -= (offset - begin);
1534 if(len > length) len = length;
1536 return len;
close