From 849369d6c66d3054688672f97d31fceb8e8230fb Mon Sep 17 00:00:00 2001 From: root Date: Fri, 25 Dec 2015 04:40:36 +0000 Subject: initial_commit --- drivers/net/tokenring/3c359.c | 1846 +++++++++++ drivers/net/tokenring/3c359.h | 291 ++ drivers/net/tokenring/Kconfig | 185 ++ drivers/net/tokenring/Makefile | 15 + drivers/net/tokenring/abyss.c | 469 +++ drivers/net/tokenring/abyss.h | 58 + drivers/net/tokenring/ibmtr.c | 1963 ++++++++++++ drivers/net/tokenring/lanstreamer.c | 1918 ++++++++++++ drivers/net/tokenring/lanstreamer.h | 343 +++ drivers/net/tokenring/madgemc.c | 763 +++++ drivers/net/tokenring/madgemc.h | 70 + drivers/net/tokenring/olympic.c | 1750 +++++++++++ drivers/net/tokenring/olympic.h | 321 ++ drivers/net/tokenring/proteon.c | 423 +++ drivers/net/tokenring/skisa.c | 433 +++ drivers/net/tokenring/smctr.c | 5718 +++++++++++++++++++++++++++++++++++ drivers/net/tokenring/smctr.h | 1585 ++++++++++ drivers/net/tokenring/tms380tr.c | 2352 ++++++++++++++ drivers/net/tokenring/tms380tr.h | 1141 +++++++ drivers/net/tokenring/tmspci.c | 249 ++ 20 files changed, 21893 insertions(+) create mode 100644 drivers/net/tokenring/3c359.c create mode 100644 drivers/net/tokenring/3c359.h create mode 100644 drivers/net/tokenring/Kconfig create mode 100644 drivers/net/tokenring/Makefile create mode 100644 drivers/net/tokenring/abyss.c create mode 100644 drivers/net/tokenring/abyss.h create mode 100644 drivers/net/tokenring/ibmtr.c create mode 100644 drivers/net/tokenring/lanstreamer.c create mode 100644 drivers/net/tokenring/lanstreamer.h create mode 100644 drivers/net/tokenring/madgemc.c create mode 100644 drivers/net/tokenring/madgemc.h create mode 100644 drivers/net/tokenring/olympic.c create mode 100644 drivers/net/tokenring/olympic.h create mode 100644 drivers/net/tokenring/proteon.c create mode 100644 drivers/net/tokenring/skisa.c create mode 100644 drivers/net/tokenring/smctr.c create mode 100644 drivers/net/tokenring/smctr.h create mode 100644 drivers/net/tokenring/tms380tr.c create mode 100644 drivers/net/tokenring/tms380tr.h create mode 100644 drivers/net/tokenring/tmspci.c (limited to 'drivers/net/tokenring') diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c new file mode 100644 index 00000000..ff32befd --- /dev/null +++ b/drivers/net/tokenring/3c359.c @@ -0,0 +1,1846 @@ +/* + * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved + * + * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC + * + * Base Driver Olympic: + * Written 1999 Peter De Schrijver & Mike Phillips + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world. + * + * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel. + * 3/05/01 - Last clean up stuff before submission. + * 2/15/01 - Finally, update to new pci api. + * + * To Do: + */ + +/* + * Technical Card Details + * + * All access to data is done with 16/8 bit transfers. The transfer + * method really sucks. You can only read or write one location at a time. + * + * Also, the microcode for the card must be uploaded if the card does not have + * the flashrom on board. This is a 28K bloat in the driver when compiled + * as a module. + * + * Rx is very simple, status into a ring of descriptors, dma data transfer, + * interrupts to tell us when a packet is received. + * + * Tx is a little more interesting. 
Similar scenario, descriptor and dma data + * transfers, but we don't have to interrupt the card to tell it another packet + * is ready for transmission, we are just doing simple memory writes, not io or mmio + * writes. The card can be set up to simply poll on the next + * descriptor pointer and when this value is non-zero will automatically download + * the next packet. The card then interrupts us when the packet is done. + * + */ + +#define XL_DEBUG 0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "3c359.h" + +static char version[] __devinitdata = +"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ; + +#define FW_NAME "3com/3C359.bin" +MODULE_AUTHOR("Mike Phillips ") ; +MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ; +MODULE_FIRMWARE(FW_NAME); + +/* Module parameters */ + +/* Ring Speed 0,4,16 + * 0 = Autosense + * 4,16 = Selected speed only, no autosense + * This allows the card to be the first on the ring + * and become the active monitor. + * + * WARNING: Some hubs will allow you to insert + * at the wrong speed. + * + * The adapter will _not_ fail to open if there are no + * active monitors on the ring, it will simply open up in + * its last known ringspeed if no ringspeed is specified. + */ + +static int ringspeed[XL_MAX_ADAPTERS] = {0,} ; + +module_param_array(ringspeed, int, NULL, 0); +MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ; + +/* Packet buffer size */ + +static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ; + +module_param_array(pkt_buf_sz, int, NULL, 0) ; +MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ; +/* Message Level */ + +static int message_level[XL_MAX_ADAPTERS] = {0,} ; + +module_param_array(message_level, int, NULL, 0) ; +MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ; +/* + * This is a real nasty way of doing this, but otherwise you + * will be stuck with 1555 lines of hex #'s in the code. 
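+ *
+ * For illustration: the blob is pulled in with request_firmware() using
+ * FW_NAME ("3com/3C359.bin" above), so the image has to be visible to
+ * the firmware loader (typically under /lib/firmware), and it is only
+ * held for the lifetime of the device before being dropped again by
+ * release_firmware() in xl_remove_one().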
+ */ + +static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) = +{ + {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ; + +static int xl_init(struct net_device *dev); +static int xl_open(struct net_device *dev); +static int xl_open_hw(struct net_device *dev) ; +static int xl_hw_reset(struct net_device *dev); +static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev); +static void xl_dn_comp(struct net_device *dev); +static int xl_close(struct net_device *dev); +static void xl_set_rx_mode(struct net_device *dev); +static irqreturn_t xl_interrupt(int irq, void *dev_id); +static int xl_set_mac_address(struct net_device *dev, void *addr) ; +static void xl_arb_cmd(struct net_device *dev); +static void xl_asb_cmd(struct net_device *dev) ; +static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ; +static void xl_wait_misr_flags(struct net_device *dev) ; +static int xl_change_mtu(struct net_device *dev, int mtu); +static void xl_srb_bh(struct net_device *dev) ; +static void xl_asb_bh(struct net_device *dev) ; +static void xl_reset(struct net_device *dev) ; +static void xl_freemem(struct net_device *dev) ; + + +/* EEProm Access Functions */ +static u16 xl_ee_read(struct net_device *dev, int ee_addr) ; +static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ; + +/* Debugging functions */ +#if XL_DEBUG +static void print_tx_state(struct net_device *dev) ; +static void print_rx_state(struct net_device *dev) ; + +static void print_tx_state(struct net_device *dev) +{ + + struct xl_private *xl_priv = netdev_priv(dev); + struct xl_tx_desc *txd ; + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + int i ; + + printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head, + xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; + printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n"); + for (i = 0; i < 16; i++) { + txd = &(xl_priv->xl_tx_ring[i]) ; + printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd), + txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; + } + + printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) ); + + printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) ); + printk("Queue status = %0x\n",netif_running(dev) ) ; +} + +static void print_rx_state(struct net_device *dev) +{ + + struct xl_private *xl_priv = netdev_priv(dev); + struct xl_rx_desc *rxd ; + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + int i ; + + printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail); + printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n"); + for (i = 0; i < 16; i++) { + /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */ + rxd = &(xl_priv->xl_rx_ring[i]) ; + printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd), + rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; + } + + printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR)); + + printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL)); + printk("Queue status = %0x\n",netif_running(dev)); +} +#endif + +/* + * Read values from the on-board EEProm. This looks very strange + * but you have to wait for the EEProm to get/set the value before + * passing/getting the next value from the nic. As with all requests + * on this nic it has to be done in two stages, a) tell the nic which + * memory address you want to access and b) pass/get the value from the nic. 
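+ *
+ * As a rough sketch, each access therefore turns into a command/data
+ * pair on two MMIO registers, along the lines of:
+ *
+ *	writel(IO_WORD_READ | SOME_REGISTER, xl_mmio + MMIO_MAC_ACCESS_CMD);
+ *	value = readw(xl_mmio + MMIO_MACDATA);
+ *
+ * where SOME_REGISTER is a placeholder for any of the register offsets
+ * defined in 3c359.h.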
+ * With the EEProm, you have to wait before and between access a) and b). + * As this is only read at initialization time and the wait period is very + * small we shouldn't have to worry about scheduling issues. + */ + +static u16 xl_ee_read(struct net_device *dev, int ee_addr) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + + /* Wait for EEProm to not be busy */ + writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; + + /* Tell EEProm what we want to do and where */ + writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; + + /* Wait for EEProm to not be busy */ + writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; + + /* Tell EEProm what we want to do and where */ + writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; + + /* Finally read the value from the EEProm */ + writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + return readw(xl_mmio + MMIO_MACDATA) ; +} + +/* + * Write values to the onboard eeprom. As with eeprom read you need to + * set which location to write, wait, value to write, wait, with the + * added twist of having to enable eeprom writes as well. + */ + +static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + + /* Wait for EEProm to not be busy */ + writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; + + /* Enable write/erase */ + writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ; + + /* Wait for EEProm to not be busy */ + writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; + + /* Put the value we want to write into EEDATA */ + writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(ee_value, xl_mmio + MMIO_MACDATA) ; + + /* Tell EEProm to write eevalue into ee_addr */ + writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ; + + /* Wait for EEProm to not be busy, to ensure write gets done */ + writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; + + return ; +} + +static const struct net_device_ops xl_netdev_ops = { + .ndo_open = xl_open, + .ndo_stop = xl_close, + .ndo_start_xmit = xl_xmit, + .ndo_change_mtu = xl_change_mtu, + .ndo_set_multicast_list = xl_set_rx_mode, + .ndo_set_mac_address = xl_set_mac_address, +}; + +static int __devinit xl_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev ; + struct xl_private *xl_priv ; + static int card_no = -1 ; + int i ; + + card_no++ ; + + if (pci_enable_device(pdev)) { + return -ENODEV ; + } + + pci_set_master(pdev); + + if ((i = pci_request_regions(pdev,"3c359"))) { + return i ; + } ; + + /* + * Allowing init_trdev to allocate the private data will align + * xl_private on a 32 bytes boundary which we need for the rx/tx + * descriptors + */ + + dev = alloc_trdev(sizeof(struct xl_private)) ; + if (!dev) { + pci_release_regions(pdev) ; + return -ENOMEM ; + } + xl_priv = netdev_priv(dev); + +#if XL_DEBUG + 
printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n", + pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start); +#endif + + dev->irq=pdev->irq; + dev->base_addr=pci_resource_start(pdev,0) ; + xl_priv->xl_card_name = pci_name(pdev); + xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE); + xl_priv->pdev = pdev ; + + if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) ) + xl_priv->pkt_buf_sz = PKT_BUF_SZ ; + else + xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; + + dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ; + xl_priv->xl_ring_speed = ringspeed[card_no] ; + xl_priv->xl_message_level = message_level[card_no] ; + xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ; + xl_priv->xl_copy_all_options = 0 ; + + if((i = xl_init(dev))) { + iounmap(xl_priv->xl_mmio) ; + free_netdev(dev) ; + pci_release_regions(pdev) ; + return i ; + } + + dev->netdev_ops = &xl_netdev_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + + pci_set_drvdata(pdev,dev) ; + if ((i = register_netdev(dev))) { + printk(KERN_ERR "3C359, register netdev failed\n") ; + pci_set_drvdata(pdev,NULL) ; + iounmap(xl_priv->xl_mmio) ; + free_netdev(dev) ; + pci_release_regions(pdev) ; + return i ; + } + + printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ; + + return 0; +} + +static int xl_init_firmware(struct xl_private *xl_priv) +{ + int err; + + err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev); + if (err) { + printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME); + return err; + } + + if (xl_priv->fw->size < 16) { + printk(KERN_ERR "Bogus length %zu in \"%s\"\n", + xl_priv->fw->size, FW_NAME); + release_firmware(xl_priv->fw); + err = -EINVAL; + } + + return err; +} + +static int __devinit xl_init(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + int err; + + printk(KERN_INFO "%s\n", version); + printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", + xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq); + + spin_lock_init(&xl_priv->xl_lock) ; + + err = xl_init_firmware(xl_priv); + if (err == 0) + err = xl_hw_reset(dev); + + return err; +} + + +/* + * Hardware reset. This needs to be a separate entity as we need to reset the card + * when we change the EEProm settings. + */ + +static int xl_hw_reset(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + unsigned long t ; + u16 i ; + u16 result_16 ; + u8 result_8 ; + u16 start ; + int j ; + + if (xl_priv->fw == NULL) + return -EINVAL; + + /* + * Reset the card. If the card has got the microcode on board, we have + * missed the initialization interrupt, so we must always do this. + */ + + writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; + + /* + * Must wait for cmdInProgress bit (12) to clear before continuing with + * card configuration. 
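+ *
+ * The same wait-for-completion idiom is open coded in several places
+ * below (the TX underrun path even carries a FIX-ME for its missing
+ * timeout), so a hypothetical bounded helper would look roughly like:
+ *
+ *	unsigned long t = jiffies;
+ *	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ *		schedule();
+ *		if (time_after(jiffies, t + 40 * HZ))
+ *			return -ETIMEDOUT;
+ *	}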
+ */ + + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 40 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name); + return -ENODEV; + } + } + + /* + * Enable pmbar by setting bit in CPAttention + */ + + writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + result_8 = readb(xl_mmio + MMIO_MACDATA) ; + result_8 = result_8 | CPA_PMBARVIS ; + writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(result_8, xl_mmio + MMIO_MACDATA) ; + + /* + * Read cpHold bit in pmbar, if cleared we have got Flashrom on board. + * If not, we need to upload the microcode to the card + */ + + writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); + +#if XL_DEBUG + printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA)); +#endif + + if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) { + + /* Set PmBar, privateMemoryBase bits (8:2) to 0 */ + + writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); + result_16 = readw(xl_mmio + MMIO_MACDATA) ; + result_16 = result_16 & ~((0x7F) << 2) ; + writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(result_16,xl_mmio + MMIO_MACDATA) ; + + /* Set CPAttention, memWrEn bit */ + + writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + result_8 = readb(xl_mmio + MMIO_MACDATA) ; + result_8 = result_8 | CPA_MEMWREN ; + writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(result_8, xl_mmio + MMIO_MACDATA) ; + + /* + * Now to write the microcode into the shared ram + * The microcode must finish at position 0xFFFF, + * so we must subtract to get the start position for the code + * + * Looks strange but ensures compiler only uses + * 16 bit unsigned int + */ + start = (0xFFFF - (xl_priv->fw->size) + 1) ; + + printk(KERN_INFO "3C359: Uploading Microcode: "); + + for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) { + writel(MEM_BYTE_WRITE | 0XD0000 | i, + xl_mmio + MMIO_MAC_ACCESS_CMD); + writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA); + if (j % 1024 == 0) + printk("."); + } + printk("\n") ; + + for (i = 0; i < 16; i++) { + writel((MEM_BYTE_WRITE | 0xDFFF0) + i, + xl_mmio + MMIO_MAC_ACCESS_CMD); + writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i], + xl_mmio + MMIO_MACDATA); + } + + /* + * Have to write the start address of the upload to FFF4, but + * the address must be >> 4. You do not want to know how long + * it took me to discover this. + */ + + writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(start >> 4, xl_mmio + MMIO_MACDATA); + + /* Clear the CPAttention, memWrEn Bit */ + + writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + result_8 = readb(xl_mmio + MMIO_MACDATA) ; + result_8 = result_8 & ~CPA_MEMWREN ; + writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(result_8, xl_mmio + MMIO_MACDATA) ; + + /* Clear the cpHold bit in pmbar */ + + writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); + result_16 = readw(xl_mmio + MMIO_MACDATA) ; + result_16 = result_16 & ~PMB_CPHOLD ; + writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(result_16,xl_mmio + MMIO_MACDATA) ; + + + } /* If microcode upload required */ + + /* + * The card should now go though a self test procedure and get itself ready + * to be opened, we must wait for an srb response with the initialization + * information. 
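+ *
+ * (A worked example of the upload arithmetic above, using a purely
+ *  hypothetical 0x6000 byte image: start = 0xFFFF - 0x6000 + 1 =
+ *  0xA000, so the code lands in shared RAM at 0xDA000-0xDFFFF and
+ *  0xA000 >> 4 = 0x0A00 is the word written at offset 0xDFFF4.)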
+ */ + +#if XL_DEBUG + printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name); +#endif + + writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ; + + t=jiffies; + while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { + schedule(); + if (time_after(jiffies, t + 15 * HZ)) { + printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); + return -ENODEV; + } + } + + /* + * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh, + * DnPriReqThresh, read the tech docs if you want to know what + * values they need to be. + */ + + writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(0xD000, xl_mmio + MMIO_MACDATA) ; + + writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(0X0020, xl_mmio + MMIO_MACDATA) ; + + writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ; + + writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ; + writeb(0x04, xl_mmio + DNPRIREQTHRESH) ; + + /* + * Read WRBR to provide the location of the srb block, have to use byte reads not word reads. + * Tech docs have this wrong !!!! + */ + + writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ; + writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ; + +#if XL_DEBUG + writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if ( readw(xl_mmio + MMIO_MACDATA) & 2) { + printk(KERN_INFO "Default ring speed 4 mbps\n"); + } else { + printk(KERN_INFO "Default ring speed 16 mbps\n"); + } + printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb); +#endif + + return 0; +} + +static int xl_open(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + u8 i ; + __le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */ + int open_err ; + + u16 switchsettings, switchsettings_eeprom ; + + if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev)) + return -EAGAIN; + + /* + * Read the information from the EEPROM that we need. + */ + + hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10)); + hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11)); + hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12)); + + /* Ring speed */ + + switchsettings_eeprom = xl_ee_read(dev,0x08) ; + switchsettings = switchsettings_eeprom ; + + if (xl_priv->xl_ring_speed != 0) { + if (xl_priv->xl_ring_speed == 4) + switchsettings = switchsettings | 0x02 ; + else + switchsettings = switchsettings & ~0x02 ; + } + + /* Only write EEProm if there has been a change */ + if (switchsettings != switchsettings_eeprom) { + xl_ee_write(dev,0x08,switchsettings) ; + /* Hardware reset after changing EEProm */ + xl_hw_reset(dev) ; + } + + memcpy(dev->dev_addr,hwaddr,dev->addr_len) ; + + open_err = xl_open_hw(dev) ; + + /* + * This really needs to be cleaned up with better error reporting. 
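+ *
+ * For reference, xl_open_hw() below builds the 16 bit open_err from
+ * two srb bytes, roughly:
+ *
+ *	open_err = (byte at srb + 2) << 8 | (byte at srb + 7);
+ *
+ * and the open_err & 0x07 test below picks out the class of errors
+ * that is worth one retry at the other ring speed.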
+ */ + + if (open_err != 0) { /* Something went wrong with the open command */ + if (open_err & 0x07) { /* Wrong speed, retry at different speed */ + printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name); + switchsettings = switchsettings ^ 2 ; + xl_ee_write(dev,0x08,switchsettings) ; + xl_hw_reset(dev) ; + open_err = xl_open_hw(dev) ; + if (open_err != 0) { + printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name); + free_irq(dev->irq,dev) ; + return -ENODEV ; + } + } else { + printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ; + free_irq(dev->irq,dev) ; + return -ENODEV ; + } + } + + /* + * Now to set up the Rx and Tx buffer structures + */ + /* These MUST be on 8 byte boundaries */ + xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); + if (xl_priv->xl_tx_ring == NULL) { + printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n", + dev->name); + free_irq(dev->irq,dev); + return -ENOMEM; + } + xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); + if (xl_priv->xl_rx_ring == NULL) { + printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", + dev->name); + free_irq(dev->irq,dev); + kfree(xl_priv->xl_tx_ring); + return -ENOMEM; + } + + /* Setup Rx Ring */ + for (i=0 ; i < XL_RX_RING_SIZE ; i++) { + struct sk_buff *skb ; + + skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; + if (skb==NULL) + break ; + + skb->dev = dev ; + xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); + xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG; + xl_priv->rx_ring_skb[i] = skb ; + } + + if (i==0) { + printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name); + free_irq(dev->irq,dev) ; + kfree(xl_priv->xl_tx_ring); + kfree(xl_priv->xl_rx_ring); + return -EIO ; + } + + xl_priv->rx_ring_no = i ; + xl_priv->rx_ring_tail = 0 ; + xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ; + for (i=0;i<(xl_priv->rx_ring_no-1);i++) { + xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1))); + } + xl_priv->xl_rx_ring[i].upnextptr = 0 ; + + writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ; + + /* Setup Tx Ring */ + + xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ; + + xl_priv->tx_ring_head = 1 ; + xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */ + xl_priv->free_ring_entries = XL_TX_RING_SIZE ; + + /* + * Setup the first dummy DPD entry for polling to start working. 
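+ * The card polls the dnnextptr of the last descriptor it downloaded,
+ * so this empty DPD gives it something valid to poll until xl_xmit()
+ * links the first real packet in behind it (which is also why
+ * xl_dn_comp() special cases tx_ring_tail == 255).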
+ */ + + xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY; + xl_priv->xl_tx_ring[0].buffer = 0 ; + xl_priv->xl_tx_ring[0].buffer_length = 0 ; + xl_priv->xl_tx_ring[0].dnnextptr = 0 ; + + writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ; + writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ; + writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ; + writel(DNENABLE, xl_mmio + MMIO_COMMAND) ; + writeb(0x40, xl_mmio + MMIO_DNPOLL) ; + + /* + * Enable interrupts on the card + */ + + writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; + writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; + + netif_start_queue(dev) ; + return 0; + +} + +static int xl_open_hw(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + u8 __iomem *xl_mmio = xl_priv->xl_mmio ; + u16 vsoff ; + char ver_str[33]; + int open_err ; + int i ; + unsigned long t ; + + /* + * Okay, let's build up the Open.NIC srb command + * + */ + + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ; + + /* + * Use this as a test byte, if it comes back with the same value, the command didn't work + */ + + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0xff,xl_mmio + MMIO_MACDATA) ; + + /* Open options */ + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0x00, xl_mmio + MMIO_MACDATA) ; + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0x00, xl_mmio + MMIO_MACDATA) ; + + /* + * Node address, be careful here, the docs say you can just put zeros here and it will use + * the hardware address, it doesn't, you must include the node address in the open command. + */ + + if (xl_priv->xl_laa[0]) { /* If using a LAA address */ + for (i=10;i<16;i++) { + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ; + } + memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; + } else { /* Regular hardware address */ + for (i=10;i<16;i++) { + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ; + } + } + + /* Default everything else to 0 */ + for (i = 16; i < 34; i++) { + writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0x00,xl_mmio + MMIO_MACDATA) ; + } + + /* + * Set the csrb bit in the MISR register + */ + + xl_wait_misr_flags(dev) ; + writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0xFF, xl_mmio + MMIO_MACDATA) ; + writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ; + + /* + * Now wait for the command to run + */ + + t=jiffies; + while (! 
(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { + schedule(); + if (time_after(jiffies, t + 40 * HZ)) { + printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); + break ; + } + } + + /* + * Let's interpret the open response + */ + + writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if (readb(xl_mmio + MMIO_MACDATA)!=0) { + open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ; + writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + open_err |= readb(xl_mmio + MMIO_MACDATA) ; + return open_err ; + } else { + writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; + printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ; + printk("ASB: %04x",xl_priv->asb ) ; + writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ; + + writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; + printk(", ARB: %04x\n",xl_priv->arb ); + writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ; + + /* + * Interesting, sending the individual characters directly to printk was causing klogd to use + * use 100% of processor time, so we build up the string and print that instead. + */ + + for (i=0;i<0x20;i++) { + writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; + } + ver_str[i] = '\0' ; + printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str); + } + + /* + * Issue the AckInterrupt + */ + writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + + return 0 ; +} + +/* + * There are two ways of implementing rx on the 359 NIC, either + * interrupt driven or polling. We are going to uses interrupts, + * it is the easier way of doing things. + * + * The Rx works with a ring of Rx descriptors. At initialise time the ring + * entries point to the next entry except for the last entry in the ring + * which points to 0. The card is programmed with the location of the first + * available descriptor and keeps reading the next_ptr until next_ptr is set + * to 0. Hopefully with a ring size of 16 the card will never get to read a next_ptr + * of 0. As the Rx interrupt is received we copy the frame up to the protocol layers + * and then point the end of the ring to our current position and point our current + * position to 0, therefore making the current position the last position on the ring. + * The last position on the ring therefore loops continually loops around the rx ring. + * + * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head + * expands as the card adds new packets and we go around eating the tail processing the + * packets.) + * + * Undoubtably it could be streamlined and improved upon, but at the moment it works + * and the fast path through the routine is fine. + * + * adv_rx_ring could be inlined to increase performance, but its called a *lot* of times + * in xl_rx so would increase the size of the function significantly. 
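+ *
+ * A quick worked example of the index arithmetic in adv_rx_ring(),
+ * assuming the usual ring of 16 entries: with rx_ring_tail = 0,
+ *
+ *	prev_ring_loc = (0 + 16 - 1) & 15 = 15
+ *
+ * so slot 15 (the old end of the list) is re-pointed at slot 0, and
+ * slot 0 becomes the new end by having its upnextptr zeroed; the tail
+ * of the "snake" moves forward one descriptor at a time.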
+ */ + +static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */ +{ + struct xl_private *xl_priv=netdev_priv(dev); + int n = xl_priv->rx_ring_tail; + int prev_ring_loc; + + prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1); + xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n)); + xl_priv->xl_rx_ring[n].framestatus = 0; + xl_priv->xl_rx_ring[n].upnextptr = 0; + xl_priv->rx_ring_tail++; + xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1); +} + +static void xl_rx(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + struct sk_buff *skb, *skb2 ; + int frame_length = 0, copy_len = 0 ; + int temp_ring_loc ; + + /* + * Receive the next frame, loop around the ring until all frames + * have been received. + */ + + while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */ + + if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */ + + /* + * This is a pain, you need to go through all the descriptors until the last one + * for this frame to find the framelength + */ + + temp_ring_loc = xl_priv->rx_ring_tail ; + + while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) { + temp_ring_loc++ ; + temp_ring_loc &= (XL_RX_RING_SIZE-1) ; + } + + frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF; + + skb = dev_alloc_skb(frame_length) ; + + if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */ + printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ; + while (xl_priv->rx_ring_tail != temp_ring_loc) + adv_rx_ring(dev) ; + + adv_rx_ring(dev) ; /* One more time just for luck :) */ + dev->stats.rx_dropped++ ; + + writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; + return ; + } + + while (xl_priv->rx_ring_tail != temp_ring_loc) { + copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF; + frame_length -= copy_len ; + pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], + skb_put(skb, copy_len), + copy_len); + pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); + adv_rx_ring(dev) ; + } + + /* Now we have found the last fragment */ + pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], + skb_put(skb,copy_len), frame_length); +/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ + pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); + adv_rx_ring(dev) ; + skb->protocol = tr_type_trans(skb,dev) ; + netif_rx(skb) ; + + } else { /* Single Descriptor Used, simply swap buffers over, fast path */ + + frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF; + + skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; + + if 
(skb==NULL) { /* Still need to fix the rx ring */ + printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name); + adv_rx_ring(dev) ; + dev->stats.rx_dropped++ ; + writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; + return ; + } + + skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; + pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; + skb_put(skb2, frame_length) ; + skb2->protocol = tr_type_trans(skb2,dev) ; + + xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ; + xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); + xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG; + adv_rx_ring(dev) ; + dev->stats.rx_packets++ ; + dev->stats.rx_bytes += frame_length ; + + netif_rx(skb2) ; + } /* if multiple buffers */ + } /* while packet to do */ + + /* Clear the updComplete interrupt */ + writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; + return ; +} + +/* + * This is ruthless, it doesn't care what state the card is in it will + * completely reset the adapter. + */ + +static void xl_reset(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + unsigned long t; + + writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; + + /* + * Must wait for cmdInProgress bit (12) to clear before continuing with + * card configuration. + */ + + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + if (time_after(jiffies, t + 40 * HZ)) { + printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); + break ; + } + } + +} + +static void xl_freemem(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + int i ; + + for (i=0;irx_ring_skb[xl_priv->rx_ring_tail]) ; + pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE); + xl_priv->rx_ring_tail++ ; + xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1; + } + + /* unmap ring */ + pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ; + + pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ; + + kfree(xl_priv->xl_rx_ring) ; + kfree(xl_priv->xl_tx_ring) ; + + return ; +} + +static irqreturn_t xl_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct xl_private *xl_priv =netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + u16 intstatus, macstatus ; + + intstatus = readw(xl_mmio + MMIO_INTSTATUS) ; + + if (!(intstatus & 1)) /* We didn't generate the interrupt */ + return IRQ_NONE; + + spin_lock(&xl_priv->xl_lock) ; + + /* + * Process the interrupt + */ + /* + * Something fishy going on here, we shouldn't get 0001 ints, not fatal though. + */ + if (intstatus == 0x0001) { + writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + printk(KERN_INFO "%s: 00001 int received\n",dev->name); + } else { + if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { + + /* + * Host Error. + * It may be possible to recover from this, but usually it means something + * is seriously fubar, so we just close the adapter. 
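+ * (Closing here means the code below stops the queue, frees the rx/tx
+ *  rings and releases the irq before resetting the card, so nothing is
+ *  left running on a wedged adapter.)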
+ */ + + if (intstatus & HOSTERRINT) { + printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus); + writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; + printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); + netif_stop_queue(dev) ; + xl_freemem(dev) ; + free_irq(dev->irq,dev); + xl_reset(dev) ; + writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + spin_unlock(&xl_priv->xl_lock) ; + return IRQ_HANDLED; + } /* Host Error */ + + if (intstatus & SRBRINT ) { /* Srbc interrupt */ + writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + if (xl_priv->srb_queued) + xl_srb_bh(dev) ; + } /* SRBR Interrupt */ + + if (intstatus & TXUNDERRUN) { /* Issue DnReset command */ + writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */ + /* !!! FIX-ME !!!! + Must put a timeout check here ! */ + /* Empty Loop */ + } + printk(KERN_WARNING "%s: TX Underrun received\n",dev->name); + writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + } /* TxUnderRun */ + + if (intstatus & ARBCINT ) { /* Arbc interrupt */ + xl_arb_cmd(dev) ; + } /* Arbc */ + + if (intstatus & ASBFINT) { + if (xl_priv->asb_queued == 1) { + xl_asb_cmd(dev) ; + } else if (xl_priv->asb_queued == 2) { + xl_asb_bh(dev) ; + } else { + writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; + } + } /* Asbf */ + + if (intstatus & UPCOMPINT ) /* UpComplete */ + xl_rx(dev) ; + + if (intstatus & DNCOMPINT ) /* DnComplete */ + xl_dn_comp(dev) ; + + if (intstatus & HARDERRINT ) { /* Hardware error */ + writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + macstatus = readw(xl_mmio + MMIO_MACDATA) ; + printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name); + if (macstatus & (1<<14)) + printk(KERN_WARNING "tchk error: Unrecoverable error\n"); + if (macstatus & (1<<3)) + printk(KERN_WARNING "eint error: Internal watchdog timer expired\n"); + if (macstatus & (1<<2)) + printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n"); + printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; + printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); + netif_stop_queue(dev) ; + xl_freemem(dev) ; + free_irq(dev->irq,dev); + unregister_netdev(dev) ; + free_netdev(dev) ; + xl_reset(dev) ; + writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + spin_unlock(&xl_priv->xl_lock) ; + return IRQ_HANDLED; + } + } else { + printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus); + writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + } + } + + /* Turn interrupts back on */ + + writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; + writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; + + spin_unlock(&xl_priv->xl_lock) ; + return IRQ_HANDLED; +} + +/* + * Tx - Polling configuration + */ + +static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + struct xl_tx_desc *txd ; + int tx_head, tx_tail, tx_prev ; + unsigned long flags ; + + spin_lock_irqsave(&xl_priv->xl_lock,flags) ; + + netif_stop_queue(dev) ; + + if (xl_priv->free_ring_entries > 1 ) { + /* + * Set up the descriptor for the packet + */ + tx_head = xl_priv->tx_ring_head ; + tx_tail = xl_priv->tx_ring_tail ; + + txd = &(xl_priv->xl_tx_ring[tx_head]) ; + txd->dnnextptr = 0 ; + txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE; + 
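+		/* The whole skb is mapped as a single DMA fragment below;
+		 * TXDNFRAGLAST marks it as the only (and last) fragment of
+		 * this descriptor. */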
txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); + txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST; + xl_priv->tx_ring_skb[tx_head] = skb ; + dev->stats.tx_packets++ ; + dev->stats.tx_bytes += skb->len ; + + /* + * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1 + * to ensure no negative numbers in unsigned locations. + */ + + tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ; + + xl_priv->tx_ring_head++ ; + xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ; + xl_priv->free_ring_entries-- ; + + xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head)); + + /* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */ + /* readl(xl_mmio + MMIO_DNLISTPTR) ; */ + + netif_wake_queue(dev) ; + + spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; + + return NETDEV_TX_OK; + } else { + spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; + return NETDEV_TX_BUSY; + } + +} + +/* + * The NIC has told us that a packet has been downloaded onto the card, we must + * find out which packet it has done, clear the skb and information for the packet + * then advance around the ring for all transmitted packets + */ + +static void xl_dn_comp(struct net_device *dev) +{ + struct xl_private *xl_priv=netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + struct xl_tx_desc *txd ; + + + if (xl_priv->tx_ring_tail == 255) {/* First time */ + xl_priv->xl_tx_ring[0].framestartheader = 0 ; + xl_priv->xl_tx_ring[0].dnnextptr = 0 ; + xl_priv->tx_ring_tail = 1 ; + } + + while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) { + txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ; + pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE); + txd->framestartheader = 0 ; + txd->buffer = cpu_to_le32(0xdeadbeef); + txd->buffer_length = 0 ; + dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ; + xl_priv->tx_ring_tail++ ; + xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ; + xl_priv->free_ring_entries++ ; + } + + netif_wake_queue(dev) ; + + writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; +} + +/* + * Close the adapter properly. + * This srb reply cannot be handled from interrupt context as we have + * to free the interrupt from the driver. + */ + +static int xl_close(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + unsigned long t ; + + netif_stop_queue(dev) ; + + /* + * Close the adapter, need to stall the rx and tx queues. 
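+ * The sequence below is: stall and disable the download engine, stall
+ * the upload engine, turn interrupts off, issue the CLOSE_NIC srb and
+ * poll for its response, then reset both DMA engines and finally put
+ * the card back through xl_hw_reset().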
+ */ + + writew(DNSTALL, xl_mmio + MMIO_COMMAND) ; + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name); + break ; + } + } + writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ; + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name); + break ; + } + } + writew(UPSTALL, xl_mmio + MMIO_COMMAND) ; + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name); + break ; + } + } + + /* Turn off interrupts, we will still get the indication though + * so we can trap it + */ + + writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ; + + xl_srb_cmd(dev,CLOSE_NIC) ; + + t=jiffies; + while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name); + break ; + } + } + /* Read the srb response from the adapter */ + + writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD); + if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { + printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name); + } else { + writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if (readb(xl_mmio + MMIO_MACDATA)==0) { + printk(KERN_INFO "%s: Adapter has been closed\n",dev->name); + writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + + xl_freemem(dev) ; + free_irq(dev->irq,dev) ; + } else { + printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ; + } + } + + /* Reset the upload and download logic */ + + writew(UPRESET, xl_mmio + MMIO_COMMAND) ; + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name); + break ; + } + } + writew(DNRESET, xl_mmio + MMIO_COMMAND) ; + t=jiffies; + while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { + schedule(); + if (time_after(jiffies, t + 10 * HZ)) { + printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name); + break ; + } + } + xl_hw_reset(dev) ; + return 0 ; +} + +static void xl_set_rx_mode(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + unsigned char dev_mc_address[4] ; + u16 options ; + + if (dev->flags & IFF_PROMISC) + options = 0x0004 ; + else + options = 0x0000 ; + + if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */ + xl_priv->xl_copy_all_options = options ; + xl_srb_cmd(dev, SET_RECEIVE_MODE) ; + return ; + } + + dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; + + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; + } + + if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ + memcpy(xl_priv->xl_functional_addr, 
dev_mc_address,4) ; + xl_srb_cmd(dev, SET_FUNC_ADDRESS) ; + } + return ; +} + + +/* + * We issued an srb command and now we must read + * the response from the completed command. + */ + +static void xl_srb_bh(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + u8 srb_cmd, ret_code ; + int i ; + + writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + srb_cmd = readb(xl_mmio + MMIO_MACDATA) ; + writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + ret_code = readb(xl_mmio + MMIO_MACDATA) ; + + /* Ret_code is standard across all commands */ + + switch (ret_code) { + case 1: + printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; + break ; + case 4: + printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd); + break ; + + case 6: + printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd); + break ; + + case 0: /* Successful command execution */ + switch (srb_cmd) { + case READ_LOG: /* Returns 14 bytes of data from the NIC */ + if(xl_priv->xl_message_level) + printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ; + /* + * We still have to read the log even if message_level = 0 and we don't want + * to see it + */ + for (i=0;i<14;i++) { + writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if(xl_priv->xl_message_level) + printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ; + } + printk("\n") ; + break ; + case SET_FUNC_ADDRESS: + if(xl_priv->xl_message_level) + printk(KERN_INFO "%s: Functional Address Set\n",dev->name); + break ; + case CLOSE_NIC: + if(xl_priv->xl_message_level) + printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name); + break ; + case SET_MULTICAST_MODE: + if(xl_priv->xl_message_level) + printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ; + break ; + case SET_RECEIVE_MODE: + if(xl_priv->xl_message_level) { + if (xl_priv->xl_copy_all_options == 0x0004) + printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name); + else + printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name); + } + break ; + + } /* switch */ + break ; + } /* switch */ + return ; +} + +static int xl_set_mac_address (struct net_device *dev, void *addr) +{ + struct sockaddr *saddr = addr ; + struct xl_private *xl_priv = netdev_priv(dev); + + if (netif_running(dev)) { + printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; + return -EIO ; + } + + memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ; + + if (xl_priv->xl_message_level) { + printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0], + xl_priv->xl_laa[1], xl_priv->xl_laa[2], + xl_priv->xl_laa[3], xl_priv->xl_laa[4], + xl_priv->xl_laa[5]); + } + + return 0 ; +} + +static void xl_arb_cmd(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + u8 arb_cmd ; + u16 lan_status, lan_status_diff ; + + writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + arb_cmd = readb(xl_mmio + MMIO_MACDATA) ; + + if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */ + writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + + printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + 
MMIO_MACDATA) )) ; + + lan_status = swab16(readw(xl_mmio + MMIO_MACDATA)); + + /* Acknowledge interrupt, this tells nic we are done with the arb */ + writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + + lan_status_diff = xl_priv->xl_lan_status ^ lan_status ; + + if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { + if (lan_status_diff & LSC_LWF) + printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name); + if (lan_status_diff & LSC_ARW) + printk(KERN_WARNING "%s: Auto removal error\n",dev->name); + if (lan_status_diff & LSC_FPE) + printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name); + if (lan_status_diff & LSC_RR) + printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name); + + /* Adapter has been closed by the hardware */ + + netif_stop_queue(dev); + xl_freemem(dev) ; + free_irq(dev->irq,dev); + + printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); + } /* If serious error */ + + if (xl_priv->xl_message_level) { + if (lan_status_diff & LSC_SIG_LOSS) + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); + if (lan_status_diff & LSC_HARD_ERR) + printk(KERN_INFO "%s: Beaconing\n",dev->name); + if (lan_status_diff & LSC_SOFT_ERR) + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); + if (lan_status_diff & LSC_TRAN_BCN) + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); + if (lan_status_diff & LSC_SS) + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); + if (lan_status_diff & LSC_RING_REC) + printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); + if (lan_status_diff & LSC_FDX_MODE) + printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name); + } + + if (lan_status_diff & LSC_CO) { + if (xl_priv->xl_message_level) + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); + /* Issue READ.LOG command */ + xl_srb_cmd(dev, READ_LOG) ; + } + + /* There is no command in the tech docs to issue the read_sr_counters */ + if (lan_status_diff & LSC_SR_CO) { + if (xl_priv->xl_message_level) + printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); + } + + xl_priv->xl_lan_status = lan_status ; + + } /* Lan.change.status */ + else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */ +#if XL_DEBUG + printk(KERN_INFO "Received.Data\n"); +#endif + writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ; + + /* Now we are going to be really basic here and not do anything + * with the data at all. The tech docs do not give me enough + * information to calculate the buffers properly so we're + * just going to tell the nic that we've dealt with the frame + * anyway. + */ + + /* Acknowledge interrupt, this tells nic we are done with the arb */ + writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; + + /* Is the ASB free ? 
*/ + + xl_priv->asb_queued = 0 ; + writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if (readb(xl_mmio + MMIO_MACDATA) != 0xff) { + xl_priv->asb_queued = 1 ; + + xl_wait_misr_flags(dev) ; + + writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD); + writeb(0xff, xl_mmio + MMIO_MACDATA) ; + writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ; + return ; + /* Drop out and wait for the bottom half to be run */ + } + + xl_asb_cmd(dev) ; + + } else { + printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd); + } + + /* Acknowledge the arb interrupt */ + + writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; + + return ; +} + + +/* + * There is only one asb command, but we can get called from different + * places. + */ + +static void xl_asb_cmd(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + + if (xl_priv->asb_queued == 1) + writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; + + writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0x81, xl_mmio + MMIO_MACDATA) ; + + writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ; + + xl_wait_misr_flags(dev) ; + + writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD); + writeb(0xff, xl_mmio + MMIO_MACDATA) ; + + writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ; + + xl_priv->asb_queued = 2 ; + + return ; +} + +/* + * This will only get called if there was an error + * from the asb cmd. 
+ */ +static void xl_asb_bh(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + u8 ret_code ; + + writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + ret_code = readb(xl_mmio + MMIO_MACDATA) ; + switch (ret_code) { + case 0x01: + printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name); + break ; + case 0x26: + printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name); + break ; + case 0x40: + printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name); + break ; + } + xl_priv->asb_queued = 0 ; + writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; + return ; +} + +/* + * Issue srb commands to the nic + */ + +static void xl_srb_cmd(struct net_device *dev, int srb_cmd) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + + switch (srb_cmd) { + case READ_LOG: + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ; + break; + + case CLOSE_NIC: + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ; + break ; + + case SET_RECEIVE_MODE: + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ; + writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ; + break ; + + case SET_FUNC_ADDRESS: + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ; + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ; + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ; + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ; + writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ; + break ; + } /* switch */ + + + xl_wait_misr_flags(dev) ; + + /* Write 0xff to the CSRB flag */ + writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0xFF, xl_mmio + MMIO_MACDATA) ; + /* Set csrb bit in MISR register to process command */ + writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ; + xl_priv->srb_queued = 1 ; + + return ; +} + +/* + * This is nasty, to use the MISR command you have to wait for 6 memory locations + * to be zero. This is the way the driver does on other OS'es so we should be ok with + * the empty loop. 
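+ *
+ * (The six locations swept by the 0xDFFE0 | i loop below are the
+ *  MF_SSBF through MF_CSRB flag bytes at 0xDFFE0-0xDFFE5 defined in
+ *  3c359.h.)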
+ */ + +static void xl_wait_misr_flags(struct net_device *dev) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u8 __iomem * xl_mmio = xl_priv->xl_mmio ; + + int i ; + + writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */ + for (i=0; i<6; i++) { + writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + while (readb(xl_mmio + MMIO_MACDATA) != 0 ) {} ; /* Empty Loop */ + } + } + + writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ; + writeb(0x80, xl_mmio + MMIO_MACDATA) ; + + return ; +} + +/* + * Change mtu size, this should work the same as olympic + */ + +static int xl_change_mtu(struct net_device *dev, int mtu) +{ + struct xl_private *xl_priv = netdev_priv(dev); + u16 max_mtu ; + + if (xl_priv->xl_ring_speed == 4) + max_mtu = 4500 ; + else + max_mtu = 18000 ; + + if (mtu > max_mtu) + return -EINVAL ; + if (mtu < 100) + return -EINVAL ; + + dev->mtu = mtu ; + xl_priv->pkt_buf_sz = mtu + TR_HLEN ; + + return 0 ; +} + +static void __devexit xl_remove_one (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct xl_private *xl_priv=netdev_priv(dev); + + release_firmware(xl_priv->fw); + unregister_netdev(dev); + iounmap(xl_priv->xl_mmio) ; + pci_release_regions(pdev) ; + pci_set_drvdata(pdev,NULL) ; + free_netdev(dev); + return ; +} + +static struct pci_driver xl_3c359_driver = { + .name = "3c359", + .id_table = xl_pci_tbl, + .probe = xl_probe, + .remove = __devexit_p(xl_remove_one), +}; + +static int __init xl_pci_init (void) +{ + return pci_register_driver(&xl_3c359_driver); +} + + +static void __exit xl_pci_cleanup (void) +{ + pci_unregister_driver (&xl_3c359_driver); +} + +module_init(xl_pci_init); +module_exit(xl_pci_cleanup); + +MODULE_LICENSE("GPL") ; diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h new file mode 100644 index 00000000..bcb1a6b4 --- /dev/null +++ b/drivers/net/tokenring/3c359.h @@ -0,0 +1,291 @@ +/* + * 3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved + * + * Linux driver for 3Com 3C359 Token Link PCI XL cards. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License Version 2 or (at your option) + * any later verion, incorporated herein by reference. 
+ */ + +/* Memory Access Commands */ +#define IO_BYTE_READ 0x28 << 24 +#define IO_BYTE_WRITE 0x18 << 24 +#define IO_WORD_READ 0x20 << 24 +#define IO_WORD_WRITE 0x10 << 24 +#define MMIO_BYTE_READ 0x88 << 24 +#define MMIO_BYTE_WRITE 0x48 << 24 +#define MMIO_WORD_READ 0x80 << 24 +#define MMIO_WORD_WRITE 0x40 << 24 +#define MEM_BYTE_READ 0x8C << 24 +#define MEM_BYTE_WRITE 0x4C << 24 +#define MEM_WORD_READ 0x84 << 24 +#define MEM_WORD_WRITE 0x44 << 24 + +#define PMBAR 0x1C80 +#define PMB_CPHOLD (1<<10) + +#define CPATTENTION 0x180D +#define CPA_PMBARVIS (1<<7) +#define CPA_MEMWREN (1<<6) + +#define SWITCHSETTINGS 0x1C88 +#define EECONTROL 0x1C8A +#define EEDATA 0x1C8C +#define EEREAD 0x0080 +#define EEWRITE 0x0040 +#define EEERASE 0x0060 +#define EE_ENABLE_WRITE 0x0030 +#define EEBUSY (1<<15) + +#define WRBR 0xCDE02 +#define WWOR 0xCDE04 +#define WWCR 0xCDE06 +#define MACSTATUS 0xCDE08 +#define MISR_RW 0xCDE0B +#define MISR_AND 0xCDE2B +#define MISR_SET 0xCDE4B +#define RXBUFAREA 0xCDE10 +#define RXEARLYTHRESH 0xCDE12 +#define TXSTARTTHRESH 0x58 +#define DNPRIREQTHRESH 0x2C + +#define MISR_CSRB (1<<5) +#define MISR_RASB (1<<4) +#define MISR_SRBFR (1<<3) +#define MISR_ASBFR (1<<2) +#define MISR_ARBF (1<<1) + +/* MISR Flags memory locations */ +#define MF_SSBF 0xDFFE0 +#define MF_ARBF 0xDFFE1 +#define MF_ASBFR 0xDFFE2 +#define MF_SRBFR 0xDFFE3 +#define MF_RASB 0xDFFE4 +#define MF_CSRB 0xDFFE5 + +#define MMIO_MACDATA 0x10 +#define MMIO_MAC_ACCESS_CMD 0x14 +#define MMIO_TIMER 0x1A +#define MMIO_DMA_CTRL 0x20 +#define MMIO_DNLISTPTR 0x24 +#define MMIO_HASHFILTER 0x28 +#define MMIO_CONFIG 0x29 +#define MMIO_DNPRIREQTHRESH 0x2C +#define MMIO_DNPOLL 0x2D +#define MMIO_UPPKTSTATUS 0x30 +#define MMIO_FREETIMER 0x34 +#define MMIO_COUNTDOWN 0x36 +#define MMIO_UPLISTPTR 0x38 +#define MMIO_UPPOLL 0x3C +#define MMIO_UPBURSTTHRESH 0x40 +#define MMIO_DNBURSTTHRESH 0x41 +#define MMIO_INTSTATUS_AUTO 0x56 +#define MMIO_TXSTARTTHRESH 0x58 +#define MMIO_INTERRUPTENABLE 0x5A +#define MMIO_INDICATIONENABLE 0x5C +#define MMIO_COMMAND 0x5E /* These two are meant to be the same */ +#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */ +#define INTSTAT_CMD_IN_PROGRESS (1<<12) +#define INTSTAT_SRB (1<<14) +#define INTSTAT_INTLATCH (1<<0) + +/* Indication / Interrupt Mask + * Annoyingly the bits to be set in the indication and interrupt enable + * do not match with the actual bits received in the interrupt, although + * they are in the same order. + * The mapping for the indication / interrupt are: + * Bit Indication / Interrupt + * 0 HostError + * 1 txcomplete + * 2 updneeded + * 3 rxcomplete + * 4 intrequested + * 5 macerror + * 6 dncomplete + * 7 upcomplete + * 8 txunderrun + * 9 asbf + * 10 srbr + * 11 arbc + * + * The only ones we don't want to receive are txcomplete and rxcomplete + * we use dncomplete and upcomplete instead. 
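+ *
+ * That is why INT_MASK below works out to 0xFF5: all twelve bits set
+ * except bit 1 (txcomplete) and bit 3 (rxcomplete), i.e.
+ *
+ *     0xFFF & ~((1 << 1) | (1 << 3)) == 0xFF5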
+ */ + +#define INT_MASK 0xFF5 + +/* Note the subtle difference here, IND and INT */ + +#define SETINDENABLE (8<<12) +#define SETINTENABLE (7<<12) +#define SRBBIT (1<<10) +#define ASBBIT (1<<9) +#define ARBBIT (1<<11) + +#define SRB 0xDFE90 +#define ASB 0xDFED0 +#define ARB 0xD0000 +#define SCRATCH 0xDFEF0 + +#define INT_REQUEST 0x6000 /* (6 << 12) */ +#define ACK_INTERRUPT 0x6800 /* (13 <<11) */ +#define GLOBAL_RESET 0x00 +#define DNDISABLE 0x5000 +#define DNENABLE 0x4800 +#define DNSTALL 0x3002 +#define DNRESET 0x5800 +#define DNUNSTALL 0x3003 +#define UPRESET 0x2800 +#define UPSTALL 0x3000 +#define UPUNSTALL 0x3001 +#define SETCONFIG 0x4000 +#define SETTXSTARTTHRESH 0x9800 + +/* Received Interrupts */ +#define ASBFINT (1<<13) +#define SRBRINT (1<<14) +#define ARBCINT (1<<15) +#define TXUNDERRUN (1<<11) + +#define UPCOMPINT (1<<10) +#define DNCOMPINT (1<<9) +#define HARDERRINT (1<<7) +#define RXCOMPLETE (1<<4) +#define TXCOMPINT (1<<2) +#define HOSTERRINT (1<<1) + +/* Receive descriptor bits */ +#define RXOVERRUN cpu_to_le32(1<<19) +#define RXFC cpu_to_le32(1<<21) +#define RXAR cpu_to_le32(1<<22) +#define RXUPDCOMPLETE cpu_to_le32(1<<23) +#define RXUPDFULL cpu_to_le32(1<<24) +#define RXUPLASTFRAG cpu_to_le32(1<<31) + +/* Transmit descriptor bits */ +#define TXDNCOMPLETE cpu_to_le32(1<<16) +#define TXTXINDICATE cpu_to_le32(1<<27) +#define TXDPDEMPTY cpu_to_le32(1<<29) +#define TXDNINDICATE cpu_to_le32(1<<31) +#define TXDNFRAGLAST cpu_to_le32(1<<31) + +/* Interrupts to Acknowledge */ +#define LATCH_ACK 1 +#define TXCOMPACK (1<<1) +#define INTREQACK (1<<2) +#define DNCOMPACK (1<<3) +#define UPCOMPACK (1<<4) +#define ASBFACK (1<<5) +#define SRBRACK (1<<6) +#define ARBCACK (1<<7) + +#define XL_IO_SPACE 128 +#define SRB_COMMAND_SIZE 50 + +/* Adapter Commands */ +#define REQUEST_INT 0x00 +#define MODIFY_OPEN_PARMS 0x01 +#define RESTORE_OPEN_PARMS 0x02 +#define OPEN_NIC 0x03 +#define CLOSE_NIC 0x04 +#define SET_SLEEP_MODE 0x05 +#define SET_GROUP_ADDRESS 0x06 +#define SET_FUNC_ADDRESS 0x07 +#define READ_LOG 0x08 +#define SET_MULTICAST_MODE 0x0C +#define CHANGE_WAKEUP_PATTERN 0x0D +#define GET_STATISTICS 0x13 +#define SET_RECEIVE_MODE 0x1F + +/* ARB Commands */ +#define RECEIVE_DATA 0x81 +#define RING_STATUS_CHANGE 0x84 + +/* ASB Commands */ +#define ASB_RECEIVE_DATE 0x81 + +/* Defines for LAN STATUS CHANGE reports */ +#define LSC_SIG_LOSS 0x8000 +#define LSC_HARD_ERR 0x4000 +#define LSC_SOFT_ERR 0x2000 +#define LSC_TRAN_BCN 0x1000 +#define LSC_LWF 0x0800 +#define LSC_ARW 0x0400 +#define LSC_FPE 0x0200 +#define LSC_RR 0x0100 +#define LSC_CO 0x0080 +#define LSC_SS 0x0040 +#define LSC_RING_REC 0x0020 +#define LSC_SR_CO 0x0010 +#define LSC_FDX_MODE 0x0004 + +#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ + +/* 3c359 defaults for buffers */ + +#define XL_RX_RING_SIZE 16 /* must be a power of 2 */ +#define XL_TX_RING_SIZE 16 /* must be a power of 2 */ + +#define PKT_BUF_SZ 4096 /* Default packet size */ + +/* 3c359 data structures */ + +struct xl_tx_desc { + __le32 dnnextptr; + __le32 framestartheader; + __le32 buffer; + __le32 buffer_length; +}; + +struct xl_rx_desc { + __le32 upnextptr; + __le32 framestatus; + __le32 upfragaddr; + __le32 upfraglen; +}; + +struct xl_private { + + + /* These two structures must be aligned on 8 byte boundaries */ + + /* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */ + /* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */ + struct xl_rx_desc *xl_rx_ring ; + struct xl_tx_desc *xl_tx_ring ; + struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], 
*rx_ring_skb[XL_RX_RING_SIZE]; + int tx_ring_head, tx_ring_tail ; + int rx_ring_tail, rx_ring_no ; + int free_ring_entries ; + + u16 srb; + u16 arb; + u16 asb; + + u8 __iomem *xl_mmio; + const char *xl_card_name; + struct pci_dev *pdev ; + + spinlock_t xl_lock ; + + volatile int srb_queued; + struct wait_queue *srb_wait; + volatile int asb_queued; + + u16 mac_buffer ; + u16 xl_lan_status ; + u8 xl_ring_speed ; + u16 pkt_buf_sz ; + u8 xl_message_level; + u16 xl_copy_all_options ; + unsigned char xl_functional_addr[4] ; + u16 xl_addr_table_addr, xl_parms_addr ; + u8 xl_laa[6] ; + u32 rx_ring_dma_addr ; + u32 tx_ring_dma_addr ; + + /* firmware section */ + const struct firmware *fw; +}; + diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig new file mode 100644 index 00000000..c4137b0f --- /dev/null +++ b/drivers/net/tokenring/Kconfig @@ -0,0 +1,185 @@ +# +# Token Ring driver configuration +# + +# So far, we only have PCI, ISA, and MCA token ring devices +menuconfig TR + tristate "Token Ring driver support" + depends on NETDEVICES && !UML + depends on (PCI || ISA || MCA || CCW) + select LLC + help + Token Ring is IBM's way of communication on a local network; the + rest of the world uses Ethernet. To participate on a Token Ring + network, you need a special Token ring network card. If you are + connected to such a Token Ring network and want to use your Token + Ring card under Linux, say Y here and to the driver for your + particular card below and read the Token-Ring mini-HOWTO, available + from . Most people can + say N here. + +if TR + +config IBMTR + tristate "IBM Tropic chipset based adapter support" + depends on ISA || MCA + ---help--- + This is support for all IBM Token Ring cards that don't use DMA. If + you have such a beast, say Y and read the Token-Ring mini-HOWTO, + available from . + + Warning: this driver will almost definitely fail if more than one + active Token Ring card is present. + + To compile this driver as a module, choose M here: the module will be + called ibmtr. + +config IBMOL + tristate "IBM Olympic chipset PCI adapter support" + depends on PCI + ---help--- + This is support for all non-Lanstreamer IBM PCI Token Ring Cards. + Specifically this is all IBM PCI, PCI Wake On Lan, PCI II, PCI II + Wake On Lan, and PCI 100/16/4 adapters. + + If you have such an adapter, say Y and read the Token-Ring + mini-HOWTO, available from . + + To compile this driver as a module, choose M here: the module will be + called olympic. + + Also read or check the + Linux Token Ring Project site for the latest information at + . + +config IBMLS + tristate "IBM Lanstreamer chipset PCI adapter support" + depends on PCI && !64BIT + help + This is support for IBM Lanstreamer PCI Token Ring Cards. + + If you have such an adapter, say Y and read the Token-Ring + mini-HOWTO, available from . + + To compile this driver as a module, choose M here: the module will be + called lanstreamer. + +config 3C359 + tristate "3Com 3C359 Token Link Velocity XL adapter support" + depends on PCI + ---help--- + This is support for the 3Com PCI Velocity XL cards, specifically + the 3Com 3C359, please note this is not for the 3C339 cards, you + should use the tms380 driver instead. + + If you have such an adapter, say Y and read the Token-Ring + mini-HOWTO, available from . + + To compile this driver as a module, choose M here: the module will be + called 3c359. 
+ + Also read the file or check the + Linux Token Ring Project site for the latest information at + + +config TMS380TR + tristate "Generic TMS380 Token Ring ISA/PCI adapter support" + depends on PCI || ISA && ISA_DMA_API || MCA + select FW_LOADER + ---help--- + This driver provides generic support for token ring adapters + based on the Texas Instruments TMS380 series chipsets. This + includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect + TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591), + Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several + Madge adapters. If you say Y here, you will be asked to select + which cards to support below. If you're using modules, each + class of card will be supported by a separate module. + + If you have such an adapter and would like to use it, say Y and + read the Token-Ring mini-HOWTO, available from + . + + Also read the file or + check . + + To compile this driver as a module, choose M here: the module will be + called tms380tr. + +config TMSPCI + tristate "Generic TMS380 PCI support" + depends on TMS380TR && PCI + ---help--- + This tms380 module supports generic TMS380-based PCI cards. + + These cards are known to work: + - Compaq 4/16 TR PCI + - SysKonnect TR4/16 PCI (SK-4590/SK-4591) + - Thomas-Conrad TC4048 PCI 4/16 + - 3Com Token Link Velocity + + To compile this driver as a module, choose M here: the module will be + called tmspci. + +config SKISA + tristate "SysKonnect TR4/16 ISA support" + depends on TMS380TR && ISA + help + This tms380 module supports SysKonnect TR4/16 ISA cards. + + These cards are known to work: + - SysKonnect TR4/16 ISA (SK-4190) + + To compile this driver as a module, choose M here: the module will be + called skisa. + +config PROTEON + tristate "Proteon ISA support" + depends on TMS380TR && ISA + help + This tms380 module supports Proteon ISA cards. + + These cards are known to work: + - Proteon 1392 + - Proteon 1392 plus + + To compile this driver as a module, choose M here: the module will be + called proteon. + +config ABYSS + tristate "Madge Smart 16/4 PCI Mk2 support" + depends on TMS380TR && PCI + help + This tms380 module supports the Madge Smart 16/4 PCI Mk2 + cards (51-02). + + To compile this driver as a module, choose M here: the module will be + called abyss. + +config MADGEMC + tristate "Madge Smart 16/4 Ringnode MicroChannel" + depends on TMS380TR && MCA + help + This tms380 module supports the Madge Smart 16/4 MC16 and MC32 + MicroChannel adapters. + + To compile this driver as a module, choose M here: the module will be + called madgemc. + +config SMCTR + tristate "SMC ISA/MCA adapter support" + depends on (ISA || MCA_LEGACY) && (BROKEN || !64BIT) + ---help--- + This is support for the ISA and MCA SMC Token Ring cards, + specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A + (8115T/A) adapters. + + If you have such an adapter and would like to use it, say Y or M and + read the Token-Ring mini-HOWTO, available from + and the file + . + + To compile this driver as a module, choose M here: the module will be + called smctr. 
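+
+# Example (illustrative only): building the 3C359 driver as a module needs
+# both the top-level option and the driver entry, e.g.
+#
+#	CONFIG_TR=m
+#	CONFIG_3C359=m
+#
+# The other drivers above combine with CONFIG_TR in the same way.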
+ +endif # TR diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile new file mode 100644 index 00000000..c88b0a5e --- /dev/null +++ b/drivers/net/tokenring/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for drivers/net/tokenring +# + +obj-$(CONFIG_IBMTR) += ibmtr.o +obj-$(CONFIG_IBMOL) += olympic.o +obj-$(CONFIG_IBMLS) += lanstreamer.o +obj-$(CONFIG_TMS380TR) += tms380tr.o +obj-$(CONFIG_ABYSS) += abyss.o +obj-$(CONFIG_MADGEMC) += madgemc.o +obj-$(CONFIG_PROTEON) += proteon.o +obj-$(CONFIG_TMSPCI) += tmspci.o +obj-$(CONFIG_SKISA) += skisa.o +obj-$(CONFIG_SMCTR) += smctr.o +obj-$(CONFIG_3C359) += 3c359.o diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c new file mode 100644 index 00000000..515f1227 --- /dev/null +++ b/drivers/net/tokenring/abyss.c @@ -0,0 +1,469 @@ +/* + * abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card. + * + * Written 1999-2000 by Adam Fritzler + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver module supports the following cards: + * - Madge Smart 16/4 PCI Mk2 + * + * Maintainer(s): + * AF Adam Fritzler + * + * Modification History: + * 30-Dec-99 AF Split off from the tms380tr driver. + * 22-Jan-00 AF Updated to use indirect read/writes + * 23-Nov-00 JG New PCI API, cleanups + * + * + * TODO: + * 1. See if we can use MMIO instead of inb/outb/inw/outw + * 2. Add support for Mk1 (has AT24 attached to the PCI + * config registers) + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tms380tr.h" +#include "abyss.h" /* Madge-specific constants */ + +static char version[] __devinitdata = +"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n"; + +#define ABYSS_IO_EXTENT 64 + +static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = { + { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, }, + { } /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, abyss_pci_tbl); + +MODULE_LICENSE("GPL"); + +static int abyss_open(struct net_device *dev); +static int abyss_close(struct net_device *dev); +static void abyss_enable(struct net_device *dev); +static int abyss_chipset_init(struct net_device *dev); +static void abyss_read_eeprom(struct net_device *dev); +static unsigned short abyss_setnselout_pins(struct net_device *dev); + +static void at24_writedatabyte(unsigned long regaddr, unsigned char byte); +static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr); +static int at24_sendcmd(unsigned long regaddr, unsigned char cmd); +static unsigned char at24_readdatabit(unsigned long regaddr); +static unsigned char at24_readdatabyte(unsigned long regaddr); +static int at24_waitforack(unsigned long regaddr); +static int at24_waitfornack(unsigned long regaddr); +static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data); +static void at24_start(unsigned long regaddr); +static unsigned char at24_readb(unsigned long regaddr, unsigned char addr); + +static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg) +{ + return inb(dev->base_addr + reg); +} + +static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg) +{ + return inw(dev->base_addr + reg); +} + +static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outb(val, dev->base_addr + reg); +} + 
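+
+/*
+ * Note: the generic tms380tr core never drives the bus itself; it calls
+ * back through the hooks installed in abyss_attach() below, roughly
+ *
+ *	val = tp->sifreadw(dev, reg);
+ *	tp->sifwritew(dev, val, reg);
+ *
+ * so this file only has to supply thin inb/outb wrappers like these.
+ */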
+static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outw(val, dev->base_addr + reg); +} + +static struct net_device_ops abyss_netdev_ops; + +static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int versionprinted; + struct net_device *dev; + struct net_local *tp; + int ret, pci_irq_line; + unsigned long pci_ioaddr; + + if (versionprinted++ == 0) + printk("%s", version); + + if (pci_enable_device(pdev)) + return -EIO; + + /* Remove I/O space marker in bit 0. */ + pci_irq_line = pdev->irq; + pci_ioaddr = pci_resource_start (pdev, 0); + + /* At this point we have found a valid card. */ + + dev = alloc_trdev(sizeof(struct net_local)); + if (!dev) + return -ENOMEM; + + if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) { + ret = -EBUSY; + goto err_out_trdev; + } + + ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + goto err_out_region; + + dev->base_addr = pci_ioaddr; + dev->irq = pci_irq_line; + + printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name); + printk("%s: IO: %#4lx IRQ: %d\n", + dev->name, pci_ioaddr, dev->irq); + /* + * The TMS SIF registers lay 0x10 above the card base address. + */ + dev->base_addr += 0x10; + + ret = tmsdev_init(dev, &pdev->dev); + if (ret) { + printk("%s: unable to get memory for dev->priv.\n", + dev->name); + goto err_out_irq; + } + + abyss_read_eeprom(dev); + + printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr); + + tp = netdev_priv(dev); + tp->setnselout = abyss_setnselout_pins; + tp->sifreadb = abyss_sifreadb; + tp->sifreadw = abyss_sifreadw; + tp->sifwriteb = abyss_sifwriteb; + tp->sifwritew = abyss_sifwritew; + + memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); + + dev->netdev_ops = &abyss_netdev_ops; + + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_netdev(dev); + if (ret) + goto err_out_tmsdev; + return 0; + +err_out_tmsdev: + pci_set_drvdata(pdev, NULL); + tmsdev_term(dev); +err_out_irq: + free_irq(pdev->irq, dev); +err_out_region: + release_region(pci_ioaddr, ABYSS_IO_EXTENT); +err_out_trdev: + free_netdev(dev); + return ret; +} + +static unsigned short abyss_setnselout_pins(struct net_device *dev) +{ + unsigned short val = 0; + struct net_local *tp = netdev_priv(dev); + + if(tp->DataRate == SPEED_4) + val |= 0x01; /* Set 4Mbps */ + else + val |= 0x00; /* Set 16Mbps */ + + return val; +} + +/* + * The following Madge boards should use this code: + * - Smart 16/4 PCI Mk2 (Abyss) + * - Smart 16/4 PCI Mk1 (PCI T) + * - Smart 16/4 Client Plus PnP (Big Apple) + * - Smart 16/4 Cardbus Mk2 + * + * These access an Atmel AT24 SEEPROM using their glue chip registers. 
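+ *
+ * Everything goes through at24_readb(); abyss_read_eeprom() further down,
+ * for example, recovers the burned-in address one byte at a time with
+ *
+ *	at24_readb(ioaddr + PCIBM2_SEEPROM_REG, PCIBM2_SEEPROM_BIA + i)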
+ * + */ +static void at24_writedatabyte(unsigned long regaddr, unsigned char byte) +{ + int i; + + for (i = 0; i < 8; i++) { + at24_setlines(regaddr, 0, (byte >> (7-i))&0x01); + at24_setlines(regaddr, 1, (byte >> (7-i))&0x01); + at24_setlines(regaddr, 0, (byte >> (7-i))&0x01); + } +} + +static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr) +{ + if (at24_sendcmd(regaddr, cmd)) { + at24_writedatabyte(regaddr, addr); + return at24_waitforack(regaddr); + } + return 0; +} + +static int at24_sendcmd(unsigned long regaddr, unsigned char cmd) +{ + int i; + + for (i = 0; i < 10; i++) { + at24_start(regaddr); + at24_writedatabyte(regaddr, cmd); + if (at24_waitforack(regaddr)) + return 1; + } + return 0; +} + +static unsigned char at24_readdatabit(unsigned long regaddr) +{ + unsigned char val; + + at24_setlines(regaddr, 0, 1); + at24_setlines(regaddr, 1, 1); + val = (inb(regaddr) & AT24_DATA)?1:0; + at24_setlines(regaddr, 1, 1); + at24_setlines(regaddr, 0, 1); + return val; +} + +static unsigned char at24_readdatabyte(unsigned long regaddr) +{ + unsigned char data = 0; + int i; + + for (i = 0; i < 8; i++) { + data <<= 1; + data |= at24_readdatabit(regaddr); + } + + return data; +} + +static int at24_waitforack(unsigned long regaddr) +{ + int i; + + for (i = 0; i < 10; i++) { + if ((at24_readdatabit(regaddr) & 0x01) == 0x00) + return 1; + } + return 0; +} + +static int at24_waitfornack(unsigned long regaddr) +{ + int i; + for (i = 0; i < 10; i++) { + if ((at24_readdatabit(regaddr) & 0x01) == 0x01) + return 1; + } + return 0; +} + +static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data) +{ + unsigned char val = AT24_ENABLE; + if (clock) + val |= AT24_CLOCK; + if (data) + val |= AT24_DATA; + + outb(val, regaddr); + tms380tr_wait(20); /* Very necessary. */ +} + +static void at24_start(unsigned long regaddr) +{ + at24_setlines(regaddr, 0, 1); + at24_setlines(regaddr, 1, 1); + at24_setlines(regaddr, 1, 0); + at24_setlines(regaddr, 0, 1); +} + +static unsigned char at24_readb(unsigned long regaddr, unsigned char addr) +{ + unsigned char data = 0xff; + + if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) { + if (at24_sendcmd(regaddr, AT24_READ)) { + data = at24_readdatabyte(regaddr); + if (!at24_waitfornack(regaddr)) + data = 0xff; + } + } + return data; +} + + +/* + * Enable basic functions of the Madge chipset needed + * for initialization. + */ +static void abyss_enable(struct net_device *dev) +{ + unsigned char reset_reg; + unsigned long ioaddr; + + ioaddr = dev->base_addr; + reset_reg = inb(ioaddr + PCIBM2_RESET_REG); + reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + tms380tr_wait(100); +} + +/* + * Enable the functions of the Madge chipset needed for + * full working order. 
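+ * The sequence below apparently treats the *_NRES bits as active-low
+ * resets: it first drops the chip, FIFO and SIF into reset together,
+ * then releases them one at a time (chip, SIF, FIFO) before enabling
+ * the SIF interrupt plus PCI error reporting and setting the FIFO
+ * threshold.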
+ */ +static int abyss_chipset_init(struct net_device *dev) +{ + unsigned char reset_reg; + unsigned long ioaddr; + + ioaddr = dev->base_addr; + + reset_reg = inb(ioaddr + PCIBM2_RESET_REG); + + reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + + reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES | + PCIBM2_RESET_REG_FIFO_NRES | + PCIBM2_RESET_REG_SIF_NRES); + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + + tms380tr_wait(100); + + reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + + reset_reg |= PCIBM2_RESET_REG_SIF_NRES; + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + + reset_reg |= PCIBM2_RESET_REG_FIFO_NRES; + outb(reset_reg, ioaddr + PCIBM2_RESET_REG); + + outb(PCIBM2_INT_CONTROL_REG_SINTEN | + PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE, + ioaddr + PCIBM2_INT_CONTROL_REG); + + outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD); + + return 0; +} + +static inline void abyss_chipset_close(struct net_device *dev) +{ + unsigned long ioaddr; + + ioaddr = dev->base_addr; + outb(0, ioaddr + PCIBM2_RESET_REG); +} + +/* + * Read configuration data from the AT24 SEEPROM on Madge cards. + * + */ +static void abyss_read_eeprom(struct net_device *dev) +{ + struct net_local *tp; + unsigned long ioaddr; + unsigned short val; + int i; + + tp = netdev_priv(dev); + ioaddr = dev->base_addr; + + /* Must enable glue chip first */ + abyss_enable(dev); + + val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, + PCIBM2_SEEPROM_RING_SPEED); + tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */ + printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate); + + val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, + PCIBM2_SEEPROM_RAM_SIZE) * 128; + printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val); + + dev->addr_len = 6; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, + PCIBM2_SEEPROM_BIA+i); +} + +static int abyss_open(struct net_device *dev) +{ + abyss_chipset_init(dev); + tms380tr_open(dev); + return 0; +} + +static int abyss_close(struct net_device *dev) +{ + tms380tr_close(dev); + abyss_chipset_close(dev); + return 0; +} + +static void __devexit abyss_detach (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + BUG_ON(!dev); + unregister_netdev(dev); + release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT); + free_irq(dev->irq, dev); + tmsdev_term(dev); + free_netdev(dev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver abyss_driver = { + .name = "abyss", + .id_table = abyss_pci_tbl, + .probe = abyss_attach, + .remove = __devexit_p(abyss_detach), +}; + +static int __init abyss_init (void) +{ + abyss_netdev_ops = tms380tr_netdev_ops; + + abyss_netdev_ops.ndo_open = abyss_open; + abyss_netdev_ops.ndo_stop = abyss_close; + + return pci_register_driver(&abyss_driver); +} + +static void __exit abyss_rmmod (void) +{ + pci_unregister_driver (&abyss_driver); +} + +module_init(abyss_init); +module_exit(abyss_rmmod); + diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h new file mode 100644 index 00000000..b0a473b8 --- /dev/null +++ b/drivers/net/tokenring/abyss.h @@ -0,0 +1,58 @@ +/* + * abyss.h: Header for the abyss tms380tr module + * + * Authors: + * - Adam Fritzler + */ + +#ifndef __LINUX_MADGETR_H +#define __LINUX_MADGETR_H + +#ifdef __KERNEL__ + +/* + * For Madge Smart 16/4 PCI Mk2. Since we increment the base address + * to get everything correct for the TMS SIF, we do these as negatives + * as they fall below the SIF in addressing. 
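+ * For example, abyss_attach() adds 0x10 to dev->base_addr, so the -12 of
+ * PCIBM2_RESET_REG still lands on card base + 0x04, matching the offsets
+ * noted in the comments below.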
+ */ +#define PCIBM2_INT_STATUS_REG ((short)-15)/* 0x01 */ +#define PCIBM2_INT_CONTROL_REG ((short)-14)/* 0x02 */ +#define PCIBM2_RESET_REG ((short)-12)/* 0x04 */ +#define PCIBM2_SEEPROM_REG ((short)-9) /* 0x07 */ + +#define PCIBM2_INT_CONTROL_REG_SINTEN 0x02 +#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE 0x80 +#define PCIBM2_INT_STATUS_REG_PCI_ERR 0x80 + +#define PCIBM2_RESET_REG_CHIP_NRES 0x01 +#define PCIBM2_RESET_REG_FIFO_NRES 0x02 +#define PCIBM2_RESET_REG_SIF_NRES 0x04 + +#define PCIBM2_FIFO_THRESHOLD 0x21 +#define PCIBM2_BURST_LENGTH 0x22 + +/* + * Bits in PCIBM2_SEEPROM_REG. + */ +#define AT24_ENABLE 0x04 +#define AT24_DATA 0x02 +#define AT24_CLOCK 0x01 + +/* + * AT24 Commands. + */ +#define AT24_WRITE 0xA0 +#define AT24_READ 0xA1 + +/* + * Addresses in AT24 SEEPROM. + */ +#define PCIBM2_SEEPROM_BIA 0x12 +#define PCIBM2_SEEPROM_RING_SPEED 0x18 +#define PCIBM2_SEEPROM_RAM_SIZE 0x1A +#define PCIBM2_SEEPROM_HWF1 0x1C +#define PCIBM2_SEEPROM_HWF2 0x1E + + +#endif /* __KERNEL__ */ +#endif /* __LINUX_MADGETR_H */ diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c new file mode 100644 index 00000000..4786497d --- /dev/null +++ b/drivers/net/tokenring/ibmtr.c @@ -0,0 +1,1963 @@ +/* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for linux + * + * Written 1993 by Mark Swanson and Peter De Schrijver. + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This device driver should work with Any IBM Token Ring Card that does + * not use DMA. + * + * I used Donald Becker's (becker@scyld.com) device driver work + * as a base for most of my initial work. + * + * Changes by Peter De Schrijver + * (Peter.Deschrijver@linux.cc.kuleuven.ac.be) : + * + * + changed name to ibmtr.c in anticipation of other tr boards. + * + changed reset code and adapter open code. + * + added SAP open code. + * + a first attempt to write interrupt, transmit and receive routines. + * + * Changes by David W. Morris (dwm@shell.portal.com) : + * 941003 dwm: - Restructure tok_probe for multiple adapters, devices. + * + Add comments, misc reorg for clarity. + * + Flatten interrupt handler levels. + * + * Changes by Farzad Farid (farzy@zen.via.ecp.fr) + * and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) : + * + multi ring support clean up. + * + RFC1042 compliance enhanced. + * + * Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) : + * + bug correction in tr_tx + * + removed redundant information display + * + some code reworking + * + * Changes by Michel Lespinasse (walken@via.ecp.fr), + * Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr) + * (February 18, 1996) : + * + modified shared memory and mmio access port the driver to + * alpha platform (structure access -> readb/writeb) + * + * Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com) + * (January 18 1996): + * + swapped WWOR and WWCR in ibmtr.h + * + moved some init code from tok_probe into trdev_init. The + * PCMCIA code can call trdev_init to complete initializing + * the driver. + * + added -DPCMCIA to support PCMCIA + * + detecting PCMCIA Card Removal in interrupt handler. If + * ISRP is FF, then a PCMCIA card has been removed + * 10/2000 Burt needed a new method to avoid crashing the OS + * + * Changes by Paul Norton (pnorton@cts.com) : + * + restructured the READ.LOG logic to prevent the transmit SRB + * from being rudely overwritten before the transmit cycle is + * complete. 
(August 15 1996) + * + completed multiple adapter support. (November 20 1996) + * + implemented csum_partial_copy in tr_rx and increased receive + * buffer size and count. Minor fixes. (March 15, 1997) + * + * Changes by Christopher Turcksin + * + Now compiles ok as a module again. + * + * Changes by Paul Norton (pnorton@ieee.org) : + * + moved the header manipulation code in tr_tx and tr_rx to + * net/802/tr.c. (July 12 1997) + * + add retry and timeout on open if cable disconnected. (May 5 1998) + * + lifted 2000 byte mtu limit. now depends on shared-RAM size. + * May 25 1998) + * + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998) + * + * Changes by Joel Sloan (jjs@c-me.com) : + * + disable verbose debug messages by default - to enable verbose + * debugging, edit the IBMTR_DEBUG_MESSAGES define below + * + * Changes by Mike Phillips : + * + Added extra #ifdef's to work with new PCMCIA Token Ring Code. + * The PCMCIA code now just sets up the card so it can be recognized + * by ibmtr_probe. Also checks allocated memory vs. on-board memory + * for correct figure to use. + * + * Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) : + * + added spinlocks for SMP sanity (10 March 1999) + * + * Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting + * i.e. using functional address C0 00 00 04 00 00 to transmit and + * receive multicast packets. + * + * Changes by Mike Sullivan (based on original sram patch by Dave Grothe + * to support windowing into on adapter shared ram. + * i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging + * will shift this 16K window over the entire available shared RAM. + * + * Changes by Peter De Schrijver (p2@mind.be) : + * + fixed a problem with PCMCIA card removal + * + * Change by Mike Sullivan et al.: + * + added turbo card support. No need to use lanaid to configure + * the adapter into isa compatibility mode. + * + * Changes by Burt Silverman to allow the computer to behave nicely when + * a cable is pulled or not in place, or a PCMCIA card is removed hot. + */ + +/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value +in the event that chatty debug messages are desired - jjs 12/30/98 */ + +#define IBMTR_DEBUG_MESSAGES 0 + +#include +#include + +#ifdef PCMCIA /* required for ibmtr_cs.c to build */ +#undef MODULE /* yes, really */ +#undef ENABLE_PAGING +#else +#define ENABLE_PAGING 1 +#endif + +/* changes the output format of driver initialization */ +#define TR_VERBOSE 0 + +/* some 95 OS send many non UI frame; this allow removing the warning */ +#define TR_FILTERNONUI 1 + +#include +#include +#include +#include +#include + +#include + +#include + +#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args) +#define DPRINTD(format, args...) 
DummyCall("%s: " format, dev->name , ## args) + +/* version and credits */ +#ifndef PCMCIA +static char version[] __devinitdata = + "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" + " v2.1.125 10/20/98 Paul Norton \n" + " v2.2.0 12/30/98 Joel Sloan \n" + " v2.2.1 02/08/00 Mike Sullivan \n" + " v2.2.2 07/27/00 Burt Silverman \n" + " v2.4.0 03/01/01 Mike Sullivan \n"; +#endif + +/* this allows displaying full adapter information */ + +static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" }; + +static char pcchannelid[] __devinitdata = { + 0x05, 0x00, 0x04, 0x09, + 0x04, 0x03, 0x04, 0x0f, + 0x03, 0x06, 0x03, 0x01, + 0x03, 0x01, 0x03, 0x00, + 0x03, 0x09, 0x03, 0x09, + 0x03, 0x00, 0x02, 0x00 +}; + +static char mcchannelid[] __devinitdata = { + 0x04, 0x0d, 0x04, 0x01, + 0x05, 0x02, 0x05, 0x03, + 0x03, 0x06, 0x03, 0x03, + 0x05, 0x08, 0x03, 0x04, + 0x03, 0x05, 0x03, 0x01, + 0x03, 0x08, 0x02, 0x00 +}; + +static char __devinit *adapter_def(char type) +{ + switch (type) { + case 0xF: return "PC Adapter | PC Adapter II | Adapter/A"; + case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)"; + case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter"; + case 0xC: return "Auto 16/4 Adapter"; + default: return "adapter (unknown type)"; + }; +}; + +#define TRC_INIT 0x01 /* Trace initialization & PROBEs */ +#define TRC_INITV 0x02 /* verbose init trace points */ +static unsigned char ibmtr_debug_trace = 0; + +static int ibmtr_probe1(struct net_device *dev, int ioaddr); +static unsigned char get_sram_size(struct tok_info *adapt_info); +static int trdev_init(struct net_device *dev); +static int tok_open(struct net_device *dev); +static int tok_init_card(struct net_device *dev); +static void tok_open_adapter(unsigned long dev_addr); +static void open_sap(unsigned char type, struct net_device *dev); +static void tok_set_multicast_list(struct net_device *dev); +static netdev_tx_t tok_send_packet(struct sk_buff *skb, + struct net_device *dev); +static int tok_close(struct net_device *dev); +static irqreturn_t tok_interrupt(int irq, void *dev_id); +static void initial_tok_int(struct net_device *dev); +static void tr_tx(struct net_device *dev); +static void tr_rx(struct net_device *dev); +static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); +static void tok_rerun(unsigned long dev_addr); +static void ibmtr_readlog(struct net_device *dev); +static int ibmtr_change_mtu(struct net_device *dev, int mtu); +static void find_turbo_adapters(int *iolist); + +static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = { + 0xa20, 0xa24, 0, 0, 0 +}; +static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0}; +static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; +static int __devinitdata turbo_searched = 0; + +#ifndef PCMCIA +static __u32 ibmtr_mem_base __devinitdata = 0xd0000; +#endif + +static void __devinit PrtChanID(char *pcid, short stride) +{ + short i, j; + for (i = 0, j = 0; i < 24; i++, j += stride) + printk("%1x", ((int) pcid[j]) & 0x0f); + printk("\n"); +} + +static void __devinit HWPrtChanID(void __iomem *pcid, short stride) +{ + short i, j; + for (i = 0, j = 0; i < 24; i++, j += stride) + printk("%1x", ((int) readb(pcid + j)) & 0x0f); + printk("\n"); +} + +/* We have to ioremap every checked address, because isa_readb is + * going away. 
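+ * Each candidate 8K window between 0xC0000 and 0xE0000 is therefore
+ * ioremap()ed and checked against the expected channel-ID signature
+ * before the Turbo interface table is read out of it.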
+ */ + +static void __devinit find_turbo_adapters(int *iolist) +{ + int ram_addr; + int index=0; + void __iomem *chanid; + int found_turbo=0; + unsigned char *tchanid, ctemp; + int i, j; + unsigned long jif; + void __iomem *ram_mapped ; + + if (turbo_searched == 1) return; + turbo_searched=1; + for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) { + + __u32 intf_tbl=0; + + found_turbo=1; + ram_mapped = ioremap((u32)ram_addr,0x1fff) ; + if (ram_mapped==NULL) + continue ; + chanid=(CHANNEL_ID + ram_mapped); + tchanid=pcchannelid; + ctemp=readb(chanid) & 0x0f; + if (ctemp != *tchanid) continue; + for (i=2,j=1; i<=46; i=i+2,j++) { + if ((readb(chanid+i) & 0x0f) != tchanid[j]){ + found_turbo=0; + break; + } + } + if (!found_turbo) continue; + + writeb(0x90, ram_mapped+0x1E01); + for(i=2; i<0x0f; i++) { + writeb(0x00, ram_mapped+0x1E01+i); + } + writeb(0x00, ram_mapped+0x1E01); + for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif);); + intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN)); + if (intf_tbl) { +#if IBMTR_DEBUG_MESSAGES + printk("ibmtr::find_turbo_adapters, Turbo found at " + "ram_addr %x\n",ram_addr); + printk("ibmtr::find_turbo_adapters, interface_table "); + for(i=0; i<6; i++) { + printk("%x:",readb(ram_addr+intf_tbl+i)); + } + printk("\n"); +#endif + turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4)); + turbo_irq[index]=readb(ram_mapped+intf_tbl+3); + outb(0, turbo_io[index] + ADAPTRESET); + for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif);); + outb(0, turbo_io[index] + ADAPTRESETREL); + index++; + continue; + } +#if IBMTR_DEBUG_MESSAGES + printk("ibmtr::find_turbo_adapters, ibmtr card found at" + " %x but not a Turbo model\n",ram_addr); +#endif + iounmap(ram_mapped) ; + } /* for */ + for(i=0; ibase_addr) { + outb(0,dev->base_addr+ADAPTRESET); + + schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ + + outb(0,dev->base_addr+ADAPTRESETREL); + } + +#ifndef PCMCIA + free_irq(dev->irq, dev); + release_region(dev->base_addr, IBMTR_IO_EXTENT); + + { + struct tok_info *ti = netdev_priv(dev); + iounmap(ti->mmio); + iounmap(ti->sram_virt); + } +#endif +} + +/**************************************************************************** + * ibmtr_probe(): Routine specified in the network device structure + * to probe for an IBM Token Ring Adapter. Routine outline: + * I. Interrogate hardware to determine if an adapter exists + * and what the speeds and feeds are + * II. Setup data structures to control execution based upon + * adapter characteristics. + * + * We expect ibmtr_probe to be called once for each device entry + * which references it. + ****************************************************************************/ + +static int __devinit ibmtr_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + + if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */ + return -ENXIO; + if (base_addr > 0x1ff) { /* Check a single specified location. 
*/ + if (!ibmtr_probe1(dev, base_addr)) return 0; + return -ENODEV; + } + find_turbo_adapters(ibmtr_portlist); + for (i = 0; ibmtr_portlist[i]; i++) { + int ioaddr = ibmtr_portlist[i]; + + if (!ibmtr_probe1(dev, ioaddr)) return 0; + } + return -ENODEV; +} + +int __devinit ibmtr_probe_card(struct net_device *dev) +{ + int err = ibmtr_probe(dev); + if (!err) { + err = register_netdev(dev); + if (err) + ibmtr_cleanup_card(dev); + } + return err; +} + +/*****************************************************************************/ + +static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) +{ + + unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0; + void __iomem * t_mmio = NULL; + struct tok_info *ti = netdev_priv(dev); + void __iomem *cd_chanid; + unsigned char *tchanid, ctemp; +#ifndef PCMCIA + unsigned char t_irq=0; + unsigned long timeout; + static int version_printed; +#endif + + /* Query the adapter PIO base port which will return + * indication of where MMIO was placed. We also have a + * coded interrupt number. + */ + segment = inb(PIOaddr); + if (segment < 0x40 || segment > 0xe0) { + /* Out of range values so we'll assume non-existent IO device + * but this is not necessarily a problem, esp if a turbo + * adapter is being used. */ +#if IBMTR_DEBUG_MESSAGES + DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, " + "Hardware Problem?\n",PIOaddr,segment); +#endif + return -ENODEV; + } + /* + * Compute the linear base address of the MMIO area + * as LINUX doesn't care about segments + */ + t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048); + if (!t_mmio) { + DPRINTK("Cannot remap mmiobase memory area") ; + return -ENODEV ; + } + intr = segment & 0x03; /* low bits is coded interrupt # */ + if (ibmtr_debug_trace & TRC_INIT) + DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n" + , PIOaddr, (int) segment, t_mmio, (int) intr); + + /* + * Now we will compare expected 'channelid' strings with + * what we is there to learn of ISA/MCA or not TR card + */ +#ifdef PCMCIA + iounmap(t_mmio); + t_mmio = ti->mmio; /*BMS to get virtual address */ + irq = ti->irq; /*BMS to display the irq! */ +#endif + cd_chanid = (CHANNEL_ID + t_mmio); /* for efficiency */ + tchanid = pcchannelid; + cardpresent = TR_ISA; /* try ISA */ + + /* Suboptimize knowing first byte different */ + ctemp = readb(cd_chanid) & 0x0f; + if (ctemp != *tchanid) { /* NOT ISA card, try MCA */ + tchanid = mcchannelid; + cardpresent = TR_MCA; + if (ctemp != *tchanid) /* Neither ISA nor MCA */ + cardpresent = NOTOK; + } + if (cardpresent != NOTOK) { + /* Know presumed type, try rest of ID */ + for (i = 2, j = 1; i <= 46; i = i + 2, j++) { + if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue; + /* match failed, not TR card */ + cardpresent = NOTOK; + break; + } + } + /* + * If we have an ISA board check for the ISA P&P version, + * as it has different IRQ settings + */ + if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e)) + cardpresent = TR_ISAPNP; + if (cardpresent == NOTOK) { /* "channel_id" did not match, report */ + if (!(ibmtr_debug_trace & TRC_INIT)) { +#ifndef PCMCIA + iounmap(t_mmio); +#endif + return -ENODEV; + } + DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n", + PIOaddr); + DPRINTK("Expected for ISA: "); + PrtChanID(pcchannelid, 1); + DPRINTK(" found: "); +/* BMS Note that this can be misleading, when hardware is flaky, because you + are reading it a second time here. 
So with my flaky hardware, I'll see my- + self in this block, with the HW ID matching the ISA ID exactly! */ + HWPrtChanID(cd_chanid, 2); + DPRINTK("Expected for MCA: "); + PrtChanID(mcchannelid, 1); + } + /* Now, setup some of the pl0 buffers for this driver.. */ + /* If called from PCMCIA, it is already set up, so no need to + waste the memory, just use the existing structure */ +#ifndef PCMCIA + ti->mmio = t_mmio; + for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) { + if (turbo_io[i] != PIOaddr) + continue; +#if IBMTR_DEBUG_MESSAGES + printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n", + PIOaddr); +#endif + ti->turbo = 1; + t_irq = turbo_irq[i]; + } +#endif /* !PCMCIA */ + ti->readlog_pending = 0; + init_waitqueue_head(&ti->wait_for_reset); + + /* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP + * depending which card is inserted. */ + +#ifndef PCMCIA + switch (cardpresent) { + case TR_ISA: + if (intr == 0) irq = 9; /* irq2 really is irq9 */ + if (intr == 1) irq = 3; + if (intr == 2) irq = 6; + if (intr == 3) irq = 7; + ti->adapter_int_enable = PIOaddr + ADAPTINTREL; + break; + case TR_MCA: + if (intr == 0) irq = 9; + if (intr == 1) irq = 3; + if (intr == 2) irq = 10; + if (intr == 3) irq = 11; + ti->global_int_enable = 0; + ti->adapter_int_enable = 0; + ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12; + break; + case TR_ISAPNP: + if (!t_irq) { + if (intr == 0) irq = 9; + if (intr == 1) irq = 3; + if (intr == 2) irq = 10; + if (intr == 3) irq = 11; + } else + irq=t_irq; + timeout = jiffies + TR_SPIN_INTERVAL; + while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){ + if (!time_after(jiffies, timeout)) continue; + DPRINTK( "Hardware timeout during initialization.\n"); + iounmap(t_mmio); + return -ENODEV; + } + ti->sram_phys = + ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12); + ti->adapter_int_enable = PIOaddr + ADAPTINTREL; + break; + } /*end switch (cardpresent) */ +#endif /*not PCMCIA */ + + if (ibmtr_debug_trace & TRC_INIT) { /* just report int */ + DPRINTK("irq=%d", irq); + printk(", sram_phys=0x%x", ti->sram_phys); + if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */ + DPRINTK(", ti->mmio=%p", ti->mmio); + printk(", segment=%02X", segment); + } + printk(".\n"); + } + + /* Get hw address of token ring card */ + j = 0; + for (i = 0; i < 0x18; i = i + 2) { + /* technical reference states to do this */ + temp = readb(ti->mmio + AIP + i) & 0x0f; + ti->hw_address[j] = temp; + if (j & 1) + dev->dev_addr[(j / 2)] = + ti->hw_address[j]+ (ti->hw_address[j - 1] << 4); + ++j; + } + /* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,... */ + ti->adapter_type = readb(ti->mmio + AIPADAPTYPE); + + /* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */ + ti->data_rate = readb(ti->mmio + AIPDATARATE); + + /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */ + ti->token_release = readb(ti->mmio + AIPEARLYTOKEN); + + /* How much shared RAM is on adapter ? */ + if (ti->turbo) { + ti->avail_shared_ram=127; + } else { + ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */ + } + /* We need to set or do a bunch of work here based on previous results*/ + /* Support paging? 
What sizes?: F=no, E=16k, D=32k, C=16 & 32k */ + ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE); + + /* Available DHB 4Mb size: F=2048, E=4096, D=4464 */ + switch (readb(ti->mmio + AIP4MBDHB)) { + case 0xe: ti->dhb_size4mb = 4096; break; + case 0xd: ti->dhb_size4mb = 4464; break; + default: ti->dhb_size4mb = 2048; break; + } + + /* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */ + switch (readb(ti->mmio + AIP16MBDHB)) { + case 0xe: ti->dhb_size16mb = 4096; break; + case 0xd: ti->dhb_size16mb = 8192; break; + case 0xc: ti->dhb_size16mb = 16384; break; + case 0xb: ti->dhb_size16mb = 17960; break; + default: ti->dhb_size16mb = 2048; break; + } + + /* We must figure out how much shared memory space this adapter + * will occupy so that if there are two adapters we can fit both + * in. Given a choice, we will limit this adapter to 32K. The + * maximum space will will use for two adapters is 64K so if the + * adapter we are working on demands 64K (it also doesn't support + * paging), then only one adapter can be supported. + */ + + /* + * determine how much of total RAM is mapped into PC space + */ + ti->mapped_ram_size= /*sixteen to onehundredtwentyeight 512byte blocks*/ + 1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4); + ti->page_mask = 0; + if (ti->turbo) ti->page_mask=0xf0; + else if (ti->shared_ram_paging == 0xf); /* No paging in adapter */ + else { +#ifdef ENABLE_PAGING + unsigned char pg_size = 0; + /* BMS: page size: PCMCIA, use configuration register; + ISAPNP, use LANAIDC config tool from www.ibm.com */ + switch (ti->shared_ram_paging) { + case 0xf: + break; + case 0xe: + ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0; + pg_size = 32; /* 16KB page size */ + break; + case 0xd: + ti->page_mask = (ti->mapped_ram_size == 64) ? 
0x80 : 0; + pg_size = 64; /* 32KB page size */ + break; + case 0xc: + switch (ti->mapped_ram_size) { + case 32: + ti->page_mask = 0xc0; + pg_size = 32; + break; + case 64: + ti->page_mask = 0x80; + pg_size = 64; + break; + } + break; + default: + DPRINTK("Unknown shared ram paging info %01X\n", + ti->shared_ram_paging); + iounmap(t_mmio); + return -ENODEV; + break; + } /*end switch shared_ram_paging */ + + if (ibmtr_debug_trace & TRC_INIT) + DPRINTK("Shared RAM paging code: %02X, " + "mapped RAM size: %dK, shared RAM size: %dK, " + "page mask: %02X\n:", + ti->shared_ram_paging, ti->mapped_ram_size / 2, + ti->avail_shared_ram / 2, ti->page_mask); +#endif /*ENABLE_PAGING */ + } + +#ifndef PCMCIA + /* finish figuring the shared RAM address */ + if (cardpresent == TR_ISA) { + static const __u32 ram_bndry_mask[] = { + 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 + }; + __u32 new_base, rrr_32, chk_base, rbm; + + rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03; + rbm = ram_bndry_mask[rrr_32]; + new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */ + chk_base = new_base + (ti->mapped_ram_size << 9); + if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) { + DPRINTK("Shared RAM for this adapter (%05x) exceeds " + "driver limit (%05x), adapter not started.\n", + chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); + iounmap(t_mmio); + return -ENODEV; + } else { /* seems cool, record what we have figured out */ + ti->sram_base = new_base >> 12; + ibmtr_mem_base = chk_base; + } + } + else ti->sram_base = ti->sram_phys >> 12; + + /* The PCMCIA has already got the interrupt line and the io port, + so no chance of anybody else getting it - MLP */ + if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) { + DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", + irq); + iounmap(t_mmio); + return -ENODEV; + } + /*?? Now, allocate some of the PIO PORTs for this driver.. */ + /* record PIOaddr range as busy */ + if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) { + DPRINTK("Could not grab PIO range. Halting driver.\n"); + free_irq(dev->irq, dev); + iounmap(t_mmio); + return -EBUSY; + } + + if (!version_printed++) { + printk(version); + } +#endif /* !PCMCIA */ + DPRINTK("%s %s found\n", + channel_def[cardpresent - 1], adapter_def(ti->adapter_type)); + DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n", + irq, PIOaddr, ti->mapped_ram_size / 2); + DPRINTK("Hardware address : %pM\n", dev->dev_addr); + if (ti->page_mask) + DPRINTK("Shared RAM paging enabled. " + "Page size: %uK Shared Ram size %dK\n", + ((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2); + else + DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n", + ti->page_mask); + + /* Calculate the maximum DHB we can use */ + /* two cases where avail_shared_ram doesn't equal mapped_ram_size: + 1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical) + 2. 
user has configured adapter for less than avail_shared_ram + but is not using paging (she should use paging, I believe) + */ + if (!ti->page_mask) { + ti->avail_shared_ram= + min(ti->mapped_ram_size,ti->avail_shared_ram); + } + + switch (ti->avail_shared_ram) { + case 16: /* 8KB shared RAM */ + ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048); + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=2; + ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048); + ti->rbuf_len16 = 1032; + ti->rbuf_cnt16=2; + break; + case 32: /* 16KB shared RAM */ + ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=4; + ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096); + ti->rbuf_len16 = 1032; /*1024 usable */ + ti->rbuf_cnt16=4; + break; + case 64: /* 32KB shared RAM */ + ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=6; + ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240); + ti->rbuf_len16 = 1032; + ti->rbuf_cnt16=6; + break; + case 127: /* 63.5KB shared RAM */ + ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=6; + ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384); + ti->rbuf_len16 = 1032; + ti->rbuf_cnt16=16; + break; + case 128: /* 64KB shared RAM */ + ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=6; + ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960); + ti->rbuf_len16 = 1032; + ti->rbuf_cnt16=16; + break; + default: + ti->dhb_size4mb = 2048; + ti->rbuf_len4 = 1032; + ti->rbuf_cnt4=2; + ti->dhb_size16mb = 2048; + ti->rbuf_len16 = 1032; + ti->rbuf_cnt16=2; + break; + } + /* this formula is not smart enough for the paging case + ti->rbuf_cnt = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE - + ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH - + DLC_MAX_STA * STALENGTH - ti->dhb_sizemb * NUM_DHB - + SRBLENGTH - ASBLENGTH) / ti->rbuf_len; + */ + ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN; + ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN; + /*BMS assuming 18 bytes of Routing Information (usually works) */ + DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n", + ti->maxmtu16, ti->maxmtu4); + + dev->base_addr = PIOaddr; /* set the value for device */ + dev->mem_start = ti->sram_base << 12; + dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1; + trdev_init(dev); + return 0; /* Return 0 to indicate we have found a Token Ring card. */ +} /*ibmtr_probe1() */ + +/*****************************************************************************/ + +/* query the adapter for the size of shared RAM */ +/* the function returns the RAM size in units of 512 bytes */ + +static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) +{ + unsigned char avail_sram_code; + static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 }; + /* Adapter gives + 'F' -- use RRR bits 3,2 + 'E' -- 8kb 'D' -- 16kb + 'C' -- 32kb 'A' -- 64KB + 'B' - 64KB less 512 bytes at top + (WARNING ... 
must zero top bytes in INIT */ + + avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM); + if (avail_sram_code) return size_code[avail_sram_code]; + else /* for code 'F', must compute size from RRR(3,2) bits */ + return 1 << + ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4); +} + +/*****************************************************************************/ + +static const struct net_device_ops trdev_netdev_ops = { + .ndo_open = tok_open, + .ndo_stop = tok_close, + .ndo_start_xmit = tok_send_packet, + .ndo_set_multicast_list = tok_set_multicast_list, + .ndo_change_mtu = ibmtr_change_mtu, +}; + +static int __devinit trdev_init(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + + SET_PAGE(ti->srb_page); + ti->open_failure = NO ; + dev->netdev_ops = &trdev_netdev_ops; + + return 0; +} + +/*****************************************************************************/ + +static int tok_init_card(struct net_device *dev) +{ + struct tok_info *ti; + short PIOaddr; + unsigned long i; + + PIOaddr = dev->base_addr; + ti = netdev_priv(dev); + /* Special processing for first interrupt after reset */ + ti->do_tok_int = FIRST_INT; + /* Reset adapter */ + writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + outb(0, PIOaddr + ADAPTRESET); + + schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ + + outb(0, PIOaddr + ADAPTRESETREL); +#ifdef ENABLE_PAGING + if (ti->page_mask) + writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN); +#endif + writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ); + return i? 0 : -EAGAIN; +} + +/*****************************************************************************/ +static int tok_open(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + int i; + + /*the case we were left in a failure state during a previous open */ + if (ti->open_failure == YES) { + DPRINTK("Last time you were disconnected, how about now?\n"); + printk("You can't insert with an ICS connector half-cocked.\n"); + } + + ti->open_status = CLOSED; /* CLOSED or OPEN */ + ti->sap_status = CLOSED; /* CLOSED or OPEN */ + ti->open_failure = NO; /* NO or YES */ + ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */ + + ti->sram_phys &= ~1; /* to reverse what we do in tok_close */ + /* init the spinlock */ + spin_lock_init(&ti->lock); + init_timer(&ti->tr_timer); + + i = tok_init_card(dev); + if (i) return i; + + while (1){ + tok_open_adapter((unsigned long) dev); + i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ); + /* sig catch: estimate opening adapter takes more than .5 sec*/ + if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */ + if (i==0) break; + if (ti->open_status == OPEN && ti->sap_status==OPEN) { + netif_start_queue(dev); + DPRINTK("Adapter is up and running\n"); + return 0; + } + i=schedule_timeout_interruptible(TR_RETRY_INTERVAL); + /* wait 30 seconds */ + if(i!=0) break; /*prob. 
a signal, like the i>24*HZ case above */ + } + outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/ + DPRINTK("TERMINATED via signal\n"); /*BMS useful */ + return -EAGAIN; +} + +/*****************************************************************************/ + +#define COMMAND_OFST 0 +#define OPEN_OPTIONS_OFST 8 +#define NUM_RCV_BUF_OFST 24 +#define RCV_BUF_LEN_OFST 26 +#define DHB_LENGTH_OFST 28 +#define NUM_DHB_OFST 30 +#define DLC_MAX_SAP_OFST 32 +#define DLC_MAX_STA_OFST 33 + +static void tok_open_adapter(unsigned long dev_addr) +{ + struct net_device *dev = (struct net_device *) dev_addr; + struct tok_info *ti; + int i; + + ti = netdev_priv(dev); + SET_PAGE(ti->init_srb_page); + writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); + for (i = 0; i < sizeof(struct dir_open_adapter); i++) + writeb(0, ti->init_srb + i); + writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST); + writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST); + if (ti->ring_speed == 16) { + writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST); + writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST); + writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST); + } else { + writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST); + writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST); + writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST); + } + writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST); + writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST); + writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST); + ti->srb = ti->init_srb; /* We use this one in the interrupt handler */ + ti->srb_page = ti->init_srb_page; + DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n", + readb(ti->init_srb + NUM_DHB_OFST), + ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)), + ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)), + ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST))); + writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); +} + +/*****************************************************************************/ + +static void open_sap(unsigned char type, struct net_device *dev) +{ + int i; + struct tok_info *ti = netdev_priv(dev); + + SET_PAGE(ti->srb_page); + for (i = 0; i < sizeof(struct dlc_open_sap); i++) + writeb(0, ti->srb + i); + +#define MAX_I_FIELD_OFST 14 +#define SAP_VALUE_OFST 16 +#define SAP_OPTIONS_OFST 17 +#define STATION_COUNT_OFST 18 + + writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST); + writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST); + writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST); + writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST); + writeb(type, ti->srb + SAP_VALUE_OFST); + writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); +} + + +/*****************************************************************************/ + +static void tok_set_multicast_list(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + struct netdev_hw_addr *ha; + unsigned char address[4]; + + int i; + + /*BMS the next line is CRUCIAL or you may be sad when you */ + /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ + if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; + address[0] = address[1] = address[2] = address[3] = 0; + netdev_for_each_mc_addr(ha, dev) { + address[0] |= ha->addr[2]; + address[1] |= ha->addr[3]; + address[2] |= ha->addr[4]; + address[3] |= 
ha->addr[5]; + } + SET_PAGE(ti->srb_page); + for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) + writeb(0, ti->srb + i); + +#define FUNCT_ADDRESS_OFST 6 + + writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST); + for (i = 0; i < 4; i++) + writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i); + writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); +#if TR_VERBOSE + DPRINTK("Setting functional address: "); + for (i=0;i<4;i++) printk("%02X ", address[i]); + printk("\n"); +#endif +} + +/*****************************************************************************/ + +#define STATION_ID_OFST 4 + +static netdev_tx_t tok_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct tok_info *ti; + unsigned long flags; + ti = netdev_priv(dev); + + netif_stop_queue(dev); + + /* lock against other CPUs */ + spin_lock_irqsave(&(ti->lock), flags); + + /* Save skb; we'll need it when the adapter asks for the data */ + ti->current_skb = skb; + SET_PAGE(ti->srb_page); + writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST); + writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST); + writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + spin_unlock_irqrestore(&(ti->lock), flags); + return NETDEV_TX_OK; +} + +/*****************************************************************************/ + +static int tok_close(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + + /* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */ + /* unloading the module from memory, and then if a timer pops, ouch */ + del_timer_sync(&ti->tr_timer); + outb(0, dev->base_addr + ADAPTRESET); + ti->sram_phys |= 1; + ti->open_status = CLOSED; + + netif_stop_queue(dev); + DPRINTK("Adapter is closed.\n"); + return 0; +} + +/*****************************************************************************/ + +#define RETCODE_OFST 2 +#define OPEN_ERROR_CODE_OFST 6 +#define ASB_ADDRESS_OFST 8 +#define SRB_ADDRESS_OFST 10 +#define ARB_ADDRESS_OFST 12 +#define SSB_ADDRESS_OFST 14 + +static char *printphase[]= {"Lobe media test","Physical insertion", + "Address verification","Roll call poll","Request Parameters"}; +static char *printerror[]={"Function failure","Signal loss","Reserved", + "Frequency error","Timeout","Ring failure","Ring beaconing", + "Duplicate node address", + "Parameter request-retry count exceeded","Remove received", + "IMPL force received","Duplicate modifier", + "No monitor detected","Monitor contention failed for RPL"}; + +static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page) +{ + if (ti->page_mask) { + *page = (index >> 8) & ti->page_mask; + index &= ~(ti->page_mask << 8); + } + return ti->sram_virt + index; +} + +static void dir_open_adapter (struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + unsigned char ret_code; + __u16 err; + + ti->srb = map_address(ti, + ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)), + &ti->srb_page); + ti->ssb = map_address(ti, + ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)), + &ti->ssb_page); + ti->arb = map_address(ti, + ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)), + &ti->arb_page); + ti->asb = map_address(ti, + ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)), + &ti->asb_page); + ti->current_skb = NULL; + ret_code = readb(ti->init_srb + RETCODE_OFST); + err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST)); + if (!ret_code) { + ti->open_status = OPEN; /* TR adapter is now available */ + if (ti->open_mode == AUTOMATIC) { + DPRINTK("Adapter reopened.\n"); + } + writeb(~SRB_RESP_INT, 
ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD); + open_sap(EXTENDED_SAP, dev); + return; + } + ti->open_failure = YES; + if (ret_code == 7){ + if (err == 0x24) { + if (!ti->auto_speedsave) { + DPRINTK("Open failed: Adapter speed must match " + "ring speed if Automatic Ring Speed Save is " + "disabled.\n"); + ti->open_action = FAIL; + }else + DPRINTK("Retrying open to adjust to " + "ring speed, "); + } else if (err == 0x2d) { + DPRINTK("Physical Insertion: No Monitor Detected, "); + printk("retrying after %ds delay...\n", + TR_RETRY_INTERVAL/HZ); + } else if (err == 0x11) { + DPRINTK("Lobe Media Function Failure (0x11), "); + printk(" retrying after %ds delay...\n", + TR_RETRY_INTERVAL/HZ); + } else { + char **prphase = printphase; + char **prerror = printerror; + int pnr = err / 16 - 1; + int enr = err % 16 - 1; + DPRINTK("TR Adapter misc open failure, error code = "); + if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) || + enr < 0 || + enr >= ARRAY_SIZE(printerror)) + printk("0x%x, invalid Phase/Error.", err); + else + printk("0x%x, Phase: %s, Error: %s\n", err, + prphase[pnr], prerror[enr]); + printk(" retrying after %ds delay...\n", + TR_RETRY_INTERVAL/HZ); + } + } else DPRINTK("open failed: ret_code = %02X..., ", ret_code); + if (ti->open_action != FAIL) { + if (ti->open_mode==AUTOMATIC){ + ti->open_action = REOPEN; + ibmtr_reset_timer(&(ti->tr_timer), dev); + return; + } + wake_up(&ti->wait_for_reset); + return; + } + DPRINTK("FAILURE, CAPUT\n"); +} + +/******************************************************************************/ + +static irqreturn_t tok_interrupt(int irq, void *dev_id) +{ + unsigned char status; + /* unsigned char status_even ; */ + struct tok_info *ti; + struct net_device *dev; +#ifdef ENABLE_PAGING + unsigned char save_srpr; +#endif + + dev = dev_id; +#if TR_VERBOSE + DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq); +#endif + ti = netdev_priv(dev); + if (ti->sram_phys & 1) + return IRQ_NONE; /* PCMCIA card extraction flag */ + spin_lock(&(ti->lock)); +#ifdef ENABLE_PAGING + save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); +#endif + + /* Disable interrupts till processing is finished */ + writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + + /* Reset interrupt for ISA boards */ + if (ti->adapter_int_enable) + outb(0, ti->adapter_int_enable); + else /* used for PCMCIA cards */ + outb(0, ti->global_int_enable); + if (ti->do_tok_int == FIRST_INT){ + initial_tok_int(dev); +#ifdef ENABLE_PAGING + writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); +#endif + spin_unlock(&(ti->lock)); + return IRQ_HANDLED; + } + /* Begin interrupt handler HERE inline to avoid the extra + levels of logic and call depth for the original solution. 
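+	   The ISRP_ODD status byte read below tells us which shared-RAM
+	   control block the adapter has updated; the SRB, ASB, ARB and
+	   SSB responses are then handled in turn, each block's interrupt
+	   bit is reset, and interrupts are re-enabled before returning.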
*/ + status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); + /*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */ + /*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */ + /*BMS status,status_even); */ + + if (status & ADAP_CHK_INT) { + int i; + void __iomem *check_reason; + __u8 check_reason_page = 0; + check_reason = map_address(ti, + ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)), + &check_reason_page); + SET_PAGE(check_reason_page); + + DPRINTK("Adapter check interrupt\n"); + DPRINTK("8 reason bytes follow: "); + for (i = 0; i < 8; i++, check_reason++) + printk("%02X ", (int) readb(check_reason)); + printk("\n"); + writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); + status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN); + DPRINTK("ISRA_EVEN == 0x02%x\n",status); + ti->open_status = CLOSED; + ti->sap_status = CLOSED; + ti->open_mode = AUTOMATIC; + netif_carrier_off(dev); + netif_stop_queue(dev); + ti->open_action = RESTART; + outb(0, dev->base_addr + ADAPTRESET); + ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/ + spin_unlock(&(ti->lock)); + return IRQ_HANDLED; + } + if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) + & (TCR_INT | ERR_INT | ACCESS_INT)) { + DPRINTK("adapter error: ISRP_EVEN : %02x\n", + (int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN)); + writeb(~(TCR_INT | ERR_INT | ACCESS_INT), + ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); + status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/ + DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/ + writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); +#ifdef ENABLE_PAGING + writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); +#endif + spin_unlock(&(ti->lock)); + return IRQ_HANDLED; + } + if (status & SRB_RESP_INT) { /* SRB response */ + SET_PAGE(ti->srb_page); +#if TR_VERBOSE + DPRINTK("SRB resp: cmd=%02X rsp=%02X\n", + readb(ti->srb), readb(ti->srb + RETCODE_OFST)); +#endif + switch (readb(ti->srb)) { /* SRB command check */ + case XMIT_DIR_FRAME:{ + unsigned char xmit_ret_code; + xmit_ret_code = readb(ti->srb + RETCODE_OFST); + if (xmit_ret_code == 0xff) break; + DPRINTK("error on xmit_dir_frame request: %02X\n", + xmit_ret_code); + if (ti->current_skb) { + dev_kfree_skb_irq(ti->current_skb); + ti->current_skb = NULL; + } + /*dev->tbusy = 0;*/ + netif_wake_queue(dev); + if (ti->readlog_pending) + ibmtr_readlog(dev); + break; + } + case XMIT_UI_FRAME:{ + unsigned char xmit_ret_code; + + xmit_ret_code = readb(ti->srb + RETCODE_OFST); + if (xmit_ret_code == 0xff) break; + DPRINTK("error on xmit_ui_frame request: %02X\n", + xmit_ret_code); + if (ti->current_skb) { + dev_kfree_skb_irq(ti->current_skb); + ti->current_skb = NULL; + } + netif_wake_queue(dev); + if (ti->readlog_pending) + ibmtr_readlog(dev); + break; + } + case DIR_OPEN_ADAPTER: + dir_open_adapter(dev); + break; + case DLC_OPEN_SAP: + if (readb(ti->srb + RETCODE_OFST)) { + DPRINTK("open_sap failed: ret_code = %02X, " + "retrying\n", + (int) readb(ti->srb + RETCODE_OFST)); + ti->open_action = REOPEN; + ibmtr_reset_timer(&(ti->tr_timer), dev); + break; + } + ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST); + ti->sap_status = OPEN;/* TR adapter is now available */ + if (ti->open_mode==MANUAL){ + wake_up(&ti->wait_for_reset); + break; + } + netif_wake_queue(dev); + netif_carrier_on(dev); + break; + case DIR_INTERRUPT: + case DIR_MOD_OPEN_PARAMS: + case DIR_SET_GRP_ADDR: + case DIR_SET_FUNC_ADDR: + case DLC_CLOSE_SAP: + if (readb(ti->srb + 
RETCODE_OFST)) + DPRINTK("error on %02X: %02X\n", + (int) readb(ti->srb + COMMAND_OFST), + (int) readb(ti->srb + RETCODE_OFST)); + break; + case DIR_READ_LOG: + if (readb(ti->srb + RETCODE_OFST)){ + DPRINTK("error on dir_read_log: %02X\n", + (int) readb(ti->srb + RETCODE_OFST)); + netif_wake_queue(dev); + break; + } +#if IBMTR_DEBUG_MESSAGES + +#define LINE_ERRORS_OFST 0 +#define INTERNAL_ERRORS_OFST 1 +#define BURST_ERRORS_OFST 2 +#define AC_ERRORS_OFST 3 +#define ABORT_DELIMITERS_OFST 4 +#define LOST_FRAMES_OFST 6 +#define RECV_CONGEST_COUNT_OFST 7 +#define FRAME_COPIED_ERRORS_OFST 8 +#define FREQUENCY_ERRORS_OFST 9 +#define TOKEN_ERRORS_OFST 10 + + DPRINTK("Line errors %02X, Internal errors %02X, " + "Burst errors %02X\n" "A/C errors %02X, " + "Abort delimiters %02X, Lost frames %02X\n" + "Receive congestion count %02X, " + "Frame copied errors %02X\nFrequency errors %02X, " + "Token errors %02X\n", + (int) readb(ti->srb + LINE_ERRORS_OFST), + (int) readb(ti->srb + INTERNAL_ERRORS_OFST), + (int) readb(ti->srb + BURST_ERRORS_OFST), + (int) readb(ti->srb + AC_ERRORS_OFST), + (int) readb(ti->srb + ABORT_DELIMITERS_OFST), + (int) readb(ti->srb + LOST_FRAMES_OFST), + (int) readb(ti->srb + RECV_CONGEST_COUNT_OFST), + (int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST), + (int) readb(ti->srb + FREQUENCY_ERRORS_OFST), + (int) readb(ti->srb + TOKEN_ERRORS_OFST)); +#endif + netif_wake_queue(dev); + break; + default: + DPRINTK("Unknown command %02X encountered\n", + (int) readb(ti->srb)); + } /* end switch SRB command check */ + writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); + } /* if SRB response */ + if (status & ASB_FREE_INT) { /* ASB response */ + SET_PAGE(ti->asb_page); +#if TR_VERBOSE + DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb)); +#endif + + switch (readb(ti->asb)) { /* ASB command check */ + case REC_DATA: + case XMIT_UI_FRAME: + case XMIT_DIR_FRAME: + break; + default: + DPRINTK("unknown command in asb %02X\n", + (int) readb(ti->asb)); + } /* switch ASB command check */ + if (readb(ti->asb + 2) != 0xff) /* checks ret_code */ + DPRINTK("ASB error %02X in cmd %02X\n", + (int) readb(ti->asb + 2), (int) readb(ti->asb)); + writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); + } /* if ASB response */ + +#define STATUS_OFST 6 +#define NETW_STATUS_OFST 6 + + if (status & ARB_CMD_INT) { /* ARB response */ + SET_PAGE(ti->arb_page); +#if TR_VERBOSE + DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb)); +#endif + + switch (readb(ti->arb)) { /* ARB command check */ + case DLC_STATUS: + DPRINTK("DLC_STATUS new status: %02X on station %02X\n", + ntohs(readw(ti->arb + STATUS_OFST)), + ntohs(readw(ti->arb+ STATION_ID_OFST))); + break; + case REC_DATA: + tr_rx(dev); + break; + case RING_STAT_CHANGE:{ + unsigned short ring_status; + ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST)); + if (ibmtr_debug_trace & TRC_INIT) + DPRINTK("Ring Status Change...(0x%x)\n", + ring_status); + if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){ + netif_stop_queue(dev); + netif_carrier_off(dev); + DPRINTK("Remove received, or Auto-removal error" + ", or Lobe fault\n"); + DPRINTK("We'll try to reopen the closed adapter" + " after a %d second delay.\n", + TR_RETRY_INTERVAL/HZ); + /*I was confused: I saw the TR reopening but */ + /*forgot:with an RJ45 in an RJ45/ICS adapter */ + /*but adapter not in the ring, the TR will */ + /* open, and then soon close and come here. 
*/ + ti->open_mode = AUTOMATIC; + ti->open_status = CLOSED; /*12/2000 BMS*/ + ti->open_action = REOPEN; + ibmtr_reset_timer(&(ti->tr_timer), dev); + } else if (ring_status & LOG_OVERFLOW) { + if(netif_queue_stopped(dev)) + ti->readlog_pending = 1; + else + ibmtr_readlog(dev); + } + break; + } + case XMIT_DATA_REQ: + tr_tx(dev); + break; + default: + DPRINTK("Unknown command %02X in arb\n", + (int) readb(ti->arb)); + break; + } /* switch ARB command check */ + writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD); + writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + } /* if ARB response */ + if (status & SSB_RESP_INT) { /* SSB response */ + unsigned char retcode; + SET_PAGE(ti->ssb_page); +#if TR_VERBOSE + DPRINTK("SSB resp: cmd=%02X rsp=%02X\n", + readb(ti->ssb), readb(ti->ssb + 2)); +#endif + + switch (readb(ti->ssb)) { /* SSB command check */ + case XMIT_DIR_FRAME: + case XMIT_UI_FRAME: + retcode = readb(ti->ssb + 2); + if (retcode && (retcode != 0x22))/* checks ret_code */ + DPRINTK("xmit ret_code: %02X xmit error code: " + "%02X\n", + (int)retcode, (int)readb(ti->ssb + 6)); + else + dev->stats.tx_packets++; + break; + case XMIT_XID_CMD: + DPRINTK("xmit xid ret_code: %02X\n", + (int) readb(ti->ssb + 2)); + default: + DPRINTK("Unknown command %02X in ssb\n", + (int) readb(ti->ssb)); + } /* SSB command check */ + writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); + writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + } /* if SSB response */ +#ifdef ENABLE_PAGING + writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); +#endif + writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); + spin_unlock(&(ti->lock)); + return IRQ_HANDLED; +} /*tok_interrupt */ + +/*****************************************************************************/ + +#define INIT_STATUS_OFST 1 +#define INIT_STATUS_2_OFST 2 +#define ENCODED_ADDRESS_OFST 8 + +static void initial_tok_int(struct net_device *dev) +{ + + __u32 encoded_addr, hw_encoded_addr; + struct tok_info *ti; + unsigned char init_status; /*BMS 12/2000*/ + + ti = netdev_priv(dev); + + ti->do_tok_int = NOT_FIRST; + + /* we assign the shared-ram address for ISA devices */ + writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); +#ifndef PCMCIA + ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram); +#endif + ti->init_srb = map_address(ti, + ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)), + &ti->init_srb_page); + if (ti->page_mask && ti->avail_shared_ram == 127) { + void __iomem *last_512; + __u8 last_512_page=0; + int i; + last_512 = map_address(ti, 0xfe00, &last_512_page); + /* initialize high section of ram (if necessary) */ + SET_PAGE(last_512_page); + for (i = 0; i < 512; i++) + writeb(0, last_512 + i); + } + SET_PAGE(ti->init_srb_page); + +#if TR_VERBOSE + { + int i; + + DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page); + DPRINTK("init_srb(%p):", ti->init_srb ); + for (i = 0; i < 20; i++) + printk("%02X ", (int) readb(ti->init_srb + i)); + printk("\n"); + } +#endif + + hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST); + encoded_addr = ntohs(hw_encoded_addr); + init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/ + readb(ti->init_srb+offsetof(struct srb_init_response,init_status)); + /*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/ + ti->ring_speed = init_status & 0x01 ? 
16 : 4; + DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n", + ti->ring_speed, (unsigned int)dev->mem_start); + ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0; + + if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset); + else tok_open_adapter((unsigned long)dev); + +} /*initial_tok_int() */ + +/*****************************************************************************/ + +#define CMD_CORRELATE_OFST 1 +#define DHB_ADDRESS_OFST 6 + +#define FRAME_LENGTH_OFST 6 +#define HEADER_LENGTH_OFST 8 +#define RSAP_VALUE_OFST 9 + +static void tr_tx(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data; + unsigned int hdr_len; + __u32 dhb=0,dhb_base; + void __iomem *dhbuf = NULL; + unsigned char xmit_command; + int i,dhb_len=0x4000,src_len,src_offset; + struct trllc *llc; + struct srb_xmit xsrb; + __u8 dhb_page = 0; + __u8 llc_ssap; + + SET_PAGE(ti->asb_page); + + if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n"); + + /* in providing the transmit interrupts, is telling us it is ready for + data and providing a shared memory address for us to stuff with data. + Here we compute the effective address where we will place data. + */ + SET_PAGE(ti->arb_page); + dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST)); + if (ti->page_mask) { + dhb_page = (dhb_base >> 8) & ti->page_mask; + dhb=dhb_base & ~(ti->page_mask << 8); + } + dhbuf = ti->sram_virt + dhb; + + /* Figure out the size of the 802.5 header */ + if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */ + hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN; + else + hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8) + + sizeof(struct trh_hdr) - TR_MAXRIFLEN; + + llc = (struct trllc *) (ti->current_skb->data + hdr_len); + + llc_ssap = llc->ssap; + SET_PAGE(ti->srb_page); + memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb)); + SET_PAGE(ti->asb_page); + xmit_command = xsrb.command; + + writeb(xmit_command, ti->asb + COMMAND_OFST); + writew(xsrb.station_id, ti->asb + STATION_ID_OFST); + writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST); + writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST); + writeb(0, ti->asb + RETCODE_OFST); + if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) { + writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST); + writeb(0x0e, ti->asb + HEADER_LENGTH_OFST); + SET_PAGE(dhb_page); + writeb(AC, dhbuf); + writeb(LLC_FRAME, dhbuf + 1); + for (i = 0; i < TR_ALEN; i++) + writeb((int) 0x0FF, dhbuf + i + 2); + for (i = 0; i < TR_ALEN; i++) + writeb(0, dhbuf + i + TR_ALEN + 2); + writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + return; + } + /* + * the token ring packet is copied from sk_buff to the adapter + * buffer identified in the command data received with the interrupt. 
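+	 * When shared-RAM paging is enabled the DHB may straddle 16KB
+	 * pages, so the copy loop below re-derives the page number
+	 * (dhb_page) and the bytes remaining on the current page
+	 * (dhb_len) before each memcpy_toio, then advances to the next
+	 * page until the whole frame has been copied.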
+ */ + writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST); + writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST); + src_len=ti->current_skb->len; + src_offset=0; + dhb=dhb_base; + while(1) { + if (ti->page_mask) { + dhb_page=(dhb >> 8) & ti->page_mask; + dhb=dhb & ~(ti->page_mask << 8); + dhb_len=0x4000-dhb; /* remaining size of this page */ + } + dhbuf = ti->sram_virt + dhb; + SET_PAGE(dhb_page); + if (src_len > dhb_len) { + memcpy_toio(dhbuf,&ti->current_skb->data[src_offset], + dhb_len); + src_len -= dhb_len; + src_offset += dhb_len; + dhb_base+=dhb_len; + dhb=dhb_base; + continue; + } + memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len); + break; + } + writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + dev->stats.tx_bytes += ti->current_skb->len; + dev_kfree_skb_irq(ti->current_skb); + ti->current_skb = NULL; + netif_wake_queue(dev); + if (ti->readlog_pending) + ibmtr_readlog(dev); +} /*tr_tx */ + +/*****************************************************************************/ + + +#define RECEIVE_BUFFER_OFST 6 +#define LAN_HDR_LENGTH_OFST 8 +#define DLC_HDR_LENGTH_OFST 9 + +#define DSAP_OFST 0 +#define SSAP_OFST 1 +#define LLC_OFST 2 +#define PROTID_OFST 3 +#define ETHERTYPE_OFST 6 + +static void tr_rx(struct net_device *dev) +{ + struct tok_info *ti = netdev_priv(dev); + __u32 rbuffer; + void __iomem *rbuf, *rbufdata, *llc; + __u8 rbuffer_page = 0; + unsigned char *data; + unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length; + unsigned char dlc_hdr_len; + struct sk_buff *skb; + unsigned int skb_size = 0; + int IPv4_p = 0; + unsigned int chksum = 0; + struct iphdr *iph; + struct arb_rec_req rarb; + + SET_PAGE(ti->arb_page); + memcpy_fromio(&rarb, ti->arb, sizeof(rarb)); + rbuffer = ntohs(rarb.rec_buf_addr) ; + rbuf = map_address(ti, rbuffer, &rbuffer_page); + + SET_PAGE(ti->asb_page); + + if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n"); + + writeb(REC_DATA, ti->asb + COMMAND_OFST); + writew(rarb.station_id, ti->asb + STATION_ID_OFST); + writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST); + + lan_hdr_len = rarb.lan_hdr_len; + if (lan_hdr_len > sizeof(struct trh_hdr)) { + DPRINTK("Linux cannot handle greater than 18 bytes RIF\n"); + return; + } /*BMS I added this above just to be very safe */ + dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST); + hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr); + + SET_PAGE(rbuffer_page); + llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len; + +#if TR_VERBOSE + DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n", + (__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len); + DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n", + llc, ntohs(rarb.rec_buf_addr), dev->mem_start); + DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, " + "ethertype: %04X\n", + (int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST), + (int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST), + (int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2), + (int) ntohs(readw(llc + ETHERTYPE_OFST))); +#endif + if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { + SET_PAGE(ti->asb_page); + writeb(DATA_LOST, ti->asb + RETCODE_OFST); + dev->stats.rx_dropped++; + writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + return; + } + length = ntohs(rarb.frame_len); + if (readb(llc + DSAP_OFST) == EXTENDED_SAP && + readb(llc + SSAP_OFST) == EXTENDED_SAP && + length >= hdr_len) IPv4_p = 1; +#if TR_VERBOSE +#define SADDR_OFST 
8
+#define DADDR_OFST 2
+
+	if (!IPv4_p) {
+
+		void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
+		u8 saddr[6];
+		u8 daddr[6];
+		int i;
+		for (i = 0 ; i < 6 ; i++)
+			saddr[i] = readb(trhhdr + SADDR_OFST + i);
+		for (i = 0 ; i < 6 ; i++)
+			daddr[i] = readb(trhhdr + DADDR_OFST + i);
+		DPRINTK("Probably non-IP frame received.\n");
+		DPRINTK("ssap: %02X dsap: %02X "
+			"saddr: %pM daddr: %pM\n",
+			readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
+			saddr, daddr);
+	}
+#endif
+
+	/*BMS handle the case she comes in with few hops but leaves with many */
+	skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);
+
+	if (!(skb = dev_alloc_skb(skb_size))) {
+		DPRINTK("out of memory. frame dropped.\n");
+		dev->stats.rx_dropped++;
+		SET_PAGE(ti->asb_page);
+		writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+		return;
+	}
+	/*BMS again, if she comes in with few but leaves with many */
+	skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
+	skb_put(skb, length);
+	data = skb->data;
+	rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
+	rbufdata = rbuf + offsetof(struct rec_buf, data);
+
+	if (IPv4_p) {
+		/* Copy the headers without checksumming */
+		memcpy_fromio(data, rbufdata, hdr_len);
+
+		/* Watch for padded packets and bogons */
+		iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
+		ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
+		length -= hdr_len;
+		if ((ip_len <= length) && (ip_len > 7))
+			length = ip_len;
+		data += hdr_len;
+		rbuffer_len -= hdr_len;
+		rbufdata += hdr_len;
+	}
+	/* Copy the payload... */
+#define BUFFER_POINTER_OFST	2
+#define BUFFER_LENGTH_OFST	6
+	for (;;) {
+		if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
+			DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
+					length,rbuffer_len);
+		if (IPv4_p)
+			chksum=csum_partial_copy_nocheck((void*)rbufdata,
+			    data,length<rbuffer_len?length:rbuffer_len,chksum);
+		else
+			memcpy_fromio(data, rbufdata, rbuffer_len);
+		rbuffer = ntohs(readw(rbuf + BUFFER_POINTER_OFST));
+		if (!rbuffer)
+			break;
+		rbuffer -= 2;
+		length -= rbuffer_len;
+		data += rbuffer_len;
+		rbuf = map_address(ti, rbuffer, &rbuffer_page);
+		SET_PAGE(rbuffer_page);
+		rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
+		rbufdata = rbuf + offsetof(struct rec_buf, data);
+	}
+	SET_PAGE(ti->asb_page);
+	writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
+
+	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
+
+	skb->protocol = tr_type_trans(skb, dev);
+	if (IPv4_p) {
+		skb->csum = chksum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	netif_rx(skb);
+}				/*tr_rx */
+
+/*****************************************************************************/
+
+static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
+{
+	tmr->expires = jiffies + TR_RETRY_INTERVAL;
+	tmr->data = (unsigned long) dev;
+	tmr->function = tok_rerun;
+	init_timer(tmr);
+	add_timer(tmr);
+}
+
+/*****************************************************************************/
+
+static void tok_rerun(unsigned long dev_addr)
+{
+	struct net_device *dev = (struct net_device *)dev_addr;
+	struct tok_info *ti = netdev_priv(dev);
+
+	if ( ti->open_action == RESTART){
+		ti->do_tok_int = FIRST_INT;
+		outb(0, dev->base_addr + ADAPTRESETREL);
+#ifdef ENABLE_PAGING
+		if (ti->page_mask)
+			writeb(SRPR_ENABLE_PAGING,
+				ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+
+		writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+	} else
+		tok_open_adapter(dev_addr);
+}
+
+/*****************************************************************************/
+
+static void ibmtr_readlog(struct net_device *dev)
+{
+	struct tok_info *ti;
+
+	ti = netdev_priv(dev);
+
+	ti->readlog_pending = 0;
+	SET_PAGE(ti->srb_page);
+	writeb(DIR_READ_LOG, ti->srb);
+	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + 
ISRP_EVEN); + writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); + + netif_stop_queue(dev); + +} + +/*****************************************************************************/ + +static int ibmtr_change_mtu(struct net_device *dev, int mtu) +{ + struct tok_info *ti = netdev_priv(dev); + + if (ti->ring_speed == 16 && mtu > ti->maxmtu16) + return -EINVAL; + if (ti->ring_speed == 4 && mtu > ti->maxmtu4) + return -EINVAL; + dev->mtu = mtu; + return 0; +} + +/*****************************************************************************/ +#ifdef MODULE + +/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */ +static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS]; +static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 }; +static int irq[IBMTR_MAX_ADAPTERS]; +static int mem[IBMTR_MAX_ADAPTERS]; + +MODULE_LICENSE("GPL"); + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); + +static int __init ibmtr_init(void) +{ + int i; + int count=0; + + find_turbo_adapters(io); + + for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) { + struct net_device *dev; + irq[i] = 0; + mem[i] = 0; + dev = alloc_trdev(sizeof(struct tok_info)); + if (dev == NULL) { + if (i == 0) + return -ENOMEM; + break; + } + dev->base_addr = io[i]; + dev->irq = irq[i]; + dev->mem_start = mem[i]; + + if (ibmtr_probe_card(dev)) { + free_netdev(dev); + continue; + } + dev_ibmtr[i] = dev; + count++; + } + if (count) return 0; + printk("ibmtr: register_netdev() returned non-zero.\n"); + return -EIO; +} +module_init(ibmtr_init); + +static void __exit ibmtr_cleanup(void) +{ + int i; + + for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){ + if (!dev_ibmtr[i]) + continue; + unregister_netdev(dev_ibmtr[i]); + ibmtr_cleanup_card(dev_ibmtr[i]); + free_netdev(dev_ibmtr[i]); + } +} +module_exit(ibmtr_cleanup); +#endif diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c new file mode 100644 index 00000000..9354ca9d --- /dev/null +++ b/drivers/net/tokenring/lanstreamer.c @@ -0,0 +1,1918 @@ +/* + * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter + * + * Written By: Mike Sullivan, IBM Corporation + * + * Copyright (C) 1999 IBM Corporation + * + * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC + * chipset. + * + * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic + * chipsets) written by: + * 1999 Peter De Schrijver All Rights Reserved + * 1999 Mike Phillips (phillim@amtrak.com) + * + * Base Driver Skeleton: + * Written 1993-94 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * 12/10/99 - Alpha Release 0.1.0 + * First release to the public + * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing + * malloc free checks, reviewed code. + * 03/13/00 - Added spinlocks for smp + * 03/08/01 - Added support for module_init() and module_exit() + * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue + * calls and other incorrectness - Kent Yoder + * 11/05/01 - Restructured the interrupt function, added delays, reduced the + * the number of TX descriptors to 1, which together can prevent + * the card from locking up the box - + * 09/27/02 - New PCI interface + bug fix. - + * 11/13/02 - Removed free_irq calls which could cause a hang, added + * netif_carrier_{on|off} - + * + * To Do: + * + * + * If Problems do Occur + * Most problems can be rectified by either closing and opening the interface + * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult + * if compiled into the kernel). + */ + +/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */ + +#define STREAMER_DEBUG 0 +#define STREAMER_DEBUG_PACKETS 0 + +/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel. + * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the + * kernel. + * Intended to be used to create a ring-error reporting network module + * i.e. it will give you the source address of beaconers on the ring + */ + +#define STREAMER_NETWORK_MONITOR 0 + +/* #define CONFIG_PROC_FS */ + +/* + * Allow or disallow ioctl's for debugging + */ + +#define STREAMER_IOCTL 0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "lanstreamer.h" + +#if (BITS_PER_LONG == 64) +#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int +#endif + + +/* I've got to put some intelligence into the version number so that Peter and I know + * which version of the code somebody has got. + * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author. + * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike + * + * Official releases will only have an a.b.c version number format. 
+ */ + +static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n" + " v0.5.3 11/13/02 - Kent Yoder"; + +static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = { + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,}, + {} /* terminating entry */ +}; +MODULE_DEVICE_TABLE(pci,streamer_pci_tbl); + + +static char *open_maj_error[] = { + "No error", "Lobe Media Test", "Physical Insertion", + "Address Verification", "Neighbor Notification (Ring Poll)", + "Request Parameters", "FDX Registration Request", + "FDX Lobe Media Test", "FDX Duplicate Address Check", + "Unknown stage" +}; + +static char *open_min_error[] = { + "No error", "Function Failure", "Signal Lost", "Wire Fault", + "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing", + "Duplicate Node Address", "Request Parameters", "Remove Received", + "Reserved", "Reserved", "No Monitor Detected for RPL", + "Monitor Contention failer for RPL", "FDX Protocol Error" +}; + +/* Module parameters */ + +/* Ring Speed 0,4,16 + * 0 = Autosense + * 4,16 = Selected speed only, no autosense + * This allows the card to be the first on the ring + * and become the active monitor. + * + * WARNING: Some hubs will allow you to insert + * at the wrong speed + */ + +static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, }; + +module_param_array(ringspeed, int, NULL, 0); + +/* Packet buffer size */ + +static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, }; + +module_param_array(pkt_buf_sz, int, NULL, 0); + +/* Message Level */ + +static int message_level[STREAMER_MAX_ADAPTERS] = { 1, }; + +module_param_array(message_level, int, NULL, 0); + +#if STREAMER_IOCTL +static int streamer_ioctl(struct net_device *, struct ifreq *, int); +#endif + +static int streamer_reset(struct net_device *dev); +static int streamer_open(struct net_device *dev); +static netdev_tx_t streamer_xmit(struct sk_buff *skb, + struct net_device *dev); +static int streamer_close(struct net_device *dev); +static void streamer_set_rx_mode(struct net_device *dev); +static irqreturn_t streamer_interrupt(int irq, void *dev_id); +static int streamer_set_mac_address(struct net_device *dev, void *addr); +static void streamer_arb_cmd(struct net_device *dev); +static int streamer_change_mtu(struct net_device *dev, int mtu); +static void streamer_srb_bh(struct net_device *dev); +static void streamer_asb_bh(struct net_device *dev); +#if STREAMER_NETWORK_MONITOR +#ifdef CONFIG_PROC_FS +static int streamer_proc_info(char *buffer, char **start, off_t offset, + int length, int *eof, void *data); +static int sprintf_info(char *buffer, struct net_device *dev); +struct streamer_private *dev_streamer=NULL; +#endif +#endif + +static const struct net_device_ops streamer_netdev_ops = { + .ndo_open = streamer_open, + .ndo_stop = streamer_close, + .ndo_start_xmit = streamer_xmit, + .ndo_change_mtu = streamer_change_mtu, +#if STREAMER_IOCTL + .ndo_do_ioctl = streamer_ioctl, +#endif + .ndo_set_multicast_list = streamer_set_rx_mode, + .ndo_set_mac_address = streamer_set_mac_address, +}; + +static int __devinit streamer_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct streamer_private *streamer_priv; + unsigned long pio_start, pio_end, pio_flags, pio_len; + unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; + int rc = 0; + static int card_no=-1; + u16 pcr; + +#if STREAMER_DEBUG + printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev); +#endif + + card_no++; + dev = alloc_trdev(sizeof(*streamer_priv)); + if (dev==NULL) { + 
printk(KERN_ERR "lanstreamer: out of memory.\n"); + return -ENOMEM; + } + + streamer_priv = netdev_priv(dev); + +#if STREAMER_NETWORK_MONITOR +#ifdef CONFIG_PROC_FS + if (!dev_streamer) + create_proc_read_entry("streamer_tr", 0, init_net.proc_net, + streamer_proc_info, NULL); + streamer_priv->next = dev_streamer; + dev_streamer = streamer_priv; +#endif +#endif + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + printk(KERN_ERR "%s: No suitable PCI mapping available.\n", + dev->name); + rc = -ENODEV; + goto err_out; + } + + rc = pci_enable_device(pdev); + if (rc) { + printk(KERN_ERR "lanstreamer: unable to enable pci device\n"); + rc=-EIO; + goto err_out; + } + + pci_set_master(pdev); + + rc = pci_set_mwi(pdev); + if (rc) { + printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n"); + goto err_out_disable; + } + + pio_start = pci_resource_start(pdev, 0); + pio_end = pci_resource_end(pdev, 0); + pio_flags = pci_resource_flags(pdev, 0); + pio_len = pci_resource_len(pdev, 0); + + mmio_start = pci_resource_start(pdev, 1); + mmio_end = pci_resource_end(pdev, 1); + mmio_flags = pci_resource_flags(pdev, 1); + mmio_len = pci_resource_len(pdev, 1); + +#if STREAMER_DEBUG + printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n", + pio_start, pio_end, pio_len, pio_flags); + printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n", + mmio_start, mmio_end, mmio_flags, mmio_len); +#endif + + if (!request_region(pio_start, pio_len, "lanstreamer")) { + printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n", + pio_start); + rc= -EBUSY; + goto err_out_mwi; + } + + if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) { + printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n", + mmio_start); + rc= -EBUSY; + goto err_out_free_pio; + } + + streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len); + if (streamer_priv->streamer_mmio == NULL) { + printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n", + mmio_start); + rc= -EIO; + goto err_out_free_mmio; + } + + init_waitqueue_head(&streamer_priv->srb_wait); + init_waitqueue_head(&streamer_priv->trb_wait); + + dev->netdev_ops = &streamer_netdev_ops; + dev->irq = pdev->irq; + dev->base_addr=pio_start; + SET_NETDEV_DEV(dev, &pdev->dev); + + streamer_priv->streamer_card_name = (char *)pdev->resource[0].name; + streamer_priv->pci_dev = pdev; + + if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000)) + streamer_priv->pkt_buf_sz = PKT_BUF_SZ; + else + streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no]; + + streamer_priv->streamer_ring_speed = ringspeed[card_no]; + streamer_priv->streamer_message_level = message_level[card_no]; + + pci_set_drvdata(pdev, dev); + + spin_lock_init(&streamer_priv->streamer_lock); + + pci_read_config_word (pdev, PCI_COMMAND, &pcr); + pcr |= PCI_COMMAND_SERR; + pci_write_config_word (pdev, PCI_COMMAND, pcr); + + printk("%s\n", version); + printk("%s: %s. 
I/O at %hx, MMIO at %p, using irq %d\n",dev->name, + streamer_priv->streamer_card_name, + (unsigned int) dev->base_addr, + streamer_priv->streamer_mmio, + dev->irq); + + if (streamer_reset(dev)) + goto err_out_unmap; + + rc = register_netdev(dev); + if (rc) + goto err_out_unmap; + return 0; + +err_out_unmap: + iounmap(streamer_priv->streamer_mmio); +err_out_free_mmio: + release_mem_region(mmio_start, mmio_len); +err_out_free_pio: + release_region(pio_start, pio_len); +err_out_mwi: + pci_clear_mwi(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out: + free_netdev(dev); +#if STREAMER_DEBUG + printk("lanstreamer: Exit error %x\n",rc); +#endif + return rc; +} + +static void __devexit streamer_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev=pci_get_drvdata(pdev); + struct streamer_private *streamer_priv; + +#if STREAMER_DEBUG + printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev); +#endif + + if (dev == NULL) { + printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n"); + return; + } + + streamer_priv=netdev_priv(dev); + if (streamer_priv == NULL) { + printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n"); + return; + } + +#if STREAMER_NETWORK_MONITOR +#ifdef CONFIG_PROC_FS + { + struct streamer_private **p, **next; + + for (p = &dev_streamer; *p; p = next) { + next = &(*p)->next; + if (*p == streamer_priv) { + *p = *next; + break; + } + } + if (!dev_streamer) + remove_proc_entry("streamer_tr", init_net.proc_net); + } +#endif +#endif + + unregister_netdev(dev); + iounmap(streamer_priv->streamer_mmio); + release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1)); + release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0)); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); + pci_set_drvdata(pdev, NULL); +} + + +static int streamer_reset(struct net_device *dev) +{ + struct streamer_private *streamer_priv; + __u8 __iomem *streamer_mmio; + unsigned long t; + unsigned int uaa_addr; + struct sk_buff *skb = NULL; + __u16 misr; + + streamer_priv = netdev_priv(dev); + streamer_mmio = streamer_priv->streamer_mmio; + + writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL); + t = jiffies; + /* Hold soft reset bit for a while */ + ssleep(1); + + writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET, + streamer_mmio + BCTL); + +#if STREAMER_DEBUG + printk("BCTL: %x\n", readw(streamer_mmio + BCTL)); + printk("GPR: %x\n", readw(streamer_mmio + GPR)); + printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK)); +#endif + writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL ); + + if (streamer_priv->streamer_ring_speed == 0) { /* Autosense */ + writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE, + streamer_mmio + GPR); + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Ringspeed autosense mode on\n", + dev->name); + } else if (streamer_priv->streamer_ring_speed == 16) { + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", + dev->name); + writew(GPR_16MBPS, streamer_mmio + GPR); + } else if (streamer_priv->streamer_ring_speed == 4) { + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", + dev->name); + writew(0, streamer_mmio + GPR); + } + + skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); + if (!skb) { + printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n", + dev->name); + } else { 
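+		/* Build a single throw-away receive descriptor inside the
+		 * skb itself: the descriptor sits at the start of skb->data
+		 * and its 512-byte data area follows it.  Both are DMA
+		 * mapped, and the descriptor's bus address is handed to the
+		 * adapter via RXBDA so the post-reset diagnostics have a
+		 * buffer to receive into.
+		 */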
+ struct streamer_rx_desc *rx_ring; + u8 *data; + + rx_ring=(struct streamer_rx_desc *)skb->data; + data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc); + rx_ring->forward=0; + rx_ring->status=0; + rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data, + 512, PCI_DMA_FROMDEVICE)); + rx_ring->framelen_buflen=512; + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)), + streamer_mmio+RXBDA); + } + +#if STREAMER_DEBUG + printk("GPR = %x\n", readw(streamer_mmio + GPR)); +#endif + /* start solo init */ + writew(SISR_MI, streamer_mmio + SISR_MASK_SUM); + + while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { + msleep_interruptible(100); + if (time_after(jiffies, t + 40 * HZ)) { + printk(KERN_ERR + "IBM PCI tokenring card not responding\n"); + release_region(dev->base_addr, STREAMER_IO_SPACE); + if (skb) + dev_kfree_skb(skb); + return -1; + } + } + writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM); + misr = readw(streamer_mmio + MISR_RUM); + writew(~misr, streamer_mmio + MISR_RUM); + + if (skb) + dev_kfree_skb(skb); /* release skb used for diagnostics */ + +#if STREAMER_DEBUG + printk("LAPWWO: %x, LAPA: %x LAPE: %x\n", + readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA), + readw(streamer_mmio + LAPE)); +#endif + +#if STREAMER_DEBUG + { + int i; + writew(readw(streamer_mmio + LAPWWO), + streamer_mmio + LAPA); + printk("initialization response srb dump: "); + for (i = 0; i < 10; i++) + printk("%x:", + ntohs(readw(streamer_mmio + LAPDINC))); + printk("\n"); + } +#endif + + writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA); + if (readw(streamer_mmio + LAPD)) { + printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n", + ntohs(readw(streamer_mmio + LAPD))); + release_region(dev->base_addr, STREAMER_IO_SPACE); + return -1; + } + + writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA); + uaa_addr = ntohs(readw(streamer_mmio + LAPDINC)); + readw(streamer_mmio + LAPDINC); /* skip over Level.Addr field */ + streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC)); + streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC)); + +#if STREAMER_DEBUG + printk("UAA resides at %x\n", uaa_addr); +#endif + + /* setup uaa area for access with LAPD */ + { + int i; + __u16 addr; + writew(uaa_addr, streamer_mmio + LAPA); + for (i = 0; i < 6; i += 2) { + addr=ntohs(readw(streamer_mmio+LAPDINC)); + dev->dev_addr[i]= (addr >> 8) & 0xff; + dev->dev_addr[i+1]= addr & 0xff; + } +#if STREAMER_DEBUG + printk("Adapter address: %pM\n", dev->dev_addr); +#endif + } + return 0; +} + +static int streamer_open(struct net_device *dev) +{ + struct streamer_private *streamer_priv = netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + unsigned long flags; + char open_error[255]; + int i, open_finished = 1; + __u16 srb_word; + __u16 srb_open; + int rc; + + if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) { + rc=streamer_reset(dev); + } + + if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) { + return -EAGAIN; + } +#if STREAMER_DEBUG + printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM)); + printk("pending ints: %x\n", readw(streamer_mmio + SISR)); +#endif + + writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */ + writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */ + + /* adapter is closed, so SRB is pointed to by LAPWWO */ + 
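+	/* Adapter memory is reached through a small windowed interface:
+	 * an adapter offset is written to LAPA, then LAPD reads or writes
+	 * the 16-bit word at that offset, while LAPDINC does the same and
+	 * post-increments LAPA.  Illustrative access pattern only:
+	 *
+	 *	writew(offset, streamer_mmio + LAPA);
+	 *	word = ntohs(readw(streamer_mmio + LAPDINC));  (LAPA is now offset+2)
+	 */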
writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA); + +#if STREAMER_DEBUG + printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO), + readw(streamer_mmio + LAPA)); + printk("LAPE: %x\n", readw(streamer_mmio + LAPE)); + printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK)); +#endif + do { + for (i = 0; i < SRB_COMMAND_SIZE; i += 2) { + writew(0, streamer_mmio + LAPDINC); + } + + writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA); + writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */ + writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC); + writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC); + + writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA); +#if STREAMER_NETWORK_MONITOR + /* If Network Monitor, instruct card to copy MAC frames through the ARB */ + writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */ +#else + writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */ +#endif + + if (streamer_priv->streamer_laa[0]) { + writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA); + writew(htons((streamer_priv->streamer_laa[0] << 8) | + streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC); + writew(htons((streamer_priv->streamer_laa[2] << 8) | + streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC); + writew(htons((streamer_priv->streamer_laa[4] << 8) | + streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC); + memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len); + } + + /* save off srb open offset */ + srb_open = readw(streamer_mmio + LAPWWO); +#if STREAMER_DEBUG + writew(readw(streamer_mmio + LAPWWO), + streamer_mmio + LAPA); + printk("srb open request:\n"); + for (i = 0; i < 16; i++) { + printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); + } + printk("\n"); +#endif + spin_lock_irqsave(&streamer_priv->streamer_lock, flags); + streamer_priv->srb_queued = 1; + + /* signal solo that SRB command has been issued */ + writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); + spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags); + + while (streamer_priv->srb_queued) { + interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ); + if (signal_pending(current)) { + printk(KERN_WARNING "%s: SRB timed out.\n", dev->name); + printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n", + readw(streamer_mmio + SISR), + readw(streamer_mmio + MISR_RUM), + readw(streamer_mmio + LISR)); + streamer_priv->srb_queued = 0; + break; + } + } + +#if STREAMER_DEBUG + printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK)); + printk("srb open response:\n"); + writew(srb_open, streamer_mmio + LAPA); + for (i = 0; i < 10; i++) { + printk("%x:", + ntohs(readw(streamer_mmio + LAPDINC))); + } +#endif + + /* If we get the same return response as we set, the interrupt wasn't raised and the open + * timed out. 
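+	 * (STREAMER_CLEAR_RET_CODE was written into the response field
+	 * before the command was issued, so reading it back unchanged
+	 * means the adapter never processed the SRB.)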
+ */ + writew(srb_open + 2, streamer_mmio + LAPA); + srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8; + if (srb_word == STREAMER_CLEAR_RET_CODE) { + printk(KERN_WARNING "%s: Adapter Open time out or error.\n", + dev->name); + return -EIO; + } + + if (srb_word != 0) { + if (srb_word == 0x07) { + if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */ + printk(KERN_WARNING "%s: Retrying at different ring speed\n", + dev->name); + open_finished = 0; + } else { + __u16 error_code; + + writew(srb_open + 6, streamer_mmio + LAPA); + error_code = ntohs(readw(streamer_mmio + LAPD)); + strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]); + strcat(open_error, " - "); + strcat(open_error, open_min_error[(error_code & 0x0f)]); + + if (!streamer_priv->streamer_ring_speed && + ((error_code & 0x0f) == 0x0d)) + { + printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name); + printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name); + free_irq(dev->irq, dev); + return -EIO; + } + + printk(KERN_WARNING "%s: %s\n", + dev->name, open_error); + free_irq(dev->irq, dev); + return -EIO; + + } /* if autosense && open_finished */ + } else { + printk(KERN_WARNING "%s: Bad OPEN response: %x\n", + dev->name, srb_word); + free_irq(dev->irq, dev); + return -EIO; + } + } else + open_finished = 1; + } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */ + + writew(srb_open + 18, streamer_mmio + LAPA); + srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8; + if (srb_word & (1 << 3)) + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name); + + if (srb_word & 1) + streamer_priv->streamer_ring_speed = 16; + else + streamer_priv->streamer_ring_speed = 4; + + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Opened in %d Mbps mode\n", + dev->name, + streamer_priv->streamer_ring_speed); + + writew(srb_open + 8, streamer_mmio + LAPA); + streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC)); + streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC)); + streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC)); + readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */ + streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC)); + + streamer_priv->streamer_receive_options = 0x00; + streamer_priv->streamer_copy_all_options = 0; + + /* setup rx ring */ + /* enable rx channel */ + writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM); + + /* setup rx descriptors */ + streamer_priv->streamer_rx_ring= + kmalloc( sizeof(struct streamer_rx_desc)* + STREAMER_RX_RING_SIZE,GFP_KERNEL); + if (!streamer_priv->streamer_rx_ring) { + printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name); + return -EIO; + } + + for (i = 0; i < STREAMER_RX_RING_SIZE; i++) { + struct sk_buff *skb; + + skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); + if (skb == NULL) + break; + + skb->dev = dev; + + streamer_priv->streamer_rx_ring[i].forward = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1], + sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); + streamer_priv->streamer_rx_ring[i].status = 0; + streamer_priv->streamer_rx_ring[i].buffer = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, + streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); + streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz; + streamer_priv->rx_ring_skb[i] = 
skb; + } + streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], + sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); + + if (i == 0) { + printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name); + free_irq(dev->irq, dev); + return -EIO; + } + + streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */ + + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], + sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), + streamer_mmio + RXBDA); + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1], + sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), + streamer_mmio + RXLBDA); + + /* set bus master interrupt event mask */ + writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); + + + /* setup tx ring */ + streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)* + STREAMER_TX_RING_SIZE,GFP_KERNEL); + if (!streamer_priv->streamer_tx_ring) { + printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name); + return -EIO; + } + + writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */ + for (i = 0; i < STREAMER_TX_RING_SIZE; i++) { + streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, + &streamer_priv->streamer_tx_ring[i + 1], + sizeof(struct streamer_tx_desc), + PCI_DMA_TODEVICE)); + streamer_priv->streamer_tx_ring[i].status = 0; + streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0; + streamer_priv->streamer_tx_ring[i].buffer = 0; + streamer_priv->streamer_tx_ring[i].buflen = 0; + streamer_priv->streamer_tx_ring[i].rsvd1 = 0; + streamer_priv->streamer_tx_ring[i].rsvd2 = 0; + streamer_priv->streamer_tx_ring[i].rsvd3 = 0; + } + streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0], + sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)); + + streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE; + streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */ + streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1; + + /* set Busmaster interrupt event mask (handle receives on interrupt only */ + writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); + /* set system event interrupt mask */ + writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM); + +#if STREAMER_DEBUG + printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM)); + printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK)); +#endif + +#if STREAMER_NETWORK_MONITOR + + writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); + printk("%s: Node Address: %04x:%04x:%04x\n", dev->name, + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC))); + readw(streamer_mmio + LAPDINC); + readw(streamer_mmio + LAPDINC); + printk("%s: Functional Address: %04x:%04x\n", dev->name, + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC))); + + writew(streamer_priv->streamer_parms_addr + 4, + streamer_mmio + LAPA); + printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name, + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC)), + 
ntohs(readw(streamer_mmio + LAPDINC))); +#endif + + netif_start_queue(dev); + netif_carrier_on(dev); + return 0; +} + +/* + * When we enter the rx routine we do not know how many frames have been + * queued on the rx channel. Therefore we start at the next rx status + * position and travel around the receive ring until we have completed + * all the frames. + * + * This means that we may process the frame before we receive the end + * of frame interrupt. This is why we always test the status instead + * of blindly processing the next frame. + * + */ +static void streamer_rx(struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + struct streamer_rx_desc *rx_desc; + int rx_ring_last_received, length, frame_length, buffer_cnt = 0; + struct sk_buff *skb, *skb2; + + /* setup the next rx descriptor to be received */ + rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; + rx_ring_last_received = streamer_priv->rx_ring_last_received; + + while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */ + if (rx_ring_last_received != streamer_priv->rx_ring_last_received) + { + printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n", + rx_ring_last_received, streamer_priv->rx_ring_last_received); + } + streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); + rx_ring_last_received = streamer_priv->rx_ring_last_received; + + length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ + frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff; + + if (rx_desc->status & 0x7E830000) { /* errors */ + if (streamer_priv->streamer_message_level) { + printk(KERN_WARNING "%s: Rx Error %x\n", + dev->name, rx_desc->status); + } + } else { /* received without errors */ + if (rx_desc->status & 0x80000000) { /* frame complete */ + buffer_cnt = 1; + skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); + } else { + skb = dev_alloc_skb(frame_length); + } + + if (skb == NULL) + { + printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name); + dev->stats.rx_dropped++; + } else { /* we allocated an skb OK */ + if (buffer_cnt == 1) { + /* release the DMA mapping */ + pci_unmap_single(streamer_priv->pci_dev, + le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer), + streamer_priv->pkt_buf_sz, + PCI_DMA_FROMDEVICE); + skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received]; +#if STREAMER_DEBUG_PACKETS + { + int i; + printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head); + for (i = 0; i < frame_length; i++) + { + printk("%x:", skb2->data[i]); + if (((i + 1) % 16) == 0) + printk("\n"); + } + printk("\n"); + } +#endif + skb_put(skb2, length); + skb2->protocol = tr_type_trans(skb2, dev); + /* recycle this descriptor */ + streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; + streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; + streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz, + PCI_DMA_FROMDEVICE)); + streamer_priv->rx_ring_skb[rx_ring_last_received] = skb; + /* place recycled descriptor back on the adapter */ + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, + 
&streamer_priv->streamer_rx_ring[rx_ring_last_received], + sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)), + streamer_mmio + RXLBDA); + /* pass the received skb up to the protocol */ + netif_rx(skb2); + } else { + do { /* Walk the buffers */ + pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE), + memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */ + streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; + streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; + + /* give descriptor back to the adapter */ + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, + &streamer_priv->streamer_rx_ring[rx_ring_last_received], + length, PCI_DMA_FROMDEVICE)), + streamer_mmio + RXLBDA); + + if (rx_desc->status & 0x80000000) + break; /* this descriptor completes the frame */ + + /* else get the next pending descriptor */ + if (rx_ring_last_received!= streamer_priv->rx_ring_last_received) + { + printk("RX Error rx_ring_last_received not the same %x %x\n", + rx_ring_last_received, + streamer_priv->rx_ring_last_received); + } + rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)]; + + length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ + streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1); + rx_ring_last_received = streamer_priv->rx_ring_last_received; + } while (1); + + skb->protocol = tr_type_trans(skb, dev); + /* send up to the protocol */ + netif_rx(skb); + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; + } /* if skb == null */ + } /* end received without errors */ + + /* try the next one */ + rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; + } /* end for all completed rx descriptors */ +} + +static irqreturn_t streamer_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + __u16 sisr; + __u16 misr; + u8 max_intr = MAX_INTR; + + spin_lock(&streamer_priv->streamer_lock); + sisr = readw(streamer_mmio + SISR); + + while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE | + SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) && + (max_intr > 0)) { + + if(sisr & SISR_PAR_ERR) { + writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + + else if(sisr & SISR_SERR_ERR) { + writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + + else if(sisr & SISR_MI) { + misr = readw(streamer_mmio + MISR_RUM); + + if (misr & MISR_TX2_EOF) { + while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { + streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); + streamer_priv->free_tx_ring_entries++; + dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; + dev->stats.tx_packets++; + dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; + 
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0; + } + netif_wake_queue(dev); + } + + if (misr & MISR_RX_EOF) { + streamer_rx(dev); + } + /* MISR_RX_EOF */ + + if (misr & MISR_RX_NOBUF) { + /* According to the documentation, we don't have to do anything, + * but trapping it keeps it out of /var/log/messages. + */ + } /* SISR_RX_NOBUF */ + + writew(~misr, streamer_mmio + MISR_RUM); + (void)readw(streamer_mmio + MISR_RUM); + } + + else if (sisr & SISR_SRB_REPLY) { + if (streamer_priv->srb_queued == 1) { + wake_up_interruptible(&streamer_priv->srb_wait); + } else if (streamer_priv->srb_queued == 2) { + streamer_srb_bh(dev); + } + streamer_priv->srb_queued = 0; + + writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + + else if (sisr & SISR_ADAPTER_CHECK) { + printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name); + writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA); + printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n", + dev->name, readw(streamer_mmio + LAPDINC), + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC)), + ntohs(readw(streamer_mmio + LAPDINC))); + netif_stop_queue(dev); + netif_carrier_off(dev); + printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name); + } + + /* SISR_ADAPTER_CHECK */ + else if (sisr & SISR_ASB_FREE) { + /* Wake up anything that is waiting for the asb response */ + if (streamer_priv->asb_queued) { + streamer_asb_bh(dev); + } + writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + /* SISR_ASB_FREE */ + else if (sisr & SISR_ARB_CMD) { + streamer_arb_cmd(dev); + writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + /* SISR_ARB_CMD */ + else if (sisr & SISR_TRB_REPLY) { + /* Wake up anything that is waiting for the trb response */ + if (streamer_priv->trb_queued) { + wake_up_interruptible(&streamer_priv-> + trb_wait); + } + streamer_priv->trb_queued = 0; + writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM); + (void)readw(streamer_mmio + SISR_RUM); + } + /* SISR_TRB_REPLY */ + + sisr = readw(streamer_mmio + SISR); + max_intr--; + } /* while() */ + + spin_unlock(&streamer_priv->streamer_lock) ; + return IRQ_HANDLED; +} + +static netdev_tx_t streamer_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + unsigned long flags ; + + spin_lock_irqsave(&streamer_priv->streamer_lock, flags); + + if (streamer_priv->free_tx_ring_entries) { + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer = + cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE)); + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0; + 
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0; + streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len; + + streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb; + streamer_priv->free_tx_ring_entries--; +#if STREAMER_DEBUG_PACKETS + { + int i; + printk("streamer_xmit packet print:\n"); + for (i = 0; i < skb->len; i++) { + printk("%x:", skb->data[i]); + if (((i + 1) % 16) == 0) + printk("\n"); + } + printk("\n"); + } +#endif + + writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, + &streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free], + sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)), + streamer_mmio + TX2LFDA); + (void)readl(streamer_mmio + TX2LFDA); + + streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1); + spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags); + return NETDEV_TX_OK; + } else { + netif_stop_queue(dev); + spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags); + return NETDEV_TX_BUSY; + } +} + + +static int streamer_close(struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + unsigned long flags; + int i; + + netif_stop_queue(dev); + netif_carrier_off(dev); + writew(streamer_priv->srb, streamer_mmio + LAPA); + writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + + spin_lock_irqsave(&streamer_priv->streamer_lock, flags); + + streamer_priv->srb_queued = 1; + writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); + + spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags); + + while (streamer_priv->srb_queued) + { + interruptible_sleep_on_timeout(&streamer_priv->srb_wait, + jiffies + 60 * HZ); + if (signal_pending(current)) + { + printk(KERN_WARNING "%s: SRB timed out.\n", dev->name); + printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n", + readw(streamer_mmio + SISR), + readw(streamer_mmio + MISR_RUM), + readw(streamer_mmio + LISR)); + streamer_priv->srb_queued = 0; + break; + } + } + + streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); + + for (i = 0; i < STREAMER_RX_RING_SIZE; i++) { + if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) { + dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]); + } + streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); + } + + /* reset tx/rx fifo's and busmaster logic */ + + /* TBD. Add graceful way to reset the LLC channel without doing a soft reset. 
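+	   (The commented-out sequence below pulses BCTL bits 13 and 14 high and
+	   then low again, i.e. the soft reset this TBD would like to avoid.)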
+ writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL); + udelay(1); + writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); + */ + +#if STREAMER_DEBUG + writew(streamer_priv->srb, streamer_mmio + LAPA); + printk("srb): "); + for (i = 0; i < 2; i++) { + printk("%x ", ntohs(readw(streamer_mmio + LAPDINC))); + } + printk("\n"); +#endif + free_irq(dev->irq, dev); + return 0; +} + +static void streamer_set_rx_mode(struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + __u8 options = 0; + struct netdev_hw_addr *ha; + unsigned char dev_mc_address[5]; + + writel(streamer_priv->srb, streamer_mmio + LAPA); + options = streamer_priv->streamer_copy_all_options; + + if (dev->flags & IFF_PROMISC) + options |= (3 << 5); /* All LLC and MAC frames, all through the main rx channel */ + else + options &= ~(3 << 5); + + /* Only issue the srb if there is a change in options */ + + if ((options ^ streamer_priv->streamer_copy_all_options)) + { + /* Now to issue the srb command to alter the copy.all.options */ + writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC); + writew(htons(0x4a41),streamer_mmio+LAPDINC); + writew(htons(0x4d45),streamer_mmio+LAPDINC); + writew(htons(0x5320),streamer_mmio+LAPDINC); + writew(0x2020, streamer_mmio + LAPDINC); + + streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ + + writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM); + + streamer_priv->streamer_copy_all_options = options; + return; + } + + /* Set the functional addresses we need for multicast */ + writel(streamer_priv->srb,streamer_mmio+LAPA); + dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; + + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; + } + + writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + writew(0,streamer_mmio+LAPDINC); + writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC); + writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC); + streamer_priv->srb_queued = 2 ; + writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM); +} + +static void streamer_srb_bh(struct net_device *dev) +{ + struct streamer_private *streamer_priv = netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + __u16 srb_word; + + writew(streamer_priv->srb, streamer_mmio + LAPA); + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + + switch (srb_word) { + + /* SRB_MODIFY_RECEIVE_OPTIONS i.e. 
set_multicast_list options (promiscuous) + * At some point we should do something if we get an error, such as + * resetting the IFF_PROMISC flag in dev + */ + + case SRB_MODIFY_RECEIVE_OPTIONS: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + + switch (srb_word) { + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + default: + if (streamer_priv->streamer_message_level) + printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n", + dev->name, + streamer_priv->streamer_copy_all_options, + streamer_priv->streamer_receive_options); + break; + } /* switch srb[2] */ + break; + + + /* SRB_SET_GROUP_ADDRESS - Multicast group setting + */ + case SRB_SET_GROUP_ADDRESS: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + switch (srb_word) { + case 0x00: + break; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + case 0x3c: + printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name); + break; + case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */ + printk(KERN_WARNING "%s: Group address registers full\n", dev->name); + break; + case 0x55: + printk(KERN_INFO "%s: Group Address already set.\n", dev->name); + break; + default: + break; + } /* switch srb[2] */ + break; + + + /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list + */ + case SRB_RESET_GROUP_ADDRESS: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + switch (srb_word) { + case 0x00: + break; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + case 0x39: /* Must deal with this if individual multicast addresses used */ + printk(KERN_INFO "%s: Group address not found\n", dev->name); + break; + default: + break; + } /* switch srb[2] */ + break; + + + /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode + */ + + case SRB_SET_FUNC_ADDRESS: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + switch (srb_word) { + case 0x00: + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name); + break; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + default: + break; + } /* switch srb[2] */ + break; + + /* SRB_READ_LOG - Read and reset the adapter error counters + */ + + case SRB_READ_LOG: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + switch (srb_word) { + case 0x00: + { + int i; + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Read Log command complete\n", dev->name); + printk("Read Log statistics: "); + writew(streamer_priv->srb + 6, + streamer_mmio + LAPA); + for (i = 0; i < 5; i++) { + printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); + } + printk("\n"); + } + break; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + + } /* switch srb[2] */ + break; + + /* SRB_READ_SR_COUNTERS - Read and reset the 
source routing bridge related counters */ + + case SRB_READ_SR_COUNTERS: + srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; + switch (srb_word) { + case 0x00: + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name); + break; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); + break; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); + break; + default: + break; + } /* switch srb[2] */ + break; + + default: + printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name); + break; + } /* switch srb[0] */ +} + +static int streamer_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *saddr = addr; + struct streamer_private *streamer_priv = netdev_priv(dev); + + if (netif_running(dev)) + { + printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name); + return -EIO; + } + + memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len); + + if (streamer_priv->streamer_message_level) { + printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n", + dev->name, streamer_priv->streamer_laa[0], + streamer_priv->streamer_laa[1], + streamer_priv->streamer_laa[2], + streamer_priv->streamer_laa[3], + streamer_priv->streamer_laa[4], + streamer_priv->streamer_laa[5]); + } + return 0; +} + +static void streamer_arb_cmd(struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + __u8 header_len; + __u16 frame_len, buffer_len; + struct sk_buff *mac_frame; + __u8 frame_data[256]; + __u16 buff_off; + __u16 lan_status = 0, lan_status_diff; /* Initialize to stop compiler warning */ + __u8 fdx_prot_error; + __u16 next_ptr; + __u16 arb_word; + +#if STREAMER_NETWORK_MONITOR + struct trh_hdr *mac_hdr; +#endif + + writew(streamer_priv->arb, streamer_mmio + LAPA); + arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8; + + if (arb_word == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */ + writew(streamer_priv->arb + 6, streamer_mmio + LAPA); + streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC)); + header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; /* 802.5 Token-Ring Header Length */ + frame_len = ntohs(readw(streamer_mmio + LAPDINC)); + +#if STREAMER_DEBUG + { + int i; + __u16 next; + __u8 status; + __u16 len; + + writew(ntohs(buff_off), streamer_mmio + LAPA); /*setup window to frame data */ + next = htons(readw(streamer_mmio + LAPDINC)); + status = + ntohs(readw(streamer_mmio + LAPDINC)) & 0xff; + len = ntohs(readw(streamer_mmio + LAPDINC)); + + /* print out 1st 14 bytes of frame data */ + for (i = 0; i < 7; i++) { + printk("Loc %d = %04x\n", i, + ntohs(readw + (streamer_mmio + LAPDINC))); + } + + printk("next %04x, fs %02x, len %04x\n", next, + status, len); + } +#endif + if (!(mac_frame = dev_alloc_skb(frame_len))) { + printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", + dev->name); + goto drop_frame; + } + /* Walk the buffer chain, creating the frame */ + + do { + int i; + __u16 rx_word; + + writew(htons(buff_off), streamer_mmio + LAPA); /* setup window to frame data */ + next_ptr = ntohs(readw(streamer_mmio + LAPDINC)); + readw(streamer_mmio + LAPDINC); /* read thru status word */ + buffer_len = ntohs(readw(streamer_mmio + LAPDINC)); + + if (buffer_len > 256) + break; + + i = 0; + while (i < buffer_len) { + rx_word=ntohs(readw(streamer_mmio+LAPDINC)); + frame_data[i]=rx_word 
>> 8; + frame_data[i+1]=rx_word & 0xff; + i += 2; + } + + memcpy(skb_put(mac_frame, buffer_len), + frame_data, buffer_len); + } while (next_ptr && (buff_off = next_ptr)); + + mac_frame->protocol = tr_type_trans(mac_frame, dev); +#if STREAMER_NETWORK_MONITOR + printk(KERN_WARNING "%s: Received MAC Frame, details:\n", + dev->name); + mac_hdr = tr_hdr(mac_frame); + printk(KERN_WARNING + "%s: MAC Frame Dest. Addr: %pM\n", + dev->name, mac_hdr->daddr); + printk(KERN_WARNING + "%s: MAC Frame Srce. Addr: %pM\n", + dev->name, mac_hdr->saddr); +#endif + netif_rx(mac_frame); + + /* Now tell the card we have dealt with the received frame */ +drop_frame: + /* Set LISR Bit 1 */ + writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM); + + /* Is the ASB free ? */ + + if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE)) + { + streamer_priv->asb_queued = 1; + writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); + return; + /* Drop out and wait for the bottom half to be run */ + } + + + writew(streamer_priv->asb, streamer_mmio + LAPA); + writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + writew(0, streamer_mmio + LAPDINC); + writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD); + + writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); + + streamer_priv->asb_queued = 2; + return; + + } else if (arb_word == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */ + writew(streamer_priv->arb + 6, streamer_mmio + LAPA); + lan_status = ntohs(readw(streamer_mmio + LAPDINC)); + fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8; + + /* Issue ARB Free */ + writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM); + + lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) & + lan_status; + + if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR)) + { + if (lan_status_diff & LSC_LWF) + printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name); + if (lan_status_diff & LSC_ARW) + printk(KERN_WARNING "%s: Auto removal error\n", dev->name); + if (lan_status_diff & LSC_FPE) + printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name); + if (lan_status_diff & LSC_RR) + printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name); + + /* Adapter has been closed by the hardware */ + + /* reset tx/rx fifo's and busmaster logic */ + + /* @TBD. 
no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL); + udelay(1); + writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */ + + netif_stop_queue(dev); + netif_carrier_off(dev); + printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name); + } + /* If serious error */ + if (streamer_priv->streamer_message_level) { + if (lan_status_diff & LSC_SIG_LOSS) + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); + if (lan_status_diff & LSC_HARD_ERR) + printk(KERN_INFO "%s: Beaconing\n", dev->name); + if (lan_status_diff & LSC_SOFT_ERR) + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); + if (lan_status_diff & LSC_TRAN_BCN) + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name); + if (lan_status_diff & LSC_SS) + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); + if (lan_status_diff & LSC_RING_REC) + printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name); + if (lan_status_diff & LSC_FDX_MODE) + printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name); + } + + if (lan_status_diff & LSC_CO) { + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); + + /* Issue READ.LOG command */ + + writew(streamer_priv->srb, streamer_mmio + LAPA); + writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + writew(0, streamer_mmio + LAPDINC); + streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ + + writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); + } + + if (lan_status_diff & LSC_SR_CO) { + if (streamer_priv->streamer_message_level) + printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); + + /* Issue a READ.SR.COUNTERS */ + writew(streamer_priv->srb, streamer_mmio + LAPA); + writew(htons(SRB_READ_SR_COUNTERS << 8), + streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), + streamer_mmio+LAPDINC); + streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ + writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); + + } + streamer_priv->streamer_lan_status = lan_status; + } /* Lan.change.status */ + else + printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); +} + +static void streamer_asb_bh(struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + + if (streamer_priv->asb_queued == 1) + { + /* Dropped through the first time */ + + writew(streamer_priv->asb, streamer_mmio + LAPA); + writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC); + writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); + writew(0, streamer_mmio + LAPDINC); + writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD); + + writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); + streamer_priv->asb_queued = 2; + + return; + } + + if (streamer_priv->asb_queued == 2) { + __u8 rc; + writew(streamer_priv->asb + 2, streamer_mmio + LAPA); + rc=ntohs(readw(streamer_mmio+LAPD)) >> 8; + switch (rc) { + case 0x01: + printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); + break; + case 0x26: + printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); + break; + case 0xFF: + /* Valid response, everything should be ok again */ + break; + default: + printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name); + break; + } + } + streamer_priv->asb_queued = 0; +} + +static 
int streamer_change_mtu(struct net_device *dev, int mtu) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u16 max_mtu; + + if (streamer_priv->streamer_ring_speed == 4) + max_mtu = 4500; + else + max_mtu = 18000; + + if (mtu > max_mtu) + return -EINVAL; + if (mtu < 100) + return -EINVAL; + + dev->mtu = mtu; + streamer_priv->pkt_buf_sz = mtu + TR_HLEN; + + return 0; +} + +#if STREAMER_NETWORK_MONITOR +#ifdef CONFIG_PROC_FS +static int streamer_proc_info(char *buffer, char **start, off_t offset, + int length, int *eof, void *data) +{ + struct streamer_private *sdev=NULL; + struct pci_dev *pci_device = NULL; + int len = 0; + off_t begin = 0; + off_t pos = 0; + int size; + + struct net_device *dev; + + size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n"); + + pos += size; + len += size; + + for(sdev=dev_streamer; sdev; sdev=sdev->next) { + pci_device=sdev->pci_dev; + dev=pci_get_drvdata(pci_device); + + size = sprintf_info(buffer + len, dev); + len += size; + pos = begin + len; + + if (pos < offset) { + len = 0; + begin = pos; + } + if (pos > offset + length) + break; + } /* for */ + + *start = buffer + (offset - begin); /* Start of wanted data */ + len -= (offset - begin); /* Start slop */ + if (len > length) + len = length; /* Ending slop */ + return len; +} + +static int sprintf_info(char *buffer, struct net_device *dev) +{ + struct streamer_private *streamer_priv = + netdev_priv(dev); + __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; + struct streamer_adapter_addr_table sat; + struct streamer_parameters_table spt; + int size = 0; + int i; + + writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); + for (i = 0; i < 14; i += 2) { + __u16 io_word; + __u8 *datap = (__u8 *) & sat; + io_word=ntohs(readw(streamer_mmio+LAPDINC)); + datap[size]=io_word >> 8; + datap[size+1]=io_word & 0xff; + } + writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA); + for (i = 0; i < 68; i += 2) { + __u16 io_word; + __u8 *datap = (__u8 *) & spt; + io_word=ntohs(readw(streamer_mmio+LAPDINC)); + datap[size]=io_word >> 8; + datap[size+1]=io_word & 0xff; + } + + size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); + + size += sprintf(buffer + size, + "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n", + dev->name, dev->dev_addr, sat.node_addr, + sat.func_addr[0], sat.func_addr[1], + sat.func_addr[2], sat.func_addr[3]); + + size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name); + + size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name); + + size += sprintf(buffer + size, + "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n", + dev->name, spt.phys_addr[0], spt.phys_addr[1], + spt.phys_addr[2], spt.phys_addr[3], + spt.up_node_addr, spt.poll_addr, + ntohs(spt.acc_priority), ntohs(spt.auth_source_class), + ntohs(spt.att_code)); + + size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. 
V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name); + + size += sprintf(buffer + size, + "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n", + dev->name, spt.source_addr, + ntohs(spt.beacon_type), ntohs(spt.major_vector), + ntohs(spt.lan_status), ntohs(spt.local_ring), + ntohs(spt.mon_error), ntohs(spt.frame_correl)); + + size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", + dev->name); + + size += sprintf(buffer + size, + "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n", + dev->name, ntohs(spt.beacon_transmit), + ntohs(spt.beacon_receive), + spt.beacon_naun, + spt.beacon_phys[0], spt.beacon_phys[1], + spt.beacon_phys[2], spt.beacon_phys[3]); + return size; +} +#endif +#endif + +static struct pci_driver streamer_pci_driver = { + .name = "lanstreamer", + .id_table = streamer_pci_tbl, + .probe = streamer_init_one, + .remove = __devexit_p(streamer_remove_one), +}; + +static int __init streamer_init_module(void) { + return pci_register_driver(&streamer_pci_driver); +} + +static void __exit streamer_cleanup_module(void) { + pci_unregister_driver(&streamer_pci_driver); +} + +module_init(streamer_init_module); +module_exit(streamer_cleanup_module); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h new file mode 100644 index 00000000..3c58d6a3 --- /dev/null +++ b/drivers/net/tokenring/lanstreamer.h @@ -0,0 +1,343 @@ +/* + * lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter + * + * Written By: Mike Sullivan, IBM Corporation + * + * Copyright (C) 1999 IBM Corporation + * + * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC + * chipset. + * + * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic + * chipsets) written by: + * 1999 Peter De Schrijver All Rights Reserved + * 1999 Mike Phillips (phillim@amtrak.com) + * + * Base Driver Skeleton: + * Written 1993-94 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * 12/10/99 - Alpha Release 0.1.0 + * First release to the public + * 08/15/01 - Added ioctl() definitions and others - Kent Yoder + * + */ + +/* MAX_INTR - the maximum number of times we can loop + * inside the interrupt function before returning + * control to the OS (maximum value is 256) + */ +#define MAX_INTR 5 + +#define CLS 0x0C +#define MLR 0x86 +#define LTR 0x0D + +#define BCTL 0x60 +#define BCTL_SOFTRESET (1<<15) +#define BCTL_RX_FIFO_8 (1<<1) +#define BCTL_TX_FIFO_8 (1<<3) + +#define GPR 0x4a +#define GPR_AUTOSENSE (1<<2) +#define GPR_16MBPS (1<<3) + +#define LISR 0x10 +#define LISR_SUM 0x12 +#define LISR_RUM 0x14 + +#define LISR_LIE (1<<15) +#define LISR_SLIM (1<<13) +#define LISR_SLI (1<<12) +#define LISR_BPEI (1<<9) +#define LISR_BPE (1<<8) +#define LISR_SRB_CMD (1<<5) +#define LISR_ASB_REPLY (1<<4) +#define LISR_ASB_FREE_REQ (1<<2) +#define LISR_ARB_FREE (1<<1) +#define LISR_TRB_FRAME (1<<0) + +#define SISR 0x16 +#define SISR_SUM 0x18 +#define SISR_RUM 0x1A +#define SISR_MASK 0x54 +#define SISR_MASK_SUM 0x56 +#define SISR_MASK_RUM 0x58 + +#define SISR_MI (1<<15) +#define SISR_SERR_ERR (1<<14) +#define SISR_TIMER (1<<11) +#define SISR_LAP_PAR_ERR (1<<10) +#define SISR_LAP_ACC_ERR (1<<9) +#define SISR_PAR_ERR (1<<8) +#define SISR_ADAPTER_CHECK (1<<6) +#define SISR_SRB_REPLY (1<<5) +#define SISR_ASB_FREE (1<<4) +#define SISR_ARB_CMD (1<<3) +#define SISR_TRB_REPLY (1<<2) + +#define MISR_RUM 0x5A +#define MISR_MASK 0x5C +#define MISR_MASK_RUM 0x5E + +#define MISR_TX2_IDLE (1<<15) +#define MISR_TX2_NO_STATUS (1<<14) +#define MISR_TX2_HALT (1<<13) +#define MISR_TX2_EOF (1<<12) +#define MISR_TX1_IDLE (1<<11) +#define MISR_TX1_NO_STATUS (1<<10) +#define MISR_TX1_HALT (1<<9) +#define MISR_TX1_EOF (1<<8) +#define MISR_RX_NOBUF (1<<5) +#define MISR_RX_EOB (1<<4) +#define MISR_RX_NO_STATUS (1<<2) +#define MISR_RX_HALT (1<<1) +#define MISR_RX_EOF (1<<0) + +#define LAPA 0x62 +#define LAPE 0x64 +#define LAPD 0x66 +#define LAPDINC 0x68 +#define LAPWWO 0x6A +#define LAPWWC 0x6C +#define LAPCTL 0x6E + +#define TIMER 0x4E4 + +#define BMCTL_SUM 0x50 +#define BMCTL_RUM 0x52 +#define BMCTL_TX1_DIS (1<<14) +#define BMCTL_TX2_DIS (1<<10) +#define BMCTL_RX_DIS (1<<6) +#define BMCTL_RX_ENABLED (1<<5) + +#define RXLBDA 0x90 +#define RXBDA 0x94 +#define RXSTAT 0x98 +#define RXDBA 0x9C + +#define TX1LFDA 0xA0 +#define TX1FDA 0xA4 +#define TX1STAT 0xA8 +#define TX1DBA 0xAC +#define TX2LFDA 0xB0 +#define TX2FDA 0xB4 +#define TX2STAT 0xB8 +#define TX2DBA 0xBC + +#define STREAMER_IO_SPACE 256 + +#define SRB_COMMAND_SIZE 50 + +#define STREAMER_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ + +/* Defines for LAN STATUS CHANGE reports */ +#define LSC_SIG_LOSS 0x8000 +#define LSC_HARD_ERR 0x4000 +#define LSC_SOFT_ERR 0x2000 +#define LSC_TRAN_BCN 
0x1000 +#define LSC_LWF 0x0800 +#define LSC_ARW 0x0400 +#define LSC_FPE 0x0200 +#define LSC_RR 0x0100 +#define LSC_CO 0x0080 +#define LSC_SS 0x0040 +#define LSC_RING_REC 0x0020 +#define LSC_SR_CO 0x0010 +#define LSC_FDX_MODE 0x0004 + +/* Defines for OPEN ADAPTER command */ + +#define OPEN_ADAPTER_EXT_WRAP (1<<15) +#define OPEN_ADAPTER_DIS_HARDEE (1<<14) +#define OPEN_ADAPTER_DIS_SOFTERR (1<<13) +#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12) +#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11) +#define OPEN_ADAPTER_ENABLE_EC (1<<10) +#define OPEN_ADAPTER_CONTENDER (1<<8) +#define OPEN_ADAPTER_PASS_BEACON (1<<7) +#define OPEN_ADAPTER_ENABLE_FDX (1<<6) +#define OPEN_ADAPTER_ENABLE_RPL (1<<5) +#define OPEN_ADAPTER_INHIBIT_ETR (1<<4) +#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3) + + +/* Defines for SRB Commands */ +#define SRB_CLOSE_ADAPTER 0x04 +#define SRB_CONFIGURE_BRIDGE 0x0c +#define SRB_CONFIGURE_HP_CHANNEL 0x13 +#define SRB_MODIFY_BRIDGE_PARMS 0x15 +#define SRB_MODIFY_OPEN_OPTIONS 0x01 +#define SRB_MODIFY_RECEIVE_OPTIONS 0x17 +#define SRB_NO_OPERATION 0x00 +#define SRB_OPEN_ADAPTER 0x03 +#define SRB_READ_LOG 0x08 +#define SRB_READ_SR_COUNTERS 0x16 +#define SRB_RESET_GROUP_ADDRESS 0x02 +#define SRB_RESET_TARGET_SEGMETN 0x14 +#define SRB_SAVE_CONFIGURATION 0x1b +#define SRB_SET_BRIDGE_PARMS 0x09 +#define SRB_SET_FUNC_ADDRESS 0x07 +#define SRB_SET_GROUP_ADDRESS 0x06 +#define SRB_SET_TARGET_SEGMENT 0x05 + +/* Clear return code */ +#define STREAMER_CLEAR_RET_CODE 0xfe + +/* ARB Commands */ +#define ARB_RECEIVE_DATA 0x81 +#define ARB_LAN_CHANGE_STATUS 0x84 + +/* ASB Response commands */ +#define ASB_RECEIVE_DATA 0x81 + + +/* Streamer defaults for buffers */ + +#define STREAMER_RX_RING_SIZE 16 /* should be a power of 2 */ +/* Setting the number of TX descriptors to 1 is a workaround for an + * undocumented hardware problem with the lanstreamer board. Setting + * this to something higher may slightly increase the throughput you + * can get from the card, but at the risk of locking up the box. 
- + * + */ +#define STREAMER_TX_RING_SIZE 1 /* should be a power of 2 */ + +#define PKT_BUF_SZ 4096 /* Default packet size */ + +/* Streamer data structures */ + +struct streamer_tx_desc { + __u32 forward; + __u32 status; + __u32 bufcnt_framelen; + __u32 buffer; + __u32 buflen; + __u32 rsvd1; + __u32 rsvd2; + __u32 rsvd3; +}; + +struct streamer_rx_desc { + __u32 forward; + __u32 status; + __u32 buffer; + __u32 framelen_buflen; +}; + +struct mac_receive_buffer { + __u16 next; + __u8 padding; + __u8 frame_status; + __u16 buffer_length; + __u8 frame_data; +}; + +struct streamer_private { + + __u16 srb; + __u16 trb; + __u16 arb; + __u16 asb; + + struct streamer_private *next; + struct pci_dev *pci_dev; + __u8 __iomem *streamer_mmio; + char *streamer_card_name; + + spinlock_t streamer_lock; + + volatile int srb_queued; /* True if an SRB is still posted */ + wait_queue_head_t srb_wait; + + volatile int asb_queued; /* True if an ASB is posted */ + + volatile int trb_queued; /* True if a TRB is posted */ + wait_queue_head_t trb_wait; + + struct streamer_rx_desc *streamer_rx_ring; + struct streamer_tx_desc *streamer_tx_ring; + struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE], + *rx_ring_skb[STREAMER_RX_RING_SIZE]; + int tx_ring_free, tx_ring_last_status, rx_ring_last_received, + free_tx_ring_entries; + + __u16 streamer_lan_status; + __u8 streamer_ring_speed; + __u16 pkt_buf_sz; + __u8 streamer_receive_options, streamer_copy_all_options, + streamer_message_level; + __u16 streamer_addr_table_addr, streamer_parms_addr; + __u16 mac_rx_buffer; + __u8 streamer_laa[6]; +}; + +struct streamer_adapter_addr_table { + + __u8 node_addr[6]; + __u8 reserved[4]; + __u8 func_addr[4]; +}; + +struct streamer_parameters_table { + + __u8 phys_addr[4]; + __u8 up_node_addr[6]; + __u8 up_phys_addr[4]; + __u8 poll_addr[6]; + __u16 reserved; + __u16 acc_priority; + __u16 auth_source_class; + __u16 att_code; + __u8 source_addr[6]; + __u16 beacon_type; + __u16 major_vector; + __u16 lan_status; + __u16 soft_error_time; + __u16 reserved1; + __u16 local_ring; + __u16 mon_error; + __u16 beacon_transmit; + __u16 beacon_receive; + __u16 frame_correl; + __u8 beacon_naun[6]; + __u32 reserved2; + __u8 beacon_phys[4]; +}; diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c new file mode 100644 index 00000000..2bedc0ac --- /dev/null +++ b/drivers/net/tokenring/madgemc.c @@ -0,0 +1,763 @@ +/* + * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card. + * + * Written 2000 by Adam Fritzler + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver module supports the following cards: + * - Madge Smart 16/4 Ringnode MC16 + * - Madge Smart 16/4 Ringnode MC32 (??) + * + * Maintainer(s): + * AF Adam Fritzler + * + * Modification History: + * 16-Jan-00 AF Created + * + */ +static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tms380tr.h" +#include "madgemc.h" /* Madge-specific constants */ + +#define MADGEMC_IO_EXTENT 32 +#define MADGEMC_SIF_OFFSET 0x08 + +struct card_info { + /* + * These are read from the BIA ROM. + */ + unsigned int manid; + unsigned int cardtype; + unsigned int cardrev; + unsigned int ramsize; + + /* + * These are read from the MCA POS registers. 
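+	 * (POS = Programmable Option Select, the MicroChannel replacement for
+	 * configuration DIP switches; madgemc_probe() decodes these fields
+	 * from mdev->pos[].)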
+ */ + unsigned int burstmode:2; + unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */ + unsigned int arblevel:4; + unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */ + unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */ +}; + +static int madgemc_open(struct net_device *dev); +static int madgemc_close(struct net_device *dev); +static int madgemc_chipset_init(struct net_device *dev); +static void madgemc_read_rom(struct net_device *dev, struct card_info *card); +static unsigned short madgemc_setnselout_pins(struct net_device *dev); +static void madgemc_setcabletype(struct net_device *dev, int type); + +static int madgemc_mcaproc(char *buf, int slot, void *d); + +static void madgemc_setregpage(struct net_device *dev, int page); +static void madgemc_setsifsel(struct net_device *dev, int val); +static void madgemc_setint(struct net_device *dev, int val); + +static irqreturn_t madgemc_interrupt(int irq, void *dev_id); + +/* + * These work around paging, however they don't guarantee you're on the + * right page. + */ +#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) +#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) +#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) +#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) + +/* + * Read a byte-length value from the register. + */ +static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg) +{ + unsigned short ret; + if (reg<0x8) + ret = SIFREADB(reg); + else { + madgemc_setregpage(dev, 1); + ret = SIFREADB(reg); + madgemc_setregpage(dev, 0); + } + return ret; +} + +/* + * Write a byte-length value to a register. + */ +static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) +{ + if (reg<0x8) + SIFWRITEB(val, reg); + else { + madgemc_setregpage(dev, 1); + SIFWRITEB(val, reg); + madgemc_setregpage(dev, 0); + } +} + +/* + * Read a word-length value from a register + */ +static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg) +{ + unsigned short ret; + if (reg<0x8) + ret = SIFREADW(reg); + else { + madgemc_setregpage(dev, 1); + ret = SIFREADW(reg); + madgemc_setregpage(dev, 0); + } + return ret; +} + +/* + * Write a word-length value to a register. + */ +static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) +{ + if (reg<0x8) + SIFWRITEW(val, reg); + else { + madgemc_setregpage(dev, 1); + SIFWRITEW(val, reg); + madgemc_setregpage(dev, 0); + } +} + +static struct net_device_ops madgemc_netdev_ops __read_mostly; + +static int __devinit madgemc_probe(struct device *device) +{ + static int versionprinted; + struct net_device *dev; + struct net_local *tp; + struct card_info *card; + struct mca_device *mdev = to_mca_device(device); + int ret = 0; + + if (versionprinted++ == 0) + printk("%s", version); + + if(mca_device_claimed(mdev)) + return -EBUSY; + mca_device_set_claim(mdev, 1); + + dev = alloc_trdev(sizeof(struct net_local)); + if (!dev) { + printk("madgemc: unable to allocate dev space\n"); + mca_device_set_claim(mdev, 0); + ret = -ENOMEM; + goto getout; + } + + dev->netdev_ops = &madgemc_netdev_ops; + + card = kmalloc(sizeof(struct card_info), GFP_KERNEL); + if (card==NULL) { + printk("madgemc: unable to allocate card struct\n"); + ret = -ENOMEM; + goto getout1; + } + + /* + * Parse configuration information. This all comes + * directly from the publicly available @002d.ADF. 
+ * Get it from Madge or your local ADF library. + */ + + /* + * Base address + */ + dev->base_addr = 0x0a20 + + ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) + + ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) + + ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0); + + /* + * Interrupt line + */ + switch(mdev->pos[0] >> 6) { /* upper two bits */ + case 0x1: dev->irq = 3; break; + case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */ + case 0x3: dev->irq = 10; break; + default: dev->irq = 0; break; + } + + if (dev->irq == 0) { + printk("%s: invalid IRQ\n", dev->name); + ret = -EBUSY; + goto getout2; + } + + if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, + "madgemc")) { + printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr); + dev->base_addr += MADGEMC_SIF_OFFSET; + ret = -EBUSY; + goto getout2; + } + dev->base_addr += MADGEMC_SIF_OFFSET; + + /* + * Arbitration Level + */ + card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8; + + /* + * Burst mode and Fairness + */ + card->burstmode = ((mdev->pos[2] >> 6) & 0x3); + card->fairness = ((mdev->pos[2] >> 4) & 0x1); + + /* + * Ring Speed + */ + if ((mdev->pos[1] >> 2)&0x1) + card->ringspeed = 2; /* not selected */ + else if ((mdev->pos[2] >> 5) & 0x1) + card->ringspeed = 1; /* 16Mb */ + else + card->ringspeed = 0; /* 4Mb */ + + /* + * Cable type + */ + if ((mdev->pos[1] >> 6)&0x1) + card->cabletype = 1; /* STP/DB9 */ + else + card->cabletype = 0; /* UTP/RJ-45 */ + + + /* + * ROM Info. This requires us to actually twiddle + * bits on the card, so we must ensure above that + * the base address is free of conflict (request_region above). + */ + madgemc_read_rom(dev, card); + + if (card->manid != 0x4d) { /* something went wrong */ + printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid); + goto getout3; + } + + if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) { + printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype); + ret = -EIO; + goto getout3; + } + + /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */ + if ((card->cardtype == 0x08) && (card->cardrev <= 0x01)) + card->ramsize = 128; + else + card->ramsize = 256; + + printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", + dev->name, + (card->cardtype == 0x08)?MADGEMC16_CARDNAME: + MADGEMC32_CARDNAME, card->cardrev, + dev->base_addr, dev->irq); + + if (card->cardtype == 0x0d) + printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name); + + if (card->ringspeed==2) { /* Unknown */ + printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name); + card->ringspeed = 1; /* default to 16mb */ + } + + printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize); + + printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name, + (card->ringspeed)?16:4, + card->cabletype?"STP/DB9":"UTP/RJ-45"); + printk("%s: Arbitration Level: %d\n", dev->name, + card->arblevel); + + printk("%s: Burst Mode: ", dev->name); + switch(card->burstmode) { + case 0: printk("Cycle steal"); break; + case 1: printk("Limited burst"); break; + case 2: printk("Delayed release"); break; + case 3: printk("Immediate release"); break; + } + printk(" (%s)\n", (card->fairness)?"Unfair":"Fair"); + + + /* + * Enable SIF before we assign the interrupt handler, + * just in case we get spurious interrupts that need + * handling. 
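+	 * (The handler is registered with IRQF_SHARED and reads SIFSTS through
+	 * the SIF window, so the window must already be mapped before
+	 * request_irq() is called.)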
+ */ + outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */ + madgemc_setsifsel(dev, 1); + if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED, + "madgemc", dev)) { + ret = -EBUSY; + goto getout3; + } + + madgemc_chipset_init(dev); /* enables interrupts! */ + madgemc_setcabletype(dev, card->cabletype); + + /* Setup MCA structures */ + mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); + mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); + + printk("%s: Ring Station Address: %pM\n", + dev->name, dev->dev_addr); + + if (tmsdev_init(dev, device)) { + printk("%s: unable to get memory for dev->priv.\n", + dev->name); + ret = -ENOMEM; + goto getout4; + } + tp = netdev_priv(dev); + + /* + * The MC16 is physically a 32bit card. However, Madge + * insists on calling it 16bit, so I'll assume here that + * they know what they're talking about. Cut off DMA + * at 16mb. + */ + tp->setnselout = madgemc_setnselout_pins; + tp->sifwriteb = madgemc_sifwriteb; + tp->sifreadb = madgemc_sifreadb; + tp->sifwritew = madgemc_sifwritew; + tp->sifreadw = madgemc_sifreadw; + tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4; + + memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); + + tp->tmspriv = card; + dev_set_drvdata(device, dev); + + if (register_netdev(dev) == 0) + return 0; + + dev_set_drvdata(device, NULL); + ret = -ENOMEM; +getout4: + free_irq(dev->irq, dev); +getout3: + release_region(dev->base_addr-MADGEMC_SIF_OFFSET, + MADGEMC_IO_EXTENT); +getout2: + kfree(card); +getout1: + free_netdev(dev); +getout: + mca_device_set_claim(mdev, 0); + return ret; +} + +/* + * Handle interrupts generated by the card + * + * The MicroChannel Madge cards need slightly more handling + * after an interrupt than other TMS380 cards do. + * + * First we must make sure it was this card that generated the + * interrupt (since interrupt sharing is allowed). Then, + * because we're using level-triggered interrupts (as is + * standard on MCA), we must toggle the interrupt line + * on the card in order to claim and acknowledge the interrupt. + * Once that is done, the interrupt should be handlable in + * the normal tms380tr_interrupt() routine. + * + * There's two ways we can check to see if the interrupt is ours, + * both with their own disadvantages... + * + * 1) Read in the SIFSTS register from the TMS controller. This + * is guaranteed to be accurate, however, there's a fairly + * large performance penalty for doing so: the Madge chips + * must request the register from the Eagle, the Eagle must + * read them from its internal bus, and then take the route + * back out again, for a 16bit read. + * + * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs. + * The major disadvantage here is that the accuracy of the + * bit is in question. However, it cuts out the extra read + * cycles it takes to read the Eagle's SIF, as its only an + * 8bit read, and theoretically the Madge bit is directly + * connected to the interrupt latch coming out of the Eagle + * hardware (that statement is not verified). + * + * I can't determine which of these methods has the best win. For now, + * we make a compromise. Use the Madge way for the first interrupt, + * which should be the fast-path, and then once we hit the first + * interrupt, keep on trying using the SIF method until we've + * exhausted all contiguous interrupts. 
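+ *
+ * Condensed sketch of the resulting handler (the real code is just below):
+ *
+ *	if (!(inb(dev->base_addr + MC_CONTROL_REG0) & MC_CONTROL_REG0_SINTR))
+ *		return IRQ_NONE;
+ *	do {
+ *		reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+ *		outb(reg1 ^ MC_CONTROL_REG1_SINTEN, dev->base_addr + MC_CONTROL_REG1);
+ *		outb(reg1, dev->base_addr + MC_CONTROL_REG1);
+ *		tms380tr_interrupt(irq, dev_id);
+ *	} while (SIFREADW(SIFSTS) & STS_SYSTEM_IRQ);
+ *	return IRQ_HANDLED;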
+ * + */ +static irqreturn_t madgemc_interrupt(int irq, void *dev_id) +{ + int pending,reg1; + struct net_device *dev; + + if (!dev_id) { + printk("madgemc_interrupt: was not passed a dev_id!\n"); + return IRQ_NONE; + } + + dev = (struct net_device *)dev_id; + + /* Make sure its really us. -- the Madge way */ + pending = inb(dev->base_addr + MC_CONTROL_REG0); + if (!(pending & MC_CONTROL_REG0_SINTR)) + return IRQ_NONE; /* not our interrupt */ + + /* + * Since we're level-triggered, we may miss the rising edge + * of the next interrupt while we're off handling this one, + * so keep checking until the SIF verifies that it has nothing + * left for us to do. + */ + pending = STS_SYSTEM_IRQ; + do { + if (pending & STS_SYSTEM_IRQ) { + + /* Toggle the interrupt to reset the latch on card */ + reg1 = inb(dev->base_addr + MC_CONTROL_REG1); + outb(reg1 ^ MC_CONTROL_REG1_SINTEN, + dev->base_addr + MC_CONTROL_REG1); + outb(reg1, dev->base_addr + MC_CONTROL_REG1); + + /* Continue handling as normal */ + tms380tr_interrupt(irq, dev_id); + + pending = SIFREADW(SIFSTS); /* restart - the SIF way */ + + } else + return IRQ_HANDLED; + } while (1); + + return IRQ_HANDLED; /* not reachable */ +} + +/* + * Set the card to the preferred ring speed. + * + * Unlike newer cards, the MC16/32 have their speed selection + * circuit connected to the Madge ASICs and not to the TMS380 + * NSELOUT pins. Set the ASIC bits correctly here, and return + * zero to leave the TMS NSELOUT bits unaffected. + * + */ +static unsigned short madgemc_setnselout_pins(struct net_device *dev) +{ + unsigned char reg1; + struct net_local *tp = netdev_priv(dev); + + reg1 = inb(dev->base_addr + MC_CONTROL_REG1); + + if(tp->DataRate == SPEED_16) + reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */ + else if (reg1 & MC_CONTROL_REG1_SPEED_SEL) + reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */ + outb(reg1, dev->base_addr + MC_CONTROL_REG1); + + return 0; /* no change */ +} + +/* + * Set the register page. This equates to the SRSX line + * on the TMS380Cx6. + * + * Register selection is normally done via three contiguous + * bits. However, some boards (such as the MC16/32) use only + * two bits, plus a separate bit in the glue chip. This + * sets the SRSX bit (the top bit). See page 4-17 in the + * Yellow Book for which registers are affected. + * + */ +static void madgemc_setregpage(struct net_device *dev, int page) +{ + static int reg1; + + reg1 = inb(dev->base_addr + MC_CONTROL_REG1); + if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) { + outb(reg1 ^ MC_CONTROL_REG1_SRSX, + dev->base_addr + MC_CONTROL_REG1); + } + else if (page == 1) { + outb(reg1 | MC_CONTROL_REG1_SRSX, + dev->base_addr + MC_CONTROL_REG1); + } + reg1 = inb(dev->base_addr + MC_CONTROL_REG1); +} + +/* + * The SIF registers are not mapped into register space by default + * Set this to 1 to map them, 0 to map the BIA ROM. + * + */ +static void madgemc_setsifsel(struct net_device *dev, int val) +{ + unsigned int reg0; + + reg0 = inb(dev->base_addr + MC_CONTROL_REG0); + if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) { + outb(reg0 ^ MC_CONTROL_REG0_SIFSEL, + dev->base_addr + MC_CONTROL_REG0); + } else if (val == 1) { + outb(reg0 | MC_CONTROL_REG0_SIFSEL, + dev->base_addr + MC_CONTROL_REG0); + } + reg0 = inb(dev->base_addr + MC_CONTROL_REG0); +} + +/* + * Enable SIF interrupts + * + * This does not enable interrupts in the SIF, but rather + * enables SIF interrupts to be passed onto the host. 
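+ *
+ * Typical usage, as wired up later in this file (see madgemc_chipset_init()
+ * and madgemc_chipset_close() below):
+ *
+ *	madgemc_setint(dev, 1);   - let SIF interrupts reach the host
+ *	madgemc_setint(dev, 0);   - mask them again at the glue logic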
+ * + */ +static void madgemc_setint(struct net_device *dev, int val) +{ + unsigned int reg1; + + reg1 = inb(dev->base_addr + MC_CONTROL_REG1); + if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) { + outb(reg1 ^ MC_CONTROL_REG1_SINTEN, + dev->base_addr + MC_CONTROL_REG1); + } else if (val == 1) { + outb(reg1 | MC_CONTROL_REG1_SINTEN, + dev->base_addr + MC_CONTROL_REG1); + } +} + +/* + * Cable type is set via control register 7. Bit zero high + * for UTP, low for STP. + */ +static void madgemc_setcabletype(struct net_device *dev, int type) +{ + outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP, + dev->base_addr + MC_CONTROL_REG7); +} + +/* + * Enable the functions of the Madge chipset needed for + * full working order. + */ +static int madgemc_chipset_init(struct net_device *dev) +{ + outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */ + tms380tr_wait(100); /* wait for card to reset */ + + /* bring back into normal operating mode */ + outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1); + + /* map SIF registers */ + madgemc_setsifsel(dev, 1); + + /* enable SIF interrupts */ + madgemc_setint(dev, 1); + + return 0; +} + +/* + * Disable the board, and put back into power-up state. + */ +static void madgemc_chipset_close(struct net_device *dev) +{ + /* disable interrupts */ + madgemc_setint(dev, 0); + /* unmap SIF registers */ + madgemc_setsifsel(dev, 0); +} + +/* + * Read the card type (MC16 or MC32) from the card. + * + * The configuration registers are stored in two separate + * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE) + * for page zero, or setting bit 3 for page one. + * + * Page zero contains the following data: + * Byte 0: Manufacturer ID (0x4D -- ASCII "M") + * Byte 1: Card type: + * 0x08 for MC16 + * 0x0D for MC32 + * Byte 2: Card revision + * Byte 3: Mirror of POS config register 0 + * Byte 4: Mirror of POS 1 + * Byte 5: Mirror of POS 2 + * + * Page one contains the following data: + * Byte 0: Unused + * Byte 1-6: BIA, MSB to LSB. + * + * Note that to read the BIA, we must unmap the SIF registers + * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data + * will reside in the same logical location. For this reason, + * _never_ read the BIA while the Eagle processor is running! + * The SIF will be completely inaccessible until the BIA operation + * is complete. + * + */ +static void madgemc_read_rom(struct net_device *dev, struct card_info *card) +{ + unsigned long ioaddr; + unsigned char reg0, reg1, tmpreg0, i; + + ioaddr = dev->base_addr; + + reg0 = inb(ioaddr + MC_CONTROL_REG0); + reg1 = inb(ioaddr + MC_CONTROL_REG1); + + /* Switch to page zero and unmap SIF */ + tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL); + outb(tmpreg0, ioaddr + MC_CONTROL_REG0); + + card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID); + card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID); + card->cardrev = inb(ioaddr + MC_ROM_REVISION); + + /* Switch to rom page one */ + outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0); + + /* Read BIA */ + dev->addr_len = 6; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i); + + /* Restore original register values */ + outb(reg0, ioaddr + MC_CONTROL_REG0); + outb(reg1, ioaddr + MC_CONTROL_REG1); +} + +static int madgemc_open(struct net_device *dev) +{ + /* + * Go ahead and reinitialize the chipset again, just to + * make sure we didn't get left in a bad state. 
+ */ + madgemc_chipset_init(dev); + tms380tr_open(dev); + return 0; +} + +static int madgemc_close(struct net_device *dev) +{ + tms380tr_close(dev); + madgemc_chipset_close(dev); + return 0; +} + +/* + * Give some details available from /proc/mca/slotX + */ +static int madgemc_mcaproc(char *buf, int slot, void *d) +{ + struct net_device *dev = (struct net_device *)d; + struct net_local *tp = netdev_priv(dev); + struct card_info *curcard = tp->tmspriv; + int len = 0; + + len += sprintf(buf+len, "-------\n"); + if (curcard) { + len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev); + len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize); + len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45"); + len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4); + len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4); + len += sprintf(buf+len, "Device: %s\n", dev->name); + len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr); + len += sprintf(buf+len, "IRQ: %d\n", dev->irq); + len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel); + len += sprintf(buf+len, "Burst Mode: "); + switch(curcard->burstmode) { + case 0: len += sprintf(buf+len, "Cycle steal"); break; + case 1: len += sprintf(buf+len, "Limited burst"); break; + case 2: len += sprintf(buf+len, "Delayed release"); break; + case 3: len += sprintf(buf+len, "Immediate release"); break; + } + len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); + + len += sprintf(buf+len, "Ring Station Address: %pM\n", + dev->dev_addr); + } else + len += sprintf(buf+len, "Card not configured\n"); + + return len; +} + +static int __devexit madgemc_remove(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct net_local *tp; + struct card_info *card; + + BUG_ON(!dev); + + tp = netdev_priv(dev); + card = tp->tmspriv; + kfree(card); + tp->tmspriv = NULL; + + unregister_netdev(dev); + release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT); + free_irq(dev->irq, dev); + tmsdev_term(dev); + free_netdev(dev); + dev_set_drvdata(device, NULL); + + return 0; +} + +static short madgemc_adapter_ids[] __initdata = { + 0x002d, + 0x0000 +}; + +static struct mca_driver madgemc_driver = { + .id_table = madgemc_adapter_ids, + .driver = { + .name = "madgemc", + .bus = &mca_bus_type, + .probe = madgemc_probe, + .remove = __devexit_p(madgemc_remove), + }, +}; + +static int __init madgemc_init (void) +{ + madgemc_netdev_ops = tms380tr_netdev_ops; + madgemc_netdev_ops.ndo_open = madgemc_open; + madgemc_netdev_ops.ndo_stop = madgemc_close; + + return mca_register_driver (&madgemc_driver); +} + +static void __exit madgemc_exit (void) +{ + mca_unregister_driver (&madgemc_driver); +} + +module_init(madgemc_init); +module_exit(madgemc_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h new file mode 100644 index 00000000..fe88e272 --- /dev/null +++ b/drivers/net/tokenring/madgemc.h @@ -0,0 +1,70 @@ +/* + * madgemc.h: Header for the madgemc tms380tr module + * + * Authors: + * - Adam Fritzler + */ + +#ifndef __LINUX_MADGEMC_H +#define __LINUX_MADGEMC_H + +#ifdef __KERNEL__ + +#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode" +#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode" + +/* + * Bit definitions for the POS config registers + */ +#define MC16_POS0_ADDR1 0x20 +#define MC16_POS2_ADDR2 0x04 +#define 
MC16_POS3_ADDR3 0x20 + +#define MC_CONTROL_REG0 ((long)-8) /* 0x00 */ +#define MC_CONTROL_REG1 ((long)-7) /* 0x01 */ +#define MC_ADAPTER_POS_REG0 ((long)-6) /* 0x02 */ +#define MC_ADAPTER_POS_REG1 ((long)-5) /* 0x03 */ +#define MC_ADAPTER_POS_REG2 ((long)-4) /* 0x04 */ +#define MC_ADAPTER_REG5_UNUSED ((long)-3) /* 0x05 */ +#define MC_ADAPTER_REG6_UNUSED ((long)-2) /* 0x06 */ +#define MC_CONTROL_REG7 ((long)-1) /* 0x07 */ + +#define MC_CONTROL_REG0_UNKNOWN1 0x01 +#define MC_CONTROL_REG0_UNKNOWN2 0x02 +#define MC_CONTROL_REG0_SIFSEL 0x04 +#define MC_CONTROL_REG0_PAGE 0x08 +#define MC_CONTROL_REG0_TESTINTERRUPT 0x10 +#define MC_CONTROL_REG0_UNKNOWN20 0x20 +#define MC_CONTROL_REG0_SINTR 0x40 +#define MC_CONTROL_REG0_UNKNOWN80 0x80 + +#define MC_CONTROL_REG1_SINTEN 0x01 +#define MC_CONTROL_REG1_BITOFDEATH 0x02 +#define MC_CONTROL_REG1_NSRESET 0x04 +#define MC_CONTROL_REG1_UNKNOWN8 0x08 +#define MC_CONTROL_REG1_UNKNOWN10 0x10 +#define MC_CONTROL_REG1_UNKNOWN20 0x20 +#define MC_CONTROL_REG1_SRSX 0x40 +#define MC_CONTROL_REG1_SPEED_SEL 0x80 + +#define MC_CONTROL_REG7_CABLESTP 0x00 +#define MC_CONTROL_REG7_CABLEUTP 0x01 + +/* + * ROM Page Zero + */ +#define MC_ROM_MANUFACTURERID 0x00 +#define MC_ROM_ADAPTERID 0x01 +#define MC_ROM_REVISION 0x02 +#define MC_ROM_CONFIG0 0x03 +#define MC_ROM_CONFIG1 0x04 +#define MC_ROM_CONFIG2 0x05 + +/* + * ROM Page One + */ +#define MC_ROM_UNUSED_BYTE 0x00 +#define MC_ROM_BIA_START 0x01 + +#endif /* __KERNEL__ */ +#endif /* __LINUX_MADGEMC_H */ diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c new file mode 100644 index 00000000..e3855aeb --- /dev/null +++ b/drivers/net/tokenring/olympic.c @@ -0,0 +1,1750 @@ +/* + * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved + * 1999/2000 Mike Phillips (mikep@linuxtr.net) + * + * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic + * chipset. + * + * Base Driver Skeleton: + * Written 1993-94 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their + * assistance and perserverance with the testing of this driver. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * 4/27/99 - Alpha Release 0.1.0 + * First release to the public + * + * 6/8/99 - Official Release 0.2.0 + * Merged into the kernel code + * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci + * resource. Driver also reports the card name returned by + * the pci resource. + * 1/11/00 - Added spinlocks for smp + * 2/23/00 - Updated to dev_kfree_irq + * 3/10/00 - Fixed FDX enable which triggered other bugs also + * squashed. + * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes. + * The odd thing about the changes is that the fix for + * endian issues with the big-endian data in the arb, asb... + * was to always swab() the bytes, no matter what CPU. + * That's because the read[wl]() functions always swap the + * bytes on the way in on PPC. + * Fixing the hardware descriptors was another matter, + * because they weren't going through read[wl](), there all + * the results had to be in memory in le32 values. kdaaker + * + * 12/23/00 - Added minimal Cardbus support (Thanks Donald). + * + * 03/09/01 - Add new pci api, dev_base_lock, general clean up. 
+ * + * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev + * Change proc_fs behaviour, now one entry per adapter. + * + * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the + * adapter when live does not take the system down with it. + * + * 06/02/01 - Clean up, copy skb for small packets + * + * 06/22/01 - Add EISR error handling routines + * + * 07/19/01 - Improve bad LAA reporting, strip out freemem + * into a separate function, its called from 3 + * different places now. + * 02/09/02 - Replaced sleep_on. + * 03/01/02 - Replace access to several registers from 32 bit to + * 16 bit. Fixes alignment errors on PPC 64 bit machines. + * Thanks to Al Trautman for this one. + * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was + * silently ignored until the error checking code + * went into version 1.0.0 + * 06/04/02 - Add correct start up sequence for the cardbus adapters. + * Required for strict compliance with pci power mgmt specs. + * To Do: + * + * Wake on lan + * + * If Problems do Occur + * Most problems can be rectified by either closing and opening the interface + * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult + * if compiled into the kernel). + */ + +/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */ + +#define OLYMPIC_DEBUG 0 + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "olympic.h" + +/* I've got to put some intelligence into the version number so that Peter and I know + * which version of the code somebody has got. + * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author. + * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike + * + * Official releases will only have an a.b.c version number format. + */ + +static char version[] = +"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ; + +static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion", + "Address Verification", "Neighbor Notification (Ring Poll)", + "Request Parameters","FDX Registration Request", + "FDX Duplicate Address Check", "Station registration Query Wait", + "Unknown stage"}; + +static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault", + "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing", + "Duplicate Node Address","Request Parameters","Remove Received", + "Reserved", "Reserved", "No Monitor Detected for RPL", + "Monitor Contention failer for RPL", "FDX Protocol Error"}; + +/* Module parameters */ + +MODULE_AUTHOR("Mike Phillips ") ; +MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; + +/* Ring Speed 0,4,16,100 + * 0 = Autosense + * 4,16 = Selected speed only, no autosense + * This allows the card to be the first on the ring + * and become the active monitor. + * 100 = Nothing at present, 100mbps is autodetected + * if FDX is turned on. May be implemented in the future to + * fail if 100mpbs is not detected. 
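+ *
+ * Example (hypothetical command line; each parameter takes one value per
+ * adapter, comma separated, in probe order):
+ *	modprobe olympic ringspeed=16,0 pkt_buf_sz=4096 message_level=1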
+ * + * WARNING: Some hubs will allow you to insert + * at the wrong speed + */ + +static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ; +module_param_array(ringspeed, int, NULL, 0); + +/* Packet buffer size */ + +static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ; +module_param_array(pkt_buf_sz, int, NULL, 0) ; + +/* Message Level */ + +static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ; +module_param_array(message_level, int, NULL, 0) ; + +/* Change network_monitor to receive mac frames through the arb channel. + * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr + * device, i.e. tr0, tr1 etc. + * Intended to be used to create a ring-error reporting network module + * i.e. it will give you the source address of beaconers on the ring + */ +static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,}; +module_param_array(network_monitor, int, NULL, 0); + +static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = { + {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,}, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ; + + +static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static int olympic_init(struct net_device *dev); +static int olympic_open(struct net_device *dev); +static netdev_tx_t olympic_xmit(struct sk_buff *skb, + struct net_device *dev); +static int olympic_close(struct net_device *dev); +static void olympic_set_rx_mode(struct net_device *dev); +static void olympic_freemem(struct net_device *dev) ; +static irqreturn_t olympic_interrupt(int irq, void *dev_id); +static int olympic_set_mac_address(struct net_device *dev, void *addr) ; +static void olympic_arb_cmd(struct net_device *dev); +static int olympic_change_mtu(struct net_device *dev, int mtu); +static void olympic_srb_bh(struct net_device *dev) ; +static void olympic_asb_bh(struct net_device *dev) ; +static const struct file_operations olympic_proc_ops; + +static const struct net_device_ops olympic_netdev_ops = { + .ndo_open = olympic_open, + .ndo_stop = olympic_close, + .ndo_start_xmit = olympic_xmit, + .ndo_change_mtu = olympic_change_mtu, + .ndo_set_multicast_list = olympic_set_rx_mode, + .ndo_set_mac_address = olympic_set_mac_address, +}; + +static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev ; + struct olympic_private *olympic_priv; + static int card_no = -1 ; + int i ; + + card_no++ ; + + if ((i = pci_enable_device(pdev))) { + return i ; + } + + pci_set_master(pdev); + + if ((i = pci_request_regions(pdev,"olympic"))) { + goto op_disable_dev; + } + + dev = alloc_trdev(sizeof(struct olympic_private)) ; + if (!dev) { + i = -ENOMEM; + goto op_release_dev; + } + + olympic_priv = netdev_priv(dev) ; + + spin_lock_init(&olympic_priv->olympic_lock) ; + + init_waitqueue_head(&olympic_priv->srb_wait); + init_waitqueue_head(&olympic_priv->trb_wait); +#if OLYMPIC_DEBUG + printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev)); +#endif + dev->irq=pdev->irq; + dev->base_addr=pci_resource_start(pdev, 0); + olympic_priv->olympic_card_name = pci_name(pdev); + olympic_priv->pdev = pdev; + olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256); + olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048); + if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) { + goto op_free_iomap; + } + + if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) ) + olympic_priv->pkt_buf_sz = PKT_BUF_SZ ; + else + 
olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; + + dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ; + olympic_priv->olympic_ring_speed = ringspeed[card_no] ; + olympic_priv->olympic_message_level = message_level[card_no] ; + olympic_priv->olympic_network_monitor = network_monitor[card_no]; + + if ((i = olympic_init(dev))) { + goto op_free_iomap; + } + + dev->netdev_ops = &olympic_netdev_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + + pci_set_drvdata(pdev,dev) ; + register_netdev(dev) ; + printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name); + if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */ + char proc_name[20] ; + strcpy(proc_name,"olympic_") ; + strcat(proc_name,dev->name) ; + proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev); + printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); + } + return 0 ; + +op_free_iomap: + if (olympic_priv->olympic_mmio) + iounmap(olympic_priv->olympic_mmio); + if (olympic_priv->olympic_lap) + iounmap(olympic_priv->olympic_lap); + + free_netdev(dev); +op_release_dev: + pci_release_regions(pdev); + +op_disable_dev: + pci_disable_device(pdev); + return i; +} + +static int olympic_init(struct net_device *dev) +{ + struct olympic_private *olympic_priv; + u8 __iomem *olympic_mmio, *init_srb,*adapter_addr; + unsigned long t; + unsigned int uaa_addr; + + olympic_priv=netdev_priv(dev); + olympic_mmio=olympic_priv->olympic_mmio; + + printk("%s\n", version); + printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq); + + writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL); + t=jiffies; + while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { + schedule(); + if(time_after(jiffies, t + 40*HZ)) { + printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); + return -ENODEV; + } + } + + + /* Needed for cardbus */ + if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) { + writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK); + } + +#if OLYMPIC_DEBUG + printk("BCTL: %x\n",readl(olympic_mmio+BCTL)); + printk("GPR: %x\n",readw(olympic_mmio+GPR)); + printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK)); +#endif + /* Aaaahhh, You have got to be real careful setting GPR, the card + holds the previous values from flash memory, including autosense + and ring speed */ + + writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL); + + if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */ + writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR); + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name); + } else if (olympic_priv->olympic_ring_speed == 16) { + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name); + writew(GPR_16MBPS, olympic_mmio+GPR); + } else if (olympic_priv->olympic_ring_speed == 4) { + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ; + writew(0, olympic_mmio+GPR); + } + + writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR); + +#if OLYMPIC_DEBUG + printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ; +#endif + /* Solo has been paused to meet the Cardbus power + * specs if the 
adapter is cardbus. Check to + * see its been paused and then restart solo. The + * adapter should set the pause bit within 1 second. + */ + + if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) { + t=jiffies; + while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) { + schedule() ; + if(time_after(jiffies, t + 2*HZ)) { + printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; + return -ENODEV; + } + } + writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ; + } + + /* start solo init */ + writel((1<<15),olympic_mmio+SISR_MASK_SUM); + + t=jiffies; + while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { + schedule(); + if(time_after(jiffies, t + 15*HZ)) { + printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); + return -ENODEV; + } + } + + writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA); + +#if OLYMPIC_DEBUG + printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); +#endif + + init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800)); + +#if OLYMPIC_DEBUG +{ + int i; + printk("init_srb(%p): ",init_srb); + for(i=0;i<20;i++) + printk("%x ",readb(init_srb+i)); + printk("\n"); +} +#endif + if(readw(init_srb+6)) { + printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6)); + return -ENODEV; + } + + if (olympic_priv->olympic_message_level) { + if ( readb(init_srb +2) & 0x40) { + printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ; + } else { + printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n"); + } + } + + uaa_addr=swab16(readw(init_srb+8)); + +#if OLYMPIC_DEBUG + printk("UAA resides at %x\n",uaa_addr); +#endif + + writel(uaa_addr,olympic_mmio+LAPA); + adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800)); + + memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); + +#if OLYMPIC_DEBUG + printk("adapter address: %pM\n", dev->dev_addr); +#endif + + olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); + olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14)); + + return 0; + +} + +static int olympic_open(struct net_device *dev) +{ + struct olympic_private *olympic_priv=netdev_priv(dev); + u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; + unsigned long flags, t; + int i, open_finished = 1 ; + u8 resp, err; + + DECLARE_WAITQUEUE(wait,current) ; + + olympic_init(dev); + + if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic", + dev)) + return -EAGAIN; + +#if OLYMPIC_DEBUG + printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); + printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR)); +#endif + + writel(SISR_MI,olympic_mmio+SISR_MASK_SUM); + + writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */ + + writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */ + + /* adapter is closed, so SRB is pointed to by LAPWWO */ + + writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA); + init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800)); + +#if OLYMPIC_DEBUG + printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); + printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK)); + printk("Before the open command\n"); +#endif + do { + memset_io(init_srb,0,SRB_COMMAND_SIZE); + + writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */ + writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2); + + /* If Network Monitor, instruct card to copy MAC frames through the ARB */ + if (olympic_priv->olympic_network_monitor) + 
writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8); + else + writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8); + + /* Test OR of first 3 bytes as its totally possible for + * someone to set the first 2 bytes to be zero, although this + * is an error, the first byte must have bit 6 set to 1 */ + + if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) { + writeb(olympic_priv->olympic_laa[0],init_srb+12); + writeb(olympic_priv->olympic_laa[1],init_srb+13); + writeb(olympic_priv->olympic_laa[2],init_srb+14); + writeb(olympic_priv->olympic_laa[3],init_srb+15); + writeb(olympic_priv->olympic_laa[4],init_srb+16); + writeb(olympic_priv->olympic_laa[5],init_srb+17); + memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ; + } + writeb(1,init_srb+30); + + spin_lock_irqsave(&olympic_priv->olympic_lock,flags); + olympic_priv->srb_queued=1; + + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); + + t = jiffies ; + + add_wait_queue(&olympic_priv->srb_wait,&wait) ; + set_current_state(TASK_INTERRUPTIBLE) ; + + while(olympic_priv->srb_queued) { + schedule() ; + if(signal_pending(current)) { + printk(KERN_WARNING "%s: Signal received in open.\n", + dev->name); + printk(KERN_WARNING "SISR=%x LISR=%x\n", + readl(olympic_mmio+SISR), + readl(olympic_mmio+LISR)); + olympic_priv->srb_queued=0; + break; + } + if (time_after(jiffies, t + 10*HZ)) { + printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); + olympic_priv->srb_queued=0; + break ; + } + set_current_state(TASK_INTERRUPTIBLE) ; + } + remove_wait_queue(&olympic_priv->srb_wait,&wait) ; + set_current_state(TASK_RUNNING) ; + olympic_priv->srb_queued = 0 ; +#if OLYMPIC_DEBUG + printk("init_srb(%p): ",init_srb); + for(i=0;i<20;i++) + printk("%02x ",readb(init_srb+i)); + printk("\n"); +#endif + + /* If we get the same return response as we set, the interrupt wasn't raised and the open + * timed out. 
+ */
+
+ switch (resp = readb(init_srb+2)) {
+ case OLYMPIC_CLEAR_RET_CODE:
+ printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
+ goto out;
+ case 0:
+ open_finished = 1;
+ break;
+ case 0x07:
+ if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense, first time around */
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
+ open_finished = 0 ;
+ continue;
+ }
+
+ err = readb(init_srb+7);
+
+ if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
+ printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
+ } else {
+ printk(KERN_WARNING "%s: %s - %s\n", dev->name,
+ open_maj_error[(err & 0xf0) >> 4],
+ open_min_error[(err & 0x0f)]);
+ }
+ goto out;
+
+ case 0x32:
+ printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
+ dev->name, olympic_priv->olympic_laa);
+ goto out;
+
+ default:
+ printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
+ goto out;
+
+ }
+ } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
+
+ if (readb(init_srb+18) & (1<<3))
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
+
+ if (readb(init_srb+18) & (1<<1))
+ olympic_priv->olympic_ring_speed = 100 ;
+ else if (readb(init_srb+18) & 1)
+ olympic_priv->olympic_ring_speed = 16 ;
+ else
+ olympic_priv->olympic_ring_speed = 4 ;
+
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
+
+ olympic_priv->asb = swab16(readw(init_srb+8));
+ olympic_priv->srb = swab16(readw(init_srb+10));
+ olympic_priv->arb = swab16(readw(init_srb+12));
+ olympic_priv->trb = swab16(readw(init_srb+16));
+
+ olympic_priv->olympic_receive_options = 0x01 ;
+ olympic_priv->olympic_copy_all_options = 0 ;
+
+ /* setup rx ring */
+
+ writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
+
+ writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
+
+ for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
+
+ struct sk_buff *skb;
+
+ skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
+ if(skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
+ skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
+ olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
+ olympic_priv->rx_ring_skb[i]=skb;
+ }
+
+ if (i==0) {
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. 
Adapter disabled\n",dev->name);
+ goto out;
+ }
+
+ olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
+ sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
+ writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
+ writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
+ writew(i, olympic_mmio+RXDESCQCNT);
+
+ olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
+ sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
+ writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
+
+ olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
+ olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
+
+ writew(i, olympic_mmio+RXSTATQCNT);
+
+#if OLYMPIC_DEBUG
+ printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
+ printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
+ printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
+ printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
+ printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
+
+ printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
+ printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
+ olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
+#endif
+
+ writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
+
+#if OLYMPIC_DEBUG
+ printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
+ printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
+ printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
+#endif
+
+ writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
+
+ /* setup tx ring */
+
+ writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
+ for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
+ olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
+
+ olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
+ olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
+ sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
+ writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
+ writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
+ writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
+
+ olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
+ sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
+ writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
+ writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
+
+ olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
+ olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
+
+ writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* 
clean the eisr */ + writel(0,olympic_mmio+EISR) ; + writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */ + writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM); + +#if OLYMPIC_DEBUG + printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); + printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK)); +#endif + + if (olympic_priv->olympic_network_monitor) { + u8 __iomem *oat; + u8 __iomem *opt; + u8 addr[6]; + oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr); + opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr); + + for (i = 0; i < 6; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i); + printk("%s: Node Address: %pM\n", dev->name, addr); + printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); + + for (i = 0; i < 6; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i); + printk("%s: NAUN Address: %pM\n", dev->name, addr); + } + + netif_start_queue(dev); + return 0; + +out: + free_irq(dev->irq, dev); + return -EIO; +} + +/* + * When we enter the rx routine we do not know how many frames have been + * queued on the rx channel. Therefore we start at the next rx status + * position and travel around the receive ring until we have completed + * all the frames. + * + * This means that we may process the frame before we receive the end + * of frame interrupt. This is why we always test the status instead + * of blindly processing the next frame. + * + * We also remove the last 4 bytes from the packet as well, these are + * just token ring trailer info and upset protocols that don't check + * their own length, i.e. SNA. 
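+ *
+ * In outline (an illustrative sketch only; the function below is the
+ * authoritative version):
+ *
+ *	rx_status = &status_ring[(rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)];
+ *	while (rx_status->status_buffercnt) {
+ *		copy or swap the buffer(s) into an skb, trim the 4 trailer bytes,
+ *		hand it to netif_rx(), restock RXENQ and advance to the next status;
+ *	}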
+ * + */ +static void olympic_rx(struct net_device *dev) +{ + struct olympic_private *olympic_priv=netdev_priv(dev); + u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; + struct olympic_rx_status *rx_status; + struct olympic_rx_desc *rx_desc ; + int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len; + struct sk_buff *skb, *skb2; + int i; + + rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ; + + while (rx_status->status_buffercnt) { + u32 l_status_buffercnt; + + olympic_priv->rx_status_last_received++ ; + olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1); +#if OLYMPIC_DEBUG + printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); +#endif + length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff; + buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; + i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */ + frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16; + +#if OLYMPIC_DEBUG + printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt); +#endif + l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt); + if(l_status_buffercnt & 0xC0000000) { + if (l_status_buffercnt & 0x3B000000) { + if (olympic_priv->olympic_message_level) { + if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */ + printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name); + if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */ + printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name); + if (l_status_buffercnt & (1<<27)) /* No receive buffers */ + printk(KERN_WARNING "%s: No receive buffers\n",dev->name); + if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */ + printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name); + if (l_status_buffercnt & (1<<24)) /* Received Error Detect */ + printk(KERN_WARNING "%s: Received Error Detect\n",dev->name); + } + olympic_priv->rx_ring_last_received += i ; + olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; + dev->stats.rx_errors++; + } else { + + if (buffer_cnt == 1) { + skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ; + } else { + skb = dev_alloc_skb(length) ; + } + + if (skb == NULL) { + printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ; + dev->stats.rx_dropped++; + /* Update counters even though we don't transfer the frame */ + olympic_priv->rx_ring_last_received += i ; + olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; + } else { + /* Optimise based upon number of buffers used. + If only one buffer is used we can simply swap the buffers around. + If more than one then we must use the new buffer and copy the information + first. Ideally all frames would be in a single buffer, this can be tuned by + altering the buffer size. If the length of the packet is less than + 1500 bytes we're going to copy it over anyway to stop packets getting + dropped from sockets with buffers smaller than our pkt_buf_sz. 
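+
+ In short, a sketch of the branches that follow:
+ buffer_cnt == 1 and length > 1500 : swap - hand the ring skb up, map the new skb in its place
+ buffer_cnt == 1 and length <= 1500 : copy the data into the newly allocated skb
+ buffer_cnt > 1 : walk each fragment, copying it into the new skb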
*/ + + if (buffer_cnt==1) { + olympic_priv->rx_ring_last_received++ ; + olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1); + rx_ring_last_received = olympic_priv->rx_ring_last_received ; + if (length > 1500) { + skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ; + /* unmap buffer */ + pci_unmap_single(olympic_priv->pdev, + le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), + olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; + skb_put(skb2,length-4); + skb2->protocol = tr_type_trans(skb2,dev); + olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer = + cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, + olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); + olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length = + cpu_to_le32(olympic_priv->pkt_buf_sz); + olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ; + netif_rx(skb2) ; + } else { + pci_dma_sync_single_for_cpu(olympic_priv->pdev, + le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), + olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; + skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], + skb_put(skb,length - 4), + length - 4); + pci_dma_sync_single_for_device(olympic_priv->pdev, + le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), + olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; + skb->protocol = tr_type_trans(skb,dev) ; + netif_rx(skb) ; + } + } else { + do { /* Walk the buffers */ + olympic_priv->rx_ring_last_received++ ; + olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1); + rx_ring_last_received = olympic_priv->rx_ring_last_received ; + pci_dma_sync_single_for_cpu(olympic_priv->pdev, + le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), + olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; + rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); + cpy_length = (i == 1 ? 
frag_len : le32_to_cpu(rx_desc->res_length));
+ skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
+ skb_put(skb, cpy_length),
+ cpy_length);
+ pci_dma_sync_single_for_device(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ } while (--i) ;
+ skb_trim(skb,skb->len-4) ;
+ skb->protocol = tr_type_trans(skb,dev);
+ netif_rx(skb) ;
+ }
+ dev->stats.rx_packets++ ;
+ dev->stats.rx_bytes += length ;
+ } /* if skb == null */
+ } /* If status & 0x3b */
+
+ } else { /*if buffercnt & 0xC */
+ olympic_priv->rx_ring_last_received += i ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
+ }
+
+ rx_status->fragmentcnt_framelen = 0 ;
+ rx_status->status_buffercnt = 0 ;
+ rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
+
+ writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
+ } /* while */
+
+}
+
+static void olympic_freemem(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=netdev_priv(dev);
+ int i;
+
+ for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
+ if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
+ dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
+ olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
+ }
+ if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
+ pci_unmap_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
+ olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
+ }
+ olympic_priv->rx_status_last_received++;
+ olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
+ }
+ /* unmap rings */
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
+ sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
+ sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
+
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
+ sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
+ sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
+
+ return ;
+}
+
+static irqreturn_t olympic_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev= (struct net_device *)dev_id;
+ struct olympic_private *olympic_priv=netdev_priv(dev);
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
+ u32 sisr;
+ u8 __iomem *adapter_check_area ;
+
+ /*
+ * Read sisr but don't reset it yet.
+ * The indication bit may have been set but the interrupt latch
+ * bit may not be set, so we'd lose the interrupt later. 
+ */ + sisr=readl(olympic_mmio+SISR) ; + if (!(sisr & SISR_MI)) /* Interrupt isn't for us */ + return IRQ_NONE; + sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */ + + spin_lock(&olympic_priv->olympic_lock); + + /* Hotswap gives us this on removal */ + if (sisr == 0xffffffff) { + printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ; + spin_unlock(&olympic_priv->olympic_lock) ; + return IRQ_NONE; + } + + if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK | + SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) { + + /* If we ever get this the adapter is seriously dead. Only a reset is going to + * bring it back to life. We're talking pci bus errors and such like :( */ + if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) { + printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ; + printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ; + printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ; + printk(KERN_ERR "or the linux-tr mailing list.\n") ; + wake_up_interruptible(&olympic_priv->srb_wait); + spin_unlock(&olympic_priv->olympic_lock) ; + return IRQ_HANDLED; + } /* SISR_ERR */ + + if(sisr & SISR_SRB_REPLY) { + if(olympic_priv->srb_queued==1) { + wake_up_interruptible(&olympic_priv->srb_wait); + } else if (olympic_priv->srb_queued==2) { + olympic_srb_bh(dev) ; + } + olympic_priv->srb_queued=0; + } /* SISR_SRB_REPLY */ + + /* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure + we get all tx completions. */ + if (sisr & SISR_TX1_EOF) { + while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) { + olympic_priv->tx_ring_last_status++; + olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); + olympic_priv->free_tx_ring_entries++; + dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; + dev->stats.tx_packets++ ; + pci_unmap_single(olympic_priv->pdev, + le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), + olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); + dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]); + olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef); + olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0; + } + netif_wake_queue(dev); + } /* SISR_TX1_EOF */ + + if (sisr & SISR_RX_STATUS) { + olympic_rx(dev); + } /* SISR_RX_STATUS */ + + if (sisr & SISR_ADAPTER_CHECK) { + netif_stop_queue(dev); + printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name); + writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA); + adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ; + printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ; + spin_unlock(&olympic_priv->olympic_lock) ; + return IRQ_HANDLED; + } /* SISR_ADAPTER_CHECK */ + + if (sisr & SISR_ASB_FREE) { + /* Wake up anything that is waiting for the asb response */ + if (olympic_priv->asb_queued) { + olympic_asb_bh(dev) ; + } + } /* 
SISR_ASB_FREE */ + + if (sisr & SISR_ARB_CMD) { + olympic_arb_cmd(dev) ; + } /* SISR_ARB_CMD */ + + if (sisr & SISR_TRB_REPLY) { + /* Wake up anything that is waiting for the trb response */ + if (olympic_priv->trb_queued) { + wake_up_interruptible(&olympic_priv->trb_wait); + } + olympic_priv->trb_queued = 0 ; + } /* SISR_TRB_REPLY */ + + if (sisr & SISR_RX_NOBUF) { + /* According to the documentation, we don't have to do anything, but trapping it keeps it out of + /var/log/messages. */ + } /* SISR_RX_NOBUF */ + } else { + printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr); + printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ; + } /* One if the interrupts we want */ + writel(SISR_MI,olympic_mmio+SISR_MASK_SUM); + + spin_unlock(&olympic_priv->olympic_lock) ; + return IRQ_HANDLED; +} + +static netdev_tx_t olympic_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct olympic_private *olympic_priv=netdev_priv(dev); + u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; + unsigned long flags ; + + spin_lock_irqsave(&olympic_priv->olympic_lock, flags); + + netif_stop_queue(dev); + + if(olympic_priv->free_tx_ring_entries) { + olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer = + cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE)); + olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000)); + olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb; + olympic_priv->free_tx_ring_entries--; + + olympic_priv->tx_ring_free++; + olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1); + writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1); + netif_wake_queue(dev); + spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); + return NETDEV_TX_OK; + } else { + spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); + return NETDEV_TX_BUSY; + } + +} + + +static int olympic_close(struct net_device *dev) +{ + struct olympic_private *olympic_priv=netdev_priv(dev); + u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb; + unsigned long t,flags; + + DECLARE_WAITQUEUE(wait,current) ; + + netif_stop_queue(dev); + + writel(olympic_priv->srb,olympic_mmio+LAPA); + srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); + + writeb(SRB_CLOSE_ADAPTER,srb+0); + writeb(0,srb+1); + writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); + + add_wait_queue(&olympic_priv->srb_wait,&wait) ; + set_current_state(TASK_INTERRUPTIBLE) ; + + spin_lock_irqsave(&olympic_priv->olympic_lock,flags); + olympic_priv->srb_queued=1; + + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); + + while(olympic_priv->srb_queued) { + + t = schedule_timeout_interruptible(60*HZ); + + if(signal_pending(current)) { + printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); + printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR)); + olympic_priv->srb_queued=0; + break; + } + + if (t == 0) { + printk(KERN_WARNING "%s: SRB timed out. 
May not be fatal.\n",dev->name); + } + olympic_priv->srb_queued=0; + } + remove_wait_queue(&olympic_priv->srb_wait,&wait) ; + + olympic_priv->rx_status_last_received++; + olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1; + + olympic_freemem(dev) ; + + /* reset tx/rx fifo's and busmaster logic */ + + writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL); + udelay(1); + writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); + +#if OLYMPIC_DEBUG + { + int i ; + printk("srb(%p): ",srb); + for(i=0;i<4;i++) + printk("%x ",readb(srb+i)); + printk("\n"); + } +#endif + free_irq(dev->irq,dev); + + return 0; + +} + +static void olympic_set_rx_mode(struct net_device *dev) +{ + struct olympic_private *olympic_priv = netdev_priv(dev); + u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; + u8 options = 0; + u8 __iomem *srb; + struct netdev_hw_addr *ha; + unsigned char dev_mc_address[4] ; + + writel(olympic_priv->srb,olympic_mmio+LAPA); + srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); + options = olympic_priv->olympic_copy_all_options; + + if (dev->flags&IFF_PROMISC) + options |= 0x61 ; + else + options &= ~0x61 ; + + /* Only issue the srb if there is a change in options */ + + if ((options ^ olympic_priv->olympic_copy_all_options)) { + + /* Now to issue the srb command to alter the copy.all.options */ + + writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb); + writeb(0,srb+1); + writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); + writeb(0,srb+3); + writeb(olympic_priv->olympic_receive_options,srb+4); + writeb(options,srb+5); + + olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ + + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + + olympic_priv->olympic_copy_all_options = options ; + + return ; + } + + /* Set the functional addresses we need for multicast */ + + dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; + + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; + } + + writeb(SRB_SET_FUNC_ADDRESS,srb+0); + writeb(0,srb+1); + writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); + writeb(0,srb+3); + writeb(0,srb+4); + writeb(0,srb+5); + writeb(dev_mc_address[0],srb+6); + writeb(dev_mc_address[1],srb+7); + writeb(dev_mc_address[2],srb+8); + writeb(dev_mc_address[3],srb+9); + + olympic_priv->srb_queued = 2 ; + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + +} + +static void olympic_srb_bh(struct net_device *dev) +{ + struct olympic_private *olympic_priv = netdev_priv(dev); + u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; + u8 __iomem *srb; + + writel(olympic_priv->srb,olympic_mmio+LAPA); + srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); + + switch (readb(srb)) { + + /* SRB_MODIFY_RECEIVE_OPTIONS i.e. 
set_multicast_list options (promiscuous) + * At some point we should do something if we get an error, such as + * resetting the IFF_PROMISC flag in dev + */ + + case SRB_MODIFY_RECEIVE_OPTIONS: + switch (readb(srb+2)) { + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ; + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); + break ; + default: + if (olympic_priv->olympic_message_level) + printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ; + break ; + } /* switch srb[2] */ + break ; + + /* SRB_SET_GROUP_ADDRESS - Multicast group setting + */ + + case SRB_SET_GROUP_ADDRESS: + switch (readb(srb+2)) { + case 0x00: + break ; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); + break ; + case 0x3c: + printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ; + break ; + case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */ + printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ; + break ; + case 0x55: + printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ; + break ; + default: + break ; + } /* switch srb[2] */ + break ; + + /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list + */ + + case SRB_RESET_GROUP_ADDRESS: + switch (readb(srb+2)) { + case 0x00: + break ; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; + break ; + case 0x39: /* Must deal with this if individual multicast addresses used */ + printk(KERN_INFO "%s: Group address not found\n",dev->name); + break ; + default: + break ; + } /* switch srb[2] */ + break ; + + + /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode + */ + + case SRB_SET_FUNC_ADDRESS: + switch (readb(srb+2)) { + case 0x00: + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name); + break ; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; + break ; + default: + break ; + } /* switch srb[2] */ + break ; + + /* SRB_READ_LOG - Read and reset the adapter error counters + */ + + case SRB_READ_LOG: + switch (readb(srb+2)) { + case 0x00: + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; + break ; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; + break ; + + } /* switch srb[2] */ + break ; + + /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */ + + case SRB_READ_SR_COUNTERS: + switch (readb(srb+2)) { + case 0x00: + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; + break ; + case 0x01: + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); + break ; + case 0x04: + printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; + break ; + default: + break ; + } /* 
switch srb[2] */ + break ; + + default: + printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name); + break ; + } /* switch srb[0] */ + +} + +static int olympic_set_mac_address (struct net_device *dev, void *addr) +{ + struct sockaddr *saddr = addr ; + struct olympic_private *olympic_priv = netdev_priv(dev); + + if (netif_running(dev)) { + printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; + return -EIO ; + } + + memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ; + + if (olympic_priv->olympic_message_level) { + printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0], + olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2], + olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4], + olympic_priv->olympic_laa[5]); + } + + return 0 ; +} + +static void olympic_arb_cmd(struct net_device *dev) +{ + struct olympic_private *olympic_priv = netdev_priv(dev); + u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; + u8 __iomem *arb_block, *asb_block, *srb ; + u8 header_len ; + u16 frame_len, buffer_len ; + struct sk_buff *mac_frame ; + u8 __iomem *buf_ptr ; + u8 __iomem *frame_data ; + u16 buff_off ; + u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */ + u8 fdx_prot_error ; + u16 next_ptr; + + arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; + asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; + srb = (olympic_priv->olympic_lap + olympic_priv->srb) ; + + if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */ + + header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */ + frame_len = swab16(readw(arb_block + 10)) ; + + buff_off = swab16(readw(arb_block + 6)) ; + + buf_ptr = olympic_priv->olympic_lap + buff_off ; + +#if OLYMPIC_DEBUG +{ + int i; + frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; + + for (i=0 ; i < 14 ; i++) { + printk("Loc %d = %02x\n",i,readb(frame_data + i)); + } + + printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); +} +#endif + mac_frame = dev_alloc_skb(frame_len) ; + if (!mac_frame) { + printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name); + goto drop_frame; + } + + /* Walk the buffer chain, creating the frame */ + + do { + frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; + buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); + memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ; + next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); + } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr))); + + mac_frame->protocol = tr_type_trans(mac_frame, dev); + + if (olympic_priv->olympic_network_monitor) { + struct trh_hdr *mac_hdr; + printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name); + mac_hdr = tr_hdr(mac_frame); + printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n", + dev->name, mac_hdr->daddr); + printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n", + dev->name, mac_hdr->saddr); + } + netif_rx(mac_frame); + +drop_frame: + /* Now tell the card we have dealt with the received frame */ + + /* Set LISR Bit 1 */ + writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM); + + /* Is the ASB free ? 
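+ *
+ * The receive buffer must be handed back to the adapter through the
+ * ASB.  If the ASB return-code byte (asb_block + 2) is not 0xff the
+ * ASB is still busy, so we only note asb_queued = 1, request an
+ * ASB-free interrupt and let olympic_asb_bh() post the reply later.
+ * Otherwise the ASB_RECEIVE_DATA reply (echoing the buffer address
+ * taken from the ARB) is written immediately and asb_queued = 2 so
+ * the bottom half only has to check the return code.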
*/ + + if (readb(asb_block + 2) != 0xff) { + olympic_priv->asb_queued = 1 ; + writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); + return ; + /* Drop out and wait for the bottom half to be run */ + } + + writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */ + writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */ + writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */ + writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */ + + writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); + + olympic_priv->asb_queued = 2 ; + + return ; + + } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */ + lan_status = swab16(readw(arb_block+6)); + fdx_prot_error = readb(arb_block+8) ; + + /* Issue ARB Free */ + writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM); + + lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ; + + if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { + if (lan_status_diff & LSC_LWF) + printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name); + if (lan_status_diff & LSC_ARW) + printk(KERN_WARNING "%s: Auto removal error\n",dev->name); + if (lan_status_diff & LSC_FPE) + printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name); + if (lan_status_diff & LSC_RR) + printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name); + + /* Adapter has been closed by the hardware */ + + /* reset tx/rx fifo's and busmaster logic */ + + writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL); + udelay(1); + writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); + netif_stop_queue(dev); + olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; + printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); + } /* If serious error */ + + if (olympic_priv->olympic_message_level) { + if (lan_status_diff & LSC_SIG_LOSS) + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); + if (lan_status_diff & LSC_HARD_ERR) + printk(KERN_INFO "%s: Beaconing\n",dev->name); + if (lan_status_diff & LSC_SOFT_ERR) + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); + if (lan_status_diff & LSC_TRAN_BCN) + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); + if (lan_status_diff & LSC_SS) + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); + if (lan_status_diff & LSC_RING_REC) + printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); + if (lan_status_diff & LSC_FDX_MODE) + printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name); + } + + if (lan_status_diff & LSC_CO) { + + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); + + /* Issue READ.LOG command */ + + writeb(SRB_READ_LOG, srb); + writeb(0,srb+1); + writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); + writeb(0,srb+3); + writeb(0,srb+4); + writeb(0,srb+5); + + olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ + + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + + } + + if (lan_status_diff & LSC_SR_CO) { + + if (olympic_priv->olympic_message_level) + printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); + + /* Issue a READ.SR.COUNTERS */ + + writeb(SRB_READ_SR_COUNTERS,srb); + writeb(0,srb+1); + writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); + writeb(0,srb+3); + + olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ + + writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); + + } + + olympic_priv->olympic_lan_status = 
lan_status ; + + } /* Lan.change.status */ + else + printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); +} + +static void olympic_asb_bh(struct net_device *dev) +{ + struct olympic_private *olympic_priv = netdev_priv(dev); + u8 __iomem *arb_block, *asb_block ; + + arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; + asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; + + if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */ + + writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */ + writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */ + writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */ + writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */ + + writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); + olympic_priv->asb_queued = 2 ; + + return ; + } + + if (olympic_priv->asb_queued == 2) { + switch (readb(asb_block+2)) { + case 0x01: + printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); + break ; + case 0x26: + printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); + break ; + case 0xFF: + /* Valid response, everything should be ok again */ + break ; + default: + printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name); + break ; + } + } + olympic_priv->asb_queued = 0 ; +} + +static int olympic_change_mtu(struct net_device *dev, int mtu) +{ + struct olympic_private *olympic_priv = netdev_priv(dev); + u16 max_mtu ; + + if (olympic_priv->olympic_ring_speed == 4) + max_mtu = 4500 ; + else + max_mtu = 18000 ; + + if (mtu > max_mtu) + return -EINVAL ; + if (mtu < 100) + return -EINVAL ; + + dev->mtu = mtu ; + olympic_priv->pkt_buf_sz = mtu + TR_HLEN ; + + return 0 ; +} + +static int olympic_proc_show(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + struct olympic_private *olympic_priv=netdev_priv(dev); + u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; + u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; + u8 addr[6]; + u8 addr2[6]; + int i; + + seq_printf(m, + "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); + seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n", + dev->name); + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); + + seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n", + dev->name, + dev->dev_addr, addr, + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), + readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); + + seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name); + + seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", + dev->name) ; + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i); + for (i = 0 ; i < 6 ; i++) + addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); + + seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n", + dev->name, + readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), + readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), + readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2), + 
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3), + addr, addr2, + swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); + + seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", + dev->name) ; + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); + seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n", + dev->name, addr, + swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl)))); + + seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", + dev->name) ; + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); + seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n", + dev->name, + swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), + swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), + addr, + readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)), + readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1), + readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), + readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3)); + + return 0; +} + +static int olympic_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, olympic_proc_show, PDE(inode)->data); +} + +static const struct file_operations olympic_proc_ops = { + .open = olympic_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void __devexit olympic_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev) ; + struct olympic_private *olympic_priv=netdev_priv(dev); + + if (olympic_priv->olympic_network_monitor) { + char proc_name[20] ; + strcpy(proc_name,"olympic_") ; + strcat(proc_name,dev->name) ; + remove_proc_entry(proc_name,init_net.proc_net); + } + unregister_netdev(dev) ; + iounmap(olympic_priv->olympic_mmio) ; + iounmap(olympic_priv->olympic_lap) ; + pci_release_regions(pdev) ; + pci_set_drvdata(pdev,NULL) ; + free_netdev(dev) ; +} + +static struct pci_driver olympic_driver = { + .name = "olympic", + .id_table = olympic_pci_tbl, + .probe = olympic_probe, + .remove = __devexit_p(olympic_remove_one), +}; + +static int __init olympic_pci_init(void) +{ + return pci_register_driver(&olympic_driver) ; +} + +static void __exit olympic_pci_cleanup(void) +{ + pci_unregister_driver(&olympic_driver) ; +} + + +module_init(olympic_pci_init) ; +module_exit(olympic_pci_cleanup) ; + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h new file mode 100644 index 00000000..30631bae --- /dev/null +++ b/drivers/net/tokenring/olympic.h @@ -0,0 +1,321 @@ +/* + * olympic.h (c) 1999 Peter De Schrijver All Rights Reserved + * 1999,2000 Mike Phillips (mikep@linuxtr.net) + * + * Linux driver for IBM 
PCI tokenring cards based on the olympic and the PIT/PHY chipset. + * + * Base Driver Skeleton: + * Written 1993-94 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + */ + +#define CID 0x4e + +#define BCTL 0x70 +#define BCTL_SOFTRESET (1<<15) +#define BCTL_MIMREB (1<<6) +#define BCTL_MODE_INDICATOR (1<<5) + +#define GPR 0x4a +#define GPR_OPTI_BF (1<<6) +#define GPR_NEPTUNE_BF (1<<4) +#define GPR_AUTOSENSE (1<<2) +#define GPR_16MBPS (1<<3) + +#define PAG 0x85 +#define LBC 0x8e + +#define LISR 0x10 +#define LISR_SUM 0x14 +#define LISR_RWM 0x18 + +#define LISR_LIE (1<<15) +#define LISR_SLIM (1<<13) +#define LISR_SLI (1<<12) +#define LISR_PCMSRMASK (1<<11) +#define LISR_PCMSRINT (1<<10) +#define LISR_WOLMASK (1<<9) +#define LISR_WOL (1<<8) +#define LISR_SRB_CMD (1<<5) +#define LISR_ASB_REPLY (1<<4) +#define LISR_ASB_FREE_REQ (1<<2) +#define LISR_ARB_FREE (1<<1) +#define LISR_TRB_FRAME (1<<0) + +#define SISR 0x20 +#define SISR_SUM 0x24 +#define SISR_RWM 0x28 +#define SISR_RR 0x2C +#define SISR_RESMASK 0x30 +#define SISR_MASK 0x54 +#define SISR_MASK_SUM 0x58 +#define SISR_MASK_RWM 0x5C + +#define SISR_TX2_IDLE (1<<31) +#define SISR_TX2_HALT (1<<29) +#define SISR_TX2_EOF (1<<28) +#define SISR_TX1_IDLE (1<<27) +#define SISR_TX1_HALT (1<<25) +#define SISR_TX1_EOF (1<<24) +#define SISR_TIMEOUT (1<<23) +#define SISR_RX_NOBUF (1<<22) +#define SISR_RX_STATUS (1<<21) +#define SISR_RX_HALT (1<<18) +#define SISR_RX_EOF_EARLY (1<<16) +#define SISR_MI (1<<15) +#define SISR_PI (1<<13) +#define SISR_ERR (1<<9) +#define SISR_ADAPTER_CHECK (1<<6) +#define SISR_SRB_REPLY (1<<5) +#define SISR_ASB_FREE (1<<4) +#define SISR_ARB_CMD (1<<3) +#define SISR_TRB_REPLY (1<<2) + +#define EISR 0x34 +#define EISR_RWM 0x38 +#define EISR_MASK 0x3c +#define EISR_MASK_OPTIONS 0x001FFF7F + +#define LAPA 0x60 +#define LAPWWO 0x64 +#define LAPWWC 0x68 +#define LAPCTL 0x6C +#define LAIPD 0x78 +#define LAIPDDINC 0x7C + +#define TIMER 0x50 + +#define CLKCTL 0x74 +#define CLKCTL_PAUSE (1<<15) + +#define PM_CON 0x4 + +#define BMCTL_SUM 0x40 +#define BMCTL_RWM 0x44 +#define BMCTL_TX2_DIS (1<<30) +#define BMCTL_TX1_DIS (1<<26) +#define BMCTL_RX_DIS (1<<22) + +#define BMASR 0xcc + +#define RXDESCQ 0x90 +#define RXDESCQCNT 0x94 +#define RXCDA 0x98 +#define RXENQ 0x9C +#define RXSTATQ 0xA0 +#define RXSTATQCNT 0xA4 +#define RXCSA 0xA8 +#define RXCLEN 0xAC +#define RXHLEN 0xAE + +#define TXDESCQ_1 0xb0 +#define TXDESCQ_2 0xd0 +#define TXDESCQCNT_1 0xb4 +#define TXDESCQCNT_2 0xd4 +#define TXCDA_1 0xb8 +#define TXCDA_2 0xd8 +#define TXENQ_1 0xbc +#define TXENQ_2 0xdc +#define TXSTATQ_1 0xc0 +#define TXSTATQ_2 0xe0 +#define TXSTATQCNT_1 0xc4 +#define TXSTATQCNT_2 0xe4 +#define TXCSA_1 0xc8 +#define TXCSA_2 0xe8 +/* Cardbus */ +#define FERMASK 0xf4 +#define FERMASK_INT_BIT (1<<15) + +#define OLYMPIC_IO_SPACE 256 + +#define SRB_COMMAND_SIZE 50 + +#define OLYMPIC_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ + +/* Defines for LAN STATUS CHANGE reports */ +#define LSC_SIG_LOSS 0x8000 +#define LSC_HARD_ERR 0x4000 +#define LSC_SOFT_ERR 0x2000 +#define LSC_TRAN_BCN 0x1000 +#define LSC_LWF 0x0800 +#define LSC_ARW 0x0400 +#define LSC_FPE 0x0200 +#define LSC_RR 0x0100 +#define LSC_CO 0x0080 +#define LSC_SS 0x0040 +#define LSC_RING_REC 0x0020 +#define LSC_SR_CO 0x0010 +#define LSC_FDX_MODE 0x0004 + +/* Defines for OPEN 
ADAPTER command */ + +#define OPEN_ADAPTER_EXT_WRAP (1<<15) +#define OPEN_ADAPTER_DIS_HARDEE (1<<14) +#define OPEN_ADAPTER_DIS_SOFTERR (1<<13) +#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12) +#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11) +#define OPEN_ADAPTER_ENABLE_EC (1<<10) +#define OPEN_ADAPTER_CONTENDER (1<<8) +#define OPEN_ADAPTER_PASS_BEACON (1<<7) +#define OPEN_ADAPTER_ENABLE_FDX (1<<6) +#define OPEN_ADAPTER_ENABLE_RPL (1<<5) +#define OPEN_ADAPTER_INHIBIT_ETR (1<<4) +#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3) +#define OPEN_ADAPTER_USE_OPTS2 (1<<0) + +#define OPEN_ADAPTER_2_ENABLE_ONNOW (1<<15) + +/* Defines for SRB Commands */ + +#define SRB_ACCESS_REGISTER 0x1f +#define SRB_CLOSE_ADAPTER 0x04 +#define SRB_CONFIGURE_BRIDGE 0x0c +#define SRB_CONFIGURE_WAKEUP_EVENT 0x1a +#define SRB_MODIFY_BRIDGE_PARMS 0x15 +#define SRB_MODIFY_OPEN_OPTIONS 0x01 +#define SRB_MODIFY_RECEIVE_OPTIONS 0x17 +#define SRB_NO_OPERATION 0x00 +#define SRB_OPEN_ADAPTER 0x03 +#define SRB_READ_LOG 0x08 +#define SRB_READ_SR_COUNTERS 0x16 +#define SRB_RESET_GROUP_ADDRESS 0x02 +#define SRB_SAVE_CONFIGURATION 0x1b +#define SRB_SET_BRIDGE_PARMS 0x09 +#define SRB_SET_BRIDGE_TARGETS 0x10 +#define SRB_SET_FUNC_ADDRESS 0x07 +#define SRB_SET_GROUP_ADDRESS 0x06 +#define SRB_SET_GROUP_ADDR_OPTIONS 0x11 +#define SRB_UPDATE_WAKEUP_PATTERN 0x19 + +/* Clear return code */ + +#define OLYMPIC_CLEAR_RET_CODE 0xfe + +/* ARB Commands */ +#define ARB_RECEIVE_DATA 0x81 +#define ARB_LAN_CHANGE_STATUS 0x84 +/* ASB Response commands */ + +#define ASB_RECEIVE_DATA 0x81 + + +/* Olympic defaults for buffers */ + +#define OLYMPIC_RX_RING_SIZE 16 /* should be a power of 2 */ +#define OLYMPIC_TX_RING_SIZE 8 /* should be a power of 2 */ + +#define PKT_BUF_SZ 4096 /* Default packet size */ + +/* Olympic data structures */ + +/* xxxx These structures are all little endian in hardware. */ + +struct olympic_tx_desc { + __le32 buffer; + __le32 status_length; +}; + +struct olympic_tx_status { + __le32 status; +}; + +struct olympic_rx_desc { + __le32 buffer; + __le32 res_length; +}; + +struct olympic_rx_status { + __le32 fragmentcnt_framelen; + __le32 status_buffercnt; +}; +/* xxxx END These structures are all little endian in hardware. */ +/* xxxx There may be more, but I'm pretty sure about these */ + +struct mac_receive_buffer { + __le16 next ; + u8 padding ; + u8 frame_status ; + __le16 buffer_length ; + u8 frame_data ; +}; + +struct olympic_private { + + u16 srb; /* be16 */ + u16 trb; /* be16 */ + u16 arb; /* be16 */ + u16 asb; /* be16 */ + + u8 __iomem *olympic_mmio; + u8 __iomem *olympic_lap; + struct pci_dev *pdev ; + const char *olympic_card_name; + + spinlock_t olympic_lock ; + + volatile int srb_queued; /* True if an SRB is still posted */ + wait_queue_head_t srb_wait; + + volatile int asb_queued; /* True if an ASB is posted */ + + volatile int trb_queued; /* True if a TRB is posted */ + wait_queue_head_t trb_wait ; + + /* These must be on a 4 byte boundary. 
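+ *
+ * Each descriptor is a pair of little-endian 32-bit words which the
+ * adapter, presumably, fetches by bus mastering from the physical
+ * addresses kept in the *_dma_addr fields below; hence the 32-bit
+ * alignment requirement on these rings.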
*/ + struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE]; + struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE]; + struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE]; + struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE]; + + struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; + int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; + + u16 olympic_lan_status ; + u8 olympic_ring_speed ; + u16 pkt_buf_sz ; + u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor; + u16 olympic_addr_table_addr, olympic_parms_addr ; + u8 olympic_laa[6] ; + u32 rx_ring_dma_addr; + u32 rx_status_ring_dma_addr; + u32 tx_ring_dma_addr; + u32 tx_status_ring_dma_addr; +}; + +struct olympic_adapter_addr_table { + + u8 node_addr[6] ; + u8 reserved[4] ; + u8 func_addr[4] ; +} ; + +struct olympic_parameters_table { + + u8 phys_addr[4] ; + u8 up_node_addr[6] ; + u8 up_phys_addr[4] ; + u8 poll_addr[6] ; + u16 reserved ; + u16 acc_priority ; + u16 auth_source_class ; + u16 att_code ; + u8 source_addr[6] ; + u16 beacon_type ; + u16 major_vector ; + u16 lan_status ; + u16 soft_error_time ; + u16 reserved1 ; + u16 local_ring ; + u16 mon_error ; + u16 beacon_transmit ; + u16 beacon_receive ; + u16 frame_correl ; + u8 beacon_naun[6] ; + u32 reserved2 ; + u8 beacon_phys[4] ; +}; diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c new file mode 100644 index 00000000..8d362e64 --- /dev/null +++ b/drivers/net/tokenring/proteon.c @@ -0,0 +1,423 @@ +/* + * proteon.c: A network driver for Proteon ISA token ring cards. + * + * Based on tmspci written 1999 by Adam Fritzler + * + * Written 2003 by Jochen Friedrich + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver module supports the following cards: + * - Proteon 1392, 1392+ + * + * Maintainer(s): + * AF Adam Fritzler + * JF Jochen Friedrich jochen@scram.de + * + * Modification History: + * 02-Jan-03 JF Created + * + */ +static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "tms380tr.h" + +#define PROTEON_IO_EXTENT 32 + +/* A zero-terminated list of I/O addresses to be probed. */ +static unsigned int portlist[] __initdata = { + 0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot. + 0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot. + 0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot. + 0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot. + 0 +}; + +/* A zero-terminated list of IRQs to be probed. */ +static unsigned short irqlist[] = { + 7, 6, 5, 4, 3, 12, 11, 10, 9, + 0 +}; + +/* A zero-terminated list of DMAs to be probed. 
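+ * Channels 5-7 are the 16-bit ISA DMA channels; setup_card() tries
+ * them in this order with request_dma() when no dma= module
+ * parameter was supplied for the card.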
*/ +static int dmalist[] __initdata = { + 5, 6, 7, + 0 +}; + +static char cardname[] = "Proteon 1392\0"; +static u64 dma_mask = ISA_MAX_ADDRESS; +static int proteon_open(struct net_device *dev); +static void proteon_read_eeprom(struct net_device *dev); +static unsigned short proteon_setnselout_pins(struct net_device *dev); + +static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg) +{ + return inb(dev->base_addr + reg); +} + +static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg) +{ + return inw(dev->base_addr + reg); +} + +static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outb(val, dev->base_addr + reg); +} + +static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outw(val, dev->base_addr + reg); +} + +static int __init proteon_probe1(struct net_device *dev, int ioaddr) +{ + unsigned char chk1, chk2; + int i; + + if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname)) + return -ENODEV; + + + chk1 = inb(ioaddr + 0x1f); /* Get Proteon ID reg 1 */ + if (chk1 != 0x1f) + goto nodev; + + chk1 = inb(ioaddr + 0x1e) & 0x07; /* Get Proteon ID reg 0 */ + for (i=0; i<16; i++) { + chk2 = inb(ioaddr + 0x1e) & 0x07; + if (((chk1 + 1) & 0x07) != chk2) + goto nodev; + chk1 = chk2; + } + + dev->base_addr = ioaddr; + return 0; +nodev: + release_region(ioaddr, PROTEON_IO_EXTENT); + return -ENODEV; +} + +static struct net_device_ops proteon_netdev_ops __read_mostly; + +static int __init setup_card(struct net_device *dev, struct device *pdev) +{ + struct net_local *tp; + static int versionprinted; + const unsigned *port; + int j,err = 0; + + if (!dev) + return -ENOMEM; + + if (dev->base_addr) /* probe specific location */ + err = proteon_probe1(dev, dev->base_addr); + else { + for (port = portlist; *port; port++) { + err = proteon_probe1(dev, *port); + if (!err) + break; + } + } + if (err) + goto out5; + + /* At this point we have found a valid card. 
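+ * What follows hooks the card into the generic TMS380 core: the ISA
+ * dma_mask is handed to tmsdev_init(), the station address is read
+ * from adapter RAM, the SIF accessor callbacks are installed and the
+ * IRQ and DMA channel are claimed, either the ones passed as module
+ * parameters or the first free entries from the probe lists above.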
*/ + + if (versionprinted++ == 0) + printk(KERN_DEBUG "%s", version); + + err = -EIO; + pdev->dma_mask = &dma_mask; + if (tmsdev_init(dev, pdev)) + goto out4; + + dev->base_addr &= ~3; + + proteon_read_eeprom(dev); + + printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n", + dev->dev_addr); + + tp = netdev_priv(dev); + tp->setnselout = proteon_setnselout_pins; + + tp->sifreadb = proteon_sifreadb; + tp->sifreadw = proteon_sifreadw; + tp->sifwriteb = proteon_sifwriteb; + tp->sifwritew = proteon_sifwritew; + + memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1); + + tp->tmspriv = NULL; + + dev->netdev_ops = &proteon_netdev_ops; + + if (dev->irq == 0) + { + for(j = 0; irqlist[j] != 0; j++) + { + dev->irq = irqlist[j]; + if (!request_irq(dev->irq, tms380tr_interrupt, 0, + cardname, dev)) + break; + } + + if(irqlist[j] == 0) + { + printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n"); + goto out3; + } + } + else + { + for(j = 0; irqlist[j] != 0; j++) + if (irqlist[j] == dev->irq) + break; + if (irqlist[j] == 0) + { + printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n", + dev->irq); + goto out3; + } + if (request_irq(dev->irq, tms380tr_interrupt, 0, + cardname, dev)) + { + printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n", + dev->irq); + goto out3; + } + } + + if (dev->dma == 0) + { + for(j = 0; dmalist[j] != 0; j++) + { + dev->dma = dmalist[j]; + if (!request_dma(dev->dma, cardname)) + break; + } + + if(dmalist[j] == 0) + { + printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n"); + goto out2; + } + } + else + { + for(j = 0; dmalist[j] != 0; j++) + if (dmalist[j] == dev->dma) + break; + if (dmalist[j] == 0) + { + printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n", + dev->dma); + goto out2; + } + if (request_dma(dev->dma, cardname)) + { + printk(KERN_INFO "proteon.c: Selected DMA %d not available\n", + dev->dma); + goto out2; + } + } + + err = register_netdev(dev); + if (err) + goto out; + + printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n", + dev->name, dev->base_addr, dev->irq, dev->dma); + + return 0; +out: + free_dma(dev->dma); +out2: + free_irq(dev->irq, dev); +out3: + tmsdev_term(dev); +out4: + release_region(dev->base_addr, PROTEON_IO_EXTENT); +out5: + return err; +} + +/* + * Reads MAC address from adapter RAM, which should've read it from + * the onboard ROM. + * + * Calling this on a board that does not support it can be a very + * dangerous thing. The Madge board, for instance, will lock your + * machine hard when this is called. Luckily, its supported in a + * separate driver. 
--ASF + */ +static void proteon_read_eeprom(struct net_device *dev) +{ + int i; + + /* Address: 0000:0000 */ + proteon_sifwritew(dev, 0, SIFADX); + proteon_sifwritew(dev, 0, SIFADR); + + /* Read six byte MAC address data */ + dev->addr_len = 6; + for(i = 0; i < 6; i++) + dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8; +} + +static unsigned short proteon_setnselout_pins(struct net_device *dev) +{ + return 0; +} + +static int proteon_open(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned short val = 0; + int i; + + /* Proteon reset sequence */ + outb(0, dev->base_addr + 0x11); + mdelay(20); + outb(0x04, dev->base_addr + 0x11); + mdelay(20); + outb(0, dev->base_addr + 0x11); + mdelay(100); + + /* set control/status reg */ + val = inb(dev->base_addr + 0x11); + val |= 0x78; + val &= 0xf9; + if(tp->DataRate == SPEED_4) + val |= 0x20; + else + val &= ~0x20; + + outb(val, dev->base_addr + 0x11); + outb(0xff, dev->base_addr + 0x12); + for(i = 0; irqlist[i] != 0; i++) + { + if(irqlist[i] == dev->irq) + break; + } + val = i; + i = (7 - dev->dma) << 4; + val |= i; + outb(val, dev->base_addr + 0x13); + + return tms380tr_open(dev); +} + +#define ISATR_MAX_ADAPTERS 3 + +static int io[ISATR_MAX_ADAPTERS]; +static int irq[ISATR_MAX_ADAPTERS]; +static int dma[ISATR_MAX_ADAPTERS]; + +MODULE_LICENSE("GPL"); + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(dma, int, NULL, 0); + +static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS]; + +static struct platform_driver proteon_driver = { + .driver = { + .name = "proteon", + }, +}; + +static int __init proteon_init(void) +{ + struct net_device *dev; + struct platform_device *pdev; + int i, num = 0, err = 0; + + proteon_netdev_ops = tms380tr_netdev_ops; + proteon_netdev_ops.ndo_open = proteon_open; + proteon_netdev_ops.ndo_stop = tms380tr_close; + + err = platform_driver_register(&proteon_driver); + if (err) + return err; + + for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { + dev = alloc_trdev(sizeof(struct net_local)); + if (!dev) + continue; + + dev->base_addr = io[i]; + dev->irq = irq[i]; + dev->dma = dma[i]; + pdev = platform_device_register_simple("proteon", + i, NULL, 0); + if (IS_ERR(pdev)) { + free_netdev(dev); + continue; + } + err = setup_card(dev, &pdev->dev); + if (!err) { + proteon_dev[i] = pdev; + platform_set_drvdata(pdev, dev); + ++num; + } else { + platform_device_unregister(pdev); + free_netdev(dev); + } + } + + printk(KERN_NOTICE "proteon.c: %d cards found.\n", num); + /* Probe for cards. 
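+ * If nothing was found the platform driver is unregistered again.
+ * Autoprobing can be bypassed by passing resources per card, e.g.
+ * (illustrative values taken from the probe tables above):
+ *
+ *     modprobe proteon io=0x0A20 irq=10 dma=5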
*/ + if (num == 0) { + printk(KERN_NOTICE "proteon.c: No cards found.\n"); + platform_driver_unregister(&proteon_driver); + return -ENODEV; + } + return 0; +} + +static void __exit proteon_cleanup(void) +{ + struct net_device *dev; + int i; + + for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { + struct platform_device *pdev = proteon_dev[i]; + + if (!pdev) + continue; + dev = platform_get_drvdata(pdev); + unregister_netdev(dev); + release_region(dev->base_addr, PROTEON_IO_EXTENT); + free_irq(dev->irq, dev); + free_dma(dev->dma); + tmsdev_term(dev); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + platform_device_unregister(pdev); + } + platform_driver_unregister(&proteon_driver); +} + +module_init(proteon_init); +module_exit(proteon_cleanup); diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c new file mode 100644 index 00000000..46db5c53 --- /dev/null +++ b/drivers/net/tokenring/skisa.c @@ -0,0 +1,433 @@ +/* + * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards. + * + * Based on tmspci written 1999 by Adam Fritzler + * + * Written 2000 by Jochen Friedrich + * Dedicated to my girlfriend Steffi Bopp + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver module supports the following cards: + * - SysKonnect TR4/16(+) ISA (SK-4190) + * + * Maintainer(s): + * AF Adam Fritzler + * JF Jochen Friedrich jochen@scram.de + * + * Modification History: + * 14-Jan-01 JF Created + * 28-Oct-02 JF Fixed probe of card for static compilation. + * Fixed module init to not make hotplug go wild. + * 09-Nov-02 JF Fixed early bail out on out of memory + * situations if multiple cards are found. + * Cleaned up some unnecessary console SPAM. + * 09-Dec-02 JF Fixed module reference counting. + * 02-Jan-03 JF Renamed to skisa.c + * + */ +static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n"; + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "tms380tr.h" + +#define SK_ISA_IO_EXTENT 32 + +/* A zero-terminated list of I/O addresses to be probed. */ +static unsigned int portlist[] __initdata = { + 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK + 0 +}; + +/* A zero-terminated list of IRQs to be probed. + * Used again after initial probe for sktr_chipset_init, called from sktr_open. + */ +static const unsigned short irqlist[] = { + 3, 5, 9, 10, 11, 12, 15, + 0 +}; + +/* A zero-terminated list of DMAs to be probed. 
*/ +static int dmalist[] __initdata = { + 5, 6, 7, + 0 +}; + +static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; +static u64 dma_mask = ISA_MAX_ADDRESS; +static int sk_isa_open(struct net_device *dev); +static void sk_isa_read_eeprom(struct net_device *dev); +static unsigned short sk_isa_setnselout_pins(struct net_device *dev); + +static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg) +{ + return inb(dev->base_addr + reg); +} + +static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg) +{ + return inw(dev->base_addr + reg); +} + +static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outb(val, dev->base_addr + reg); +} + +static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outw(val, dev->base_addr + reg); +} + + +static int __init sk_isa_probe1(struct net_device *dev, int ioaddr) +{ + unsigned char old, chk1, chk2; + + if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname)) + return -ENODEV; + + old = inb(ioaddr + SIFADR); /* Get the old SIFADR value */ + + chk1 = 0; /* Begin with check value 0 */ + do { + /* Write new SIFADR value */ + outb(chk1, ioaddr + SIFADR); + + /* Read, invert and write */ + chk2 = inb(ioaddr + SIFADD); + chk2 ^= 0x0FE; + outb(chk2, ioaddr + SIFADR); + + /* Read, invert and compare */ + chk2 = inb(ioaddr + SIFADD); + chk2 ^= 0x0FE; + + if(chk1 != chk2) { + release_region(ioaddr, SK_ISA_IO_EXTENT); + return -ENODEV; + } + + chk1 -= 2; + } while(chk1 != 0); /* Repeat 128 times (all byte values) */ + + /* Restore the SIFADR value */ + outb(old, ioaddr + SIFADR); + + dev->base_addr = ioaddr; + return 0; +} + +static struct net_device_ops sk_isa_netdev_ops __read_mostly; + +static int __init setup_card(struct net_device *dev, struct device *pdev) +{ + struct net_local *tp; + static int versionprinted; + const unsigned *port; + int j, err = 0; + + if (!dev) + return -ENOMEM; + + if (dev->base_addr) /* probe specific location */ + err = sk_isa_probe1(dev, dev->base_addr); + else { + for (port = portlist; *port; port++) { + err = sk_isa_probe1(dev, *port); + if (!err) + break; + } + } + if (err) + goto out5; + + /* At this point we have found a valid card. 
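+ * sk_isa_probe1() above established that by walking the SIF address
+ * register through all 128 even values and verifying that each one
+ * (and its inverted pattern) reads back correctly, which a bare ISA
+ * I/O range normally will not do.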
*/ + + if (versionprinted++ == 0) + printk(KERN_DEBUG "%s", version); + + err = -EIO; + pdev->dma_mask = &dma_mask; + if (tmsdev_init(dev, pdev)) + goto out4; + + dev->base_addr &= ~3; + + sk_isa_read_eeprom(dev); + + printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n", + dev->dev_addr); + + tp = netdev_priv(dev); + tp->setnselout = sk_isa_setnselout_pins; + + tp->sifreadb = sk_isa_sifreadb; + tp->sifreadw = sk_isa_sifreadw; + tp->sifwriteb = sk_isa_sifwriteb; + tp->sifwritew = sk_isa_sifwritew; + + memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1); + + tp->tmspriv = NULL; + + dev->netdev_ops = &sk_isa_netdev_ops; + + if (dev->irq == 0) + { + for(j = 0; irqlist[j] != 0; j++) + { + dev->irq = irqlist[j]; + if (!request_irq(dev->irq, tms380tr_interrupt, 0, + isa_cardname, dev)) + break; + } + + if(irqlist[j] == 0) + { + printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n"); + goto out3; + } + } + else + { + for(j = 0; irqlist[j] != 0; j++) + if (irqlist[j] == dev->irq) + break; + if (irqlist[j] == 0) + { + printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n", + dev->irq); + goto out3; + } + if (request_irq(dev->irq, tms380tr_interrupt, 0, + isa_cardname, dev)) + { + printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n", + dev->irq); + goto out3; + } + } + + if (dev->dma == 0) + { + for(j = 0; dmalist[j] != 0; j++) + { + dev->dma = dmalist[j]; + if (!request_dma(dev->dma, isa_cardname)) + break; + } + + if(dmalist[j] == 0) + { + printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n"); + goto out2; + } + } + else + { + for(j = 0; dmalist[j] != 0; j++) + if (dmalist[j] == dev->dma) + break; + if (dmalist[j] == 0) + { + printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n", + dev->dma); + goto out2; + } + if (request_dma(dev->dma, isa_cardname)) + { + printk(KERN_INFO "skisa.c: Selected DMA %d not available\n", + dev->dma); + goto out2; + } + } + + err = register_netdev(dev); + if (err) + goto out; + + printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n", + dev->name, dev->base_addr, dev->irq, dev->dma); + + return 0; +out: + free_dma(dev->dma); +out2: + free_irq(dev->irq, dev); +out3: + tmsdev_term(dev); +out4: + release_region(dev->base_addr, SK_ISA_IO_EXTENT); +out5: + return err; +} + +/* + * Reads MAC address from adapter RAM, which should've read it from + * the onboard ROM. + * + * Calling this on a board that does not support it can be a very + * dangerous thing. The Madge board, for instance, will lock your + * machine hard when this is called. Luckily, its supported in a + * separate driver. 
--ASF + */ +static void sk_isa_read_eeprom(struct net_device *dev) +{ + int i; + + /* Address: 0000:0000 */ + sk_isa_sifwritew(dev, 0, SIFADX); + sk_isa_sifwritew(dev, 0, SIFADR); + + /* Read six byte MAC address data */ + dev->addr_len = 6; + for(i = 0; i < 6; i++) + dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8; +} + +static unsigned short sk_isa_setnselout_pins(struct net_device *dev) +{ + return 0; +} + +static int sk_isa_open(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned short val = 0; + unsigned short oldval; + int i; + + val = 0; + for(i = 0; irqlist[i] != 0; i++) + { + if(irqlist[i] == dev->irq) + break; + } + + val |= CYCLE_TIME << 2; + val |= i << 4; + i = dev->dma - 5; + val |= i; + if(tp->DataRate == SPEED_4) + val |= LINE_SPEED_BIT; + else + val &= ~LINE_SPEED_BIT; + oldval = sk_isa_sifreadb(dev, POSREG); + /* Leave cycle bits alone */ + oldval |= 0xf3; + val &= oldval; + sk_isa_sifwriteb(dev, val, POSREG); + + return tms380tr_open(dev); +} + +#define ISATR_MAX_ADAPTERS 3 + +static int io[ISATR_MAX_ADAPTERS]; +static int irq[ISATR_MAX_ADAPTERS]; +static int dma[ISATR_MAX_ADAPTERS]; + +MODULE_LICENSE("GPL"); + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(dma, int, NULL, 0); + +static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; + +static struct platform_driver sk_isa_driver = { + .driver = { + .name = "skisa", + }, +}; + +static int __init sk_isa_init(void) +{ + struct net_device *dev; + struct platform_device *pdev; + int i, num = 0, err = 0; + + sk_isa_netdev_ops = tms380tr_netdev_ops; + sk_isa_netdev_ops.ndo_open = sk_isa_open; + sk_isa_netdev_ops.ndo_stop = tms380tr_close; + + err = platform_driver_register(&sk_isa_driver); + if (err) + return err; + + for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { + dev = alloc_trdev(sizeof(struct net_local)); + if (!dev) + continue; + + dev->base_addr = io[i]; + dev->irq = irq[i]; + dev->dma = dma[i]; + pdev = platform_device_register_simple("skisa", + i, NULL, 0); + if (IS_ERR(pdev)) { + free_netdev(dev); + continue; + } + err = setup_card(dev, &pdev->dev); + if (!err) { + sk_isa_dev[i] = pdev; + platform_set_drvdata(sk_isa_dev[i], dev); + ++num; + } else { + platform_device_unregister(pdev); + free_netdev(dev); + } + } + + printk(KERN_NOTICE "skisa.c: %d cards found.\n", num); + /* Probe for cards. */ + if (num == 0) { + printk(KERN_NOTICE "skisa.c: No cards found.\n"); + platform_driver_unregister(&sk_isa_driver); + return -ENODEV; + } + return 0; +} + +static void __exit sk_isa_cleanup(void) +{ + struct net_device *dev; + int i; + + for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { + struct platform_device *pdev = sk_isa_dev[i]; + + if (!pdev) + continue; + dev = platform_get_drvdata(pdev); + unregister_netdev(dev); + release_region(dev->base_addr, SK_ISA_IO_EXTENT); + free_irq(dev->irq, dev); + free_dma(dev->dma); + tmsdev_term(dev); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + platform_device_unregister(pdev); + } + platform_driver_unregister(&sk_isa_driver); +} + +module_init(sk_isa_init); +module_exit(sk_isa_cleanup); diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c new file mode 100644 index 00000000..d9044aba --- /dev/null +++ b/drivers/net/tokenring/smctr.c @@ -0,0 +1,5718 @@ +/* + * smctr.c: A network driver for the SMC Token Ring Adapters. 
+ * + * Written by Jay Schulist + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This device driver works with the following SMC adapters: + * - SMC TokenCard Elite (8115T, chips 825/584) + * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594) + * + * Source(s): + * - SMC TokenCard SDK. + * + * Maintainer(s): + * JS Jay Schulist + * + * Changes: + * 07102000 JS Fixed a timing problem in smctr_wait_cmd(); + * Also added a bit more discriptive error msgs. + * 07122000 JS Fixed problem with detecting a card with + * module io/irq/mem specified. + * + * To do: + * 1. Multicast support. + * + * Initial 2.5 cleanup Alan Cox 2002/10/28 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if BITS_PER_LONG == 64 +#error FIXME: driver does not support 64-bit platforms +#endif + +#include "smctr.h" /* Our Stuff */ + +static const char version[] __initdata = + KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n"; +static const char cardname[] = "smctr"; + + +#define SMCTR_IO_EXTENT 20 + +#ifdef CONFIG_MCA_LEGACY +static unsigned int smctr_posid = 0x6ec6; +#endif + +static int ringspeed; + +/* SMC Name of the Adapter. */ +static char smctr_name[] = "SMC TokenCard"; +static char *smctr_model = "Unknown"; + +/* Use 0 for production, 1 for verification, 2 for debug, and + * 3 for very verbose debug. + */ +#ifndef SMCTR_DEBUG +#define SMCTR_DEBUG 1 +#endif +static unsigned int smctr_debug = SMCTR_DEBUG; + +/* smctr.c prototypes and functions are arranged alphabeticly + * for clearity, maintainability and pure old fashion fun. 
+ */ +/* A */ +static int smctr_alloc_shared_memory(struct net_device *dev); + +/* B */ +static int smctr_bypass_state(struct net_device *dev); + +/* C */ +static int smctr_checksum_firmware(struct net_device *dev); +static int __init smctr_chk_isa(struct net_device *dev); +static int smctr_chg_rx_mask(struct net_device *dev); +static int smctr_clear_int(struct net_device *dev); +static int smctr_clear_trc_reset(int ioaddr); +static int smctr_close(struct net_device *dev); + +/* D */ +static int smctr_decode_firmware(struct net_device *dev, + const struct firmware *fw); +static int smctr_disable_16bit(struct net_device *dev); +static int smctr_disable_adapter_ctrl_store(struct net_device *dev); +static int smctr_disable_bic_int(struct net_device *dev); + +/* E */ +static int smctr_enable_16bit(struct net_device *dev); +static int smctr_enable_adapter_ctrl_store(struct net_device *dev); +static int smctr_enable_adapter_ram(struct net_device *dev); +static int smctr_enable_bic_int(struct net_device *dev); + +/* G */ +static int __init smctr_get_boardid(struct net_device *dev, int mca); +static int smctr_get_group_address(struct net_device *dev); +static int smctr_get_functional_address(struct net_device *dev); +static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev); +static int smctr_get_physical_drop_number(struct net_device *dev); +static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); +static int smctr_get_station_id(struct net_device *dev); +static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, + __u16 bytes_count); +static int smctr_get_upstream_neighbor_addr(struct net_device *dev); + +/* H */ +static int smctr_hardware_send_packet(struct net_device *dev, + struct net_local *tp); +/* I */ +static int smctr_init_acbs(struct net_device *dev); +static int smctr_init_adapter(struct net_device *dev); +static int smctr_init_card_real(struct net_device *dev); +static int smctr_init_rx_bdbs(struct net_device *dev); +static int smctr_init_rx_fcbs(struct net_device *dev); +static int smctr_init_shared_memory(struct net_device *dev); +static int smctr_init_tx_bdbs(struct net_device *dev); +static int smctr_init_tx_fcbs(struct net_device *dev); +static int smctr_internal_self_test(struct net_device *dev); +static irqreturn_t smctr_interrupt(int irq, void *dev_id); +static int smctr_issue_enable_int_cmd(struct net_device *dev, + __u16 interrupt_enable_mask); +static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, + __u16 ibits); +static int smctr_issue_init_timers_cmd(struct net_device *dev); +static int smctr_issue_init_txrx_cmd(struct net_device *dev); +static int smctr_issue_insert_cmd(struct net_device *dev); +static int smctr_issue_read_ring_status_cmd(struct net_device *dev); +static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt); +static int smctr_issue_remove_cmd(struct net_device *dev); +static int smctr_issue_resume_acb_cmd(struct net_device *dev); +static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue); +static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue); +static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue); +static int smctr_issue_test_internal_rom_cmd(struct net_device *dev); +static int smctr_issue_test_hic_cmd(struct net_device *dev); +static int smctr_issue_test_mac_reg_cmd(struct net_device *dev); +static int smctr_issue_trc_loopback_cmd(struct net_device *dev); +static int smctr_issue_tri_loopback_cmd(struct net_device 
*dev); +static int smctr_issue_write_byte_cmd(struct net_device *dev, + short aword_cnt, void *byte); +static int smctr_issue_write_word_cmd(struct net_device *dev, + short aword_cnt, void *word); + +/* J */ +static int smctr_join_complete_state(struct net_device *dev); + +/* L */ +static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev); +static int smctr_load_firmware(struct net_device *dev); +static int smctr_load_node_addr(struct net_device *dev); +static int smctr_lobe_media_test(struct net_device *dev); +static int smctr_lobe_media_test_cmd(struct net_device *dev); +static int smctr_lobe_media_test_state(struct net_device *dev); + +/* M */ +static int smctr_make_8025_hdr(struct net_device *dev, + MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc); +static int smctr_make_access_pri(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv); +static int smctr_make_auth_funct_class(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_corr(struct net_device *dev, + MAC_SUB_VECTOR *tsv, __u16 correlator); +static int smctr_make_funct_addr(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_group_addr(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_phy_drop_num(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); +static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); +static int smctr_make_ring_station_status(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_ring_station_version(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_tx_status_code(struct net_device *dev, + MAC_SUB_VECTOR *tsv, __u16 tx_fstatus); +static int smctr_make_upstream_neighbor_addr(struct net_device *dev, + MAC_SUB_VECTOR *tsv); +static int smctr_make_wrap_data(struct net_device *dev, + MAC_SUB_VECTOR *tsv); + +/* O */ +static int smctr_open(struct net_device *dev); +static int smctr_open_tr(struct net_device *dev); + +/* P */ +struct net_device *smctr_probe(int unit); +static int __init smctr_probe1(struct net_device *dev, int ioaddr); +static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, + struct net_device *dev, __u16 rx_status); + +/* R */ +static int smctr_ram_memory_test(struct net_device *dev); +static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator); +static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator); +static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf); +static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, + MAC_HEADER *rmf, __u16 *correlator); +static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator); +static int smctr_reset_adapter(struct net_device *dev); +static int smctr_restart_tx_chain(struct net_device *dev, short queue); +static int smctr_ring_status_chg(struct net_device *dev); +static int smctr_rx_frame(struct net_device *dev); + +/* S */ +static int smctr_send_dat(struct net_device *dev); +static netdev_tx_t smctr_send_packet(struct sk_buff *skb, + struct net_device *dev); +static int smctr_send_lobe_media_test(struct net_device *dev); +static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, + __u16 correlator); +static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, + __u16 correlator); +static int smctr_send_rpt_state(struct net_device *dev, 
MAC_HEADER *rmf, + __u16 correlator); +static int smctr_send_rpt_tx_forward(struct net_device *dev, + MAC_HEADER *rmf, __u16 tx_fstatus); +static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, + __u16 rcode, __u16 correlator); +static int smctr_send_rq_init(struct net_device *dev); +static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, + __u16 *tx_fstatus); +static int smctr_set_auth_access_pri(struct net_device *dev, + MAC_SUB_VECTOR *rsv); +static int smctr_set_auth_funct_class(struct net_device *dev, + MAC_SUB_VECTOR *rsv); +static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, + __u16 *correlator); +static int smctr_set_error_timer_value(struct net_device *dev, + MAC_SUB_VECTOR *rsv); +static int smctr_set_frame_forward(struct net_device *dev, + MAC_SUB_VECTOR *rsv, __u8 dc_sc); +static int smctr_set_local_ring_num(struct net_device *dev, + MAC_SUB_VECTOR *rsv); +static unsigned short smctr_set_ctrl_attention(struct net_device *dev); +static void smctr_set_multicast_list(struct net_device *dev); +static int smctr_set_page(struct net_device *dev, __u8 *buf); +static int smctr_set_phy_drop(struct net_device *dev, + MAC_SUB_VECTOR *rsv); +static int smctr_set_ring_speed(struct net_device *dev); +static int smctr_set_rx_look_ahead(struct net_device *dev); +static int smctr_set_trc_reset(int ioaddr); +static int smctr_setup_single_cmd(struct net_device *dev, + __u16 command, __u16 subcommand); +static int smctr_setup_single_cmd_w_data(struct net_device *dev, + __u16 command, __u16 subcommand); +static char *smctr_malloc(struct net_device *dev, __u16 size); +static int smctr_status_chg(struct net_device *dev); + +/* T */ +static void smctr_timeout(struct net_device *dev); +static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, + __u16 queue); +static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue); +static unsigned short smctr_tx_move_frame(struct net_device *dev, + struct sk_buff *skb, __u8 *pbuff, unsigned int bytes); + +/* U */ +static int smctr_update_err_stats(struct net_device *dev); +static int smctr_update_rx_chain(struct net_device *dev, __u16 queue); +static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, + __u16 queue); + +/* W */ +static int smctr_wait_cmd(struct net_device *dev); +static int smctr_wait_while_cbusy(struct net_device *dev); + +#define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X) +#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X) +#define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X)) + +/* Allocate Adapter Shared Memory. + * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the + * function "get_num_rx_bdbs" below!!! + * + * Order of memory allocation: + * + * 0. Initial System Configuration Block Pointer + * 1. System Configuration Block + * 2. System Control Block + * 3. Action Command Block + * 4. Interrupt Status Block + * + * 5. MAC TX FCB'S + * 6. NON-MAC TX FCB'S + * 7. MAC TX BDB'S + * 8. NON-MAC TX BDB'S + * 9. MAC RX FCB'S + * 10. NON-MAC RX FCB'S + * 11. MAC RX BDB'S + * 12. NON-MAC RX BDB'S + * 13. MAC TX Data Buffer( 1, 256 byte buffer) + * 14. MAC RX Data Buffer( 1, 256 byte buffer) + * + * 15. NON-MAC TX Data Buffer + * 16. NON-MAC RX Data Buffer + */ +static int smctr_alloc_shared_memory(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name); + + /* Allocate initial System Control Block pointer. 
+ * This pointer is located in the last page, last offset - 4. + */ + tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400) + - (long)ISCP_BLOCK_SIZE); + + /* Allocate System Control Blocks. */ + tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock)); + PARAGRAPH_BOUNDRY(tp->sh_mem_used); + + tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock)); + PARAGRAPH_BOUNDRY(tp->sh_mem_used); + + tp->acb_head = (ACBlock *)smctr_malloc(dev, + sizeof(ACBlock)*tp->num_acbs); + PARAGRAPH_BOUNDRY(tp->sh_mem_used); + + tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock)); + PARAGRAPH_BOUNDRY(tp->sh_mem_used); + + tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE); + PARAGRAPH_BOUNDRY(tp->sh_mem_used); + + /* Allocate transmit FCBs. */ + tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, + sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]); + + tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, + sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]); + + tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev, + sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]); + + /* Allocate transmit BDBs. */ + tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, + sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]); + + tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, + sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]); + + tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev, + sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]); + + /* Allocate receive FCBs. */ + tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, + sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]); + + tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, + sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]); + + /* Allocate receive BDBs. */ + tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, + sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]); + + tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); + + tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, + sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]); + + tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); + + /* Allocate MAC transmit buffers. + * MAC Tx Buffers doen't have to be on an ODD Boundary. + */ + tp->tx_buff_head[MAC_QUEUE] + = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]); + tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE]; + tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); + + /* Allocate BUG transmit buffers. */ + tp->tx_buff_head[BUG_QUEUE] + = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]); + tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE]; + tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0); + + /* Allocate MAC receive data buffers. + * MAC Rx buffer doesn't have to be on a 256 byte boundary. + */ + tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, + RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]); + tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); + + /* Allocate Non-MAC transmit buffers. + * ?? For maximum Netware performance, put Tx Buffers on + * ODD Boundary and then restore malloc to Even Boundrys. + */ + smctr_malloc(dev, 1L); + tp->tx_buff_head[NON_MAC_QUEUE] + = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]); + tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE]; + tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); + smctr_malloc(dev, 1L); + + /* Allocate Non-MAC receive data buffers. 
+ * To guarantee a minimum of 256 contiguous memory to + * UM_Receive_Packet's lookahead pointer, before a page + * change or ring end is encountered, place each rx buffer on + * a 256 byte boundary. + */ + smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used)); + tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, + RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]); + tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); + + return 0; +} + +/* Enter Bypass state. */ +static int smctr_bypass_state(struct net_device *dev) +{ + int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name); + + err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE); + + return err; +} + +static int smctr_checksum_firmware(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + __u16 i, checksum = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name); + + smctr_enable_adapter_ctrl_store(dev); + + for(i = 0; i < CS_RAM_SIZE; i += 2) + checksum += *((__u16 *)(tp->ram_access + i)); + + tp->microcode_version = *(__u16 *)(tp->ram_access + + CS_RAM_VERSION_OFFSET); + tp->microcode_version >>= 8; + + smctr_disable_adapter_ctrl_store(dev); + + if(checksum) + return checksum; + + return 0; +} + +static int __init smctr_chk_mca(struct net_device *dev) +{ +#ifdef CONFIG_MCA_LEGACY + struct net_local *tp = netdev_priv(dev); + int current_slot; + __u8 r1, r2, r3, r4, r5; + + current_slot = mca_find_unused_adapter(smctr_posid, 0); + if(current_slot == MCA_NOTFOUND) + return -ENODEV; + + mca_set_adapter_name(current_slot, smctr_name); + mca_mark_as_used(current_slot); + tp->slot_num = current_slot; + + r1 = mca_read_stored_pos(tp->slot_num, 2); + r2 = mca_read_stored_pos(tp->slot_num, 3); + + if(tp->slot_num) + outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT)); + else + outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT)); + + r1 = inb(CNFG_POS_REG1); + r2 = inb(CNFG_POS_REG0); + + tp->bic_type = BIC_594_CHIP; + + /* IO */ + r2 = mca_read_stored_pos(tp->slot_num, 2); + r2 &= 0xF0; + dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800; + request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name); + + /* IRQ */ + r5 = mca_read_stored_pos(tp->slot_num, 5); + r5 &= 0xC; + switch(r5) + { + case 0: + dev->irq = 3; + break; + + case 0x4: + dev->irq = 4; + break; + + case 0x8: + dev->irq = 10; + break; + + default: + dev->irq = 15; + break; + } + if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) { + release_region(dev->base_addr, SMCTR_IO_EXTENT); + return -ENODEV; + } + + /* Get RAM base */ + r3 = mca_read_stored_pos(tp->slot_num, 3); + tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000; + if (r3 & 0x8) + tp->ram_base += 0x010000; + if (r3 & 0x80) + tp->ram_base += 0xF00000; + + /* Get Ram Size */ + r3 &= 0x30; + r3 >>= 4; + + tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3; + tp->ram_size = (__u16)CNFG_SIZE_64KB; + tp->board_id |= TOKEN_MEDIA; + + r4 = mca_read_stored_pos(tp->slot_num, 4); + tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000; + if (r4 & 0x8) + tp->rom_base += 0x010000; + + /* Get ROM size. */ + r4 >>= 4; + switch (r4) { + case 0: + tp->rom_size = CNFG_SIZE_8KB; + break; + case 1: + tp->rom_size = CNFG_SIZE_16KB; + break; + case 2: + tp->rom_size = CNFG_SIZE_32KB; + break; + default: + tp->rom_size = ROM_DISABLE; + } + + /* Get Media Type. 
*/ + r5 = mca_read_stored_pos(tp->slot_num, 5); + r5 &= CNFG_MEDIA_TYPE_MASK; + switch(r5) + { + case (0): + tp->media_type = MEDIA_STP_4; + break; + + case (1): + tp->media_type = MEDIA_STP_16; + break; + + case (3): + tp->media_type = MEDIA_UTP_16; + break; + + default: + tp->media_type = MEDIA_UTP_4; + break; + } + tp->media_menu = 14; + + r2 = mca_read_stored_pos(tp->slot_num, 2); + if(!(r2 & 0x02)) + tp->mode_bits |= EARLY_TOKEN_REL; + + /* Disable slot */ + outb(CNFG_POS_CONTROL_REG, 0); + + tp->board_id = smctr_get_boardid(dev, 1); + switch(tp->board_id & 0xffff) + { + case WD8115TA: + smctr_model = "8115T/A"; + break; + + case WD8115T: + if(tp->extra_info & CHIP_REV_MASK) + smctr_model = "8115T rev XE"; + else + smctr_model = "8115T rev XD"; + break; + + default: + smctr_model = "Unknown"; + break; + } + + return 0; +#else + return -1; +#endif /* CONFIG_MCA_LEGACY */ +} + +static int smctr_chg_rx_mask(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name); + + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if(tp->mode_bits & LOOPING_MODE_MASK) + tp->config_word0 |= RX_OWN_BIT; + else + tp->config_word0 &= ~RX_OWN_BIT; + + if(tp->receive_mask & PROMISCUOUS_MODE) + tp->config_word0 |= PROMISCUOUS_BIT; + else + tp->config_word0 &= ~PROMISCUOUS_BIT; + + if(tp->receive_mask & ACCEPT_ERR_PACKETS) + tp->config_word0 |= SAVBAD_BIT; + else + tp->config_word0 &= ~SAVBAD_BIT; + + if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) + tp->config_word0 |= RXATMAC; + else + tp->config_word0 &= ~RXATMAC; + + if(tp->receive_mask & ACCEPT_MULTI_PROM) + tp->config_word1 |= MULTICAST_ADDRESS_BIT; + else + tp->config_word1 &= ~MULTICAST_ADDRESS_BIT; + + if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING) + tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS; + else + { + if(tp->receive_mask & ACCEPT_SOURCE_ROUTING) + tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT; + else + tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS; + } + + if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0, + &tp->config_word0))) + { + return err; + } + + if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1, + &tp->config_word1))) + { + return err; + } + + smctr_disable_16bit(dev); + + return 0; +} + +static int smctr_clear_int(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR); + + return 0; +} + +static int smctr_clear_trc_reset(int ioaddr) +{ + __u8 r; + + r = inb(ioaddr + MSR); + outb(~MSR_RST & r, ioaddr + MSR); + + return 0; +} + +/* + * The inverse routine to smctr_open(). + */ +static int smctr_close(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + struct sk_buff *skb; + int err; + + netif_stop_queue(dev); + + tp->cleanup = 1; + + /* Check to see if adapter is already in a closed state. 
*/ + if(tp->status != OPEN) + return 0; + + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if((err = smctr_issue_remove_cmd(dev))) + { + smctr_disable_16bit(dev); + return err; + } + + for(;;) + { + skb = skb_dequeue(&tp->SendSkbQueue); + if(skb == NULL) + break; + tp->QueueSkb++; + dev_kfree_skb(skb); + } + + + return 0; +} + +static int smctr_decode_firmware(struct net_device *dev, + const struct firmware *fw) +{ + struct net_local *tp = netdev_priv(dev); + short bit = 0x80, shift = 12; + DECODE_TREE_NODE *tree; + short branch, tsize; + __u16 buff = 0; + long weight; + __u8 *ucode; + __u16 *mem; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name); + + weight = *(long *)(fw->data + WEIGHT_OFFSET); + tsize = *(__u8 *)(fw->data + TREE_SIZE_OFFSET); + tree = (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET); + ucode = (__u8 *)(fw->data + TREE_OFFSET + + (tsize * sizeof(DECODE_TREE_NODE))); + mem = (__u16 *)(tp->ram_access); + + while(weight) + { + branch = ROOT; + while((tree + branch)->tag != LEAF && weight) + { + branch = *ucode & bit ? (tree + branch)->llink + : (tree + branch)->rlink; + + bit >>= 1; + weight--; + + if(bit == 0) + { + bit = 0x80; + ucode++; + } + } + + buff |= (tree + branch)->info << shift; + shift -= 4; + + if(shift < 0) + { + *(mem++) = SWAP_BYTES(buff); + buff = 0; + shift = 12; + } + } + + /* The following assumes the Control Store Memory has + * been initialized to zero. If the last partial word + * is zero, it will not be written. + */ + if(buff) + *(mem++) = SWAP_BYTES(buff); + + return 0; +} + +static int smctr_disable_16bit(struct net_device *dev) +{ + return 0; +} + +/* + * On Exit, Adapter is: + * 1. TRC is in a reset state and un-initialized. + * 2. Adapter memory is enabled. + * 3. Control Store memory is out of context (-WCSS is 1). + */ +static int smctr_disable_adapter_ctrl_store(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name); + + tp->trc_mask |= CSR_WCSS; + outb(tp->trc_mask, ioaddr + CSR); + + return 0; +} + +static int smctr_disable_bic_int(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY + | CSR_MSKTINT | CSR_WCSS; + outb(tp->trc_mask, ioaddr + CSR); + + return 0; +} + +static int smctr_enable_16bit(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + __u8 r; + + if(tp->adapter_bus == BUS_ISA16_TYPE) + { + r = inb(dev->base_addr + LAAR); + outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR); + } + + return 0; +} + +/* + * To enable the adapter control store memory: + * 1. Adapter must be in a RESET state. + * 2. Adapter memory must be enabled. + * 3. Control Store Memory is in context (-WCSS is 0). 
+ */ +static int smctr_enable_adapter_ctrl_store(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name); + + smctr_set_trc_reset(ioaddr); + smctr_enable_adapter_ram(dev); + + tp->trc_mask &= ~CSR_WCSS; + outb(tp->trc_mask, ioaddr + CSR); + + return 0; +} + +static int smctr_enable_adapter_ram(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + __u8 r; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name); + + r = inb(ioaddr + MSR); + outb(MSR_MEMB | r, ioaddr + MSR); + + return 0; +} + +static int smctr_enable_bic_int(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + __u8 r; + + switch(tp->bic_type) + { + case (BIC_584_CHIP): + tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS; + outb(tp->trc_mask, ioaddr + CSR); + r = inb(ioaddr + IRR); + outb(r | IRR_IEN, ioaddr + IRR); + break; + + case (BIC_594_CHIP): + tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS; + outb(tp->trc_mask, ioaddr + CSR); + r = inb(ioaddr + IMCCR); + outb(r | IMCCR_EIL, ioaddr + IMCCR); + break; + } + + return 0; +} + +static int __init smctr_chk_isa(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + __u8 r1, r2, b, chksum = 0; + __u16 r; + int i; + int err = -ENODEV; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr); + + if((ioaddr & 0x1F) != 0) + goto out; + + /* Grab the region so that no one else tries to probe our ioports. */ + if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) { + err = -EBUSY; + goto out; + } + + /* Checksum SMC node address */ + for(i = 0; i < 8; i++) + { + b = inb(ioaddr + LAR0 + i); + chksum += b; + } + + if (chksum != NODE_ADDR_CKSUM) + goto out2; + + b = inb(ioaddr + BDID); + if(b != BRD_ID_8115T) + { + printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name); + goto out2; + } + + /* Check for 8115T Board ID */ + r2 = 0; + for(r = 0; r < 8; r++) + { + r1 = inb(ioaddr + 0x8 + r); + r2 += r1; + } + + /* value of RegF adds up the sum to 0xFF */ + if((r2 != 0xFF) && (r2 != 0xEE)) + goto out2; + + /* Get adapter ID */ + tp->board_id = smctr_get_boardid(dev, 0); + switch(tp->board_id & 0xffff) + { + case WD8115TA: + smctr_model = "8115T/A"; + break; + + case WD8115T: + if(tp->extra_info & CHIP_REV_MASK) + smctr_model = "8115T rev XE"; + else + smctr_model = "8115T rev XD"; + break; + + default: + smctr_model = "Unknown"; + break; + } + + /* Store BIC type. 
*/ + tp->bic_type = BIC_584_CHIP; + tp->nic_type = NIC_825_CHIP; + + /* Copy Ram Size */ + tp->ram_usable = CNFG_SIZE_16KB; + tp->ram_size = CNFG_SIZE_64KB; + + /* Get 58x Ram Base */ + r1 = inb(ioaddr); + r1 &= 0x3F; + + r2 = inb(ioaddr + CNFG_LAAR_584); + r2 &= CNFG_LAAR_MASK; + r2 <<= 3; + r2 |= ((r1 & 0x38) >> 3); + + tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13); + + /* Get 584 Irq */ + r1 = 0; + r1 = inb(ioaddr + CNFG_ICR_583); + r1 &= CNFG_ICR_IR2_584; + + r2 = inb(ioaddr + CNFG_IRR_583); + r2 &= CNFG_IRR_IRQS; /* 0x60 */ + r2 >>= 5; + + switch(r2) + { + case 0: + if(r1 == 0) + dev->irq = 2; + else + dev->irq = 10; + break; + + case 1: + if(r1 == 0) + dev->irq = 3; + else + dev->irq = 11; + break; + + case 2: + if(r1 == 0) + { + if(tp->extra_info & ALTERNATE_IRQ_BIT) + dev->irq = 5; + else + dev->irq = 4; + } + else + dev->irq = 15; + break; + + case 3: + if(r1 == 0) + dev->irq = 7; + else + dev->irq = 4; + break; + + default: + printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name); + goto out2; + } + + if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) + goto out2; + + /* Get 58x Rom Base */ + r1 = inb(ioaddr + CNFG_BIO_583); + r1 &= 0x3E; + r1 |= 0x40; + + tp->rom_base = (__u32)r1 << 13; + + /* Get 58x Rom Size */ + r1 = inb(ioaddr + CNFG_BIO_583); + r1 &= 0xC0; + if(r1 == 0) + tp->rom_size = ROM_DISABLE; + else + { + r1 >>= 6; + tp->rom_size = (__u16)CNFG_SIZE_8KB << r1; + } + + /* Get 58x Boot Status */ + r1 = inb(ioaddr + CNFG_GP2); + + tp->mode_bits &= (~BOOT_STATUS_MASK); + + if(r1 & CNFG_GP2_BOOT_NIBBLE) + tp->mode_bits |= BOOT_TYPE_1; + + /* Get 58x Zero Wait State */ + tp->mode_bits &= (~ZERO_WAIT_STATE_MASK); + + r1 = inb(ioaddr + CNFG_IRR_583); + + if(r1 & CNFG_IRR_ZWS) + tp->mode_bits |= ZERO_WAIT_STATE_8_BIT; + + if(tp->board_id & BOARD_16BIT) + { + r1 = inb(ioaddr + CNFG_LAAR_584); + + if(r1 & CNFG_LAAR_ZWS) + tp->mode_bits |= ZERO_WAIT_STATE_16_BIT; + } + + /* Get 584 Media Menu */ + tp->media_menu = 14; + r1 = inb(ioaddr + CNFG_IRR_583); + + tp->mode_bits &= 0xf8ff; /* (~CNFG_INTERFACE_TYPE_MASK) */ + if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA) + { + /* Get Advanced Features */ + if(((r1 & 0x6) >> 1) == 0x3) + tp->media_type |= MEDIA_UTP_16; + else + { + if(((r1 & 0x6) >> 1) == 0x2) + tp->media_type |= MEDIA_STP_16; + else + { + if(((r1 & 0x6) >> 1) == 0x1) + tp->media_type |= MEDIA_UTP_4; + + else + tp->media_type |= MEDIA_STP_4; + } + } + + r1 = inb(ioaddr + CNFG_GP2); + if(!(r1 & 0x2) ) /* GP2_ETRD */ + tp->mode_bits |= EARLY_TOKEN_REL; + + /* see if the chip is corrupted + if(smctr_read_584_chksum(ioaddr)) + { + printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name); + free_irq(dev->irq, dev); + goto out2; + } + */ + } + + return 0; + +out2: + release_region(ioaddr, SMCTR_IO_EXTENT); +out: + return err; +} + +static int __init smctr_get_boardid(struct net_device *dev, int mca) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + __u8 r, r1, IdByte; + __u16 BoardIdMask; + + tp->board_id = BoardIdMask = 0; + + if(mca) + { + BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT); + tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT); + } + else + { + BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT); + tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K + + NIC_825_BIT + ALTERNATE_IRQ_BIT); + } + + if(!mca) + { + r = inb(ioaddr + BID_REG_1); + r &= 0x0c; + outb(r, ioaddr + BID_REG_1); + r = inb(ioaddr + 
BID_REG_1); + + if(r & BID_SIXTEEN_BIT_BIT) + { + tp->extra_info |= SLOT_16BIT; + tp->adapter_bus = BUS_ISA16_TYPE; + } + else + tp->adapter_bus = BUS_ISA8_TYPE; + } + else + tp->adapter_bus = BUS_MCA_TYPE; + + /* Get Board Id Byte */ + IdByte = inb(ioaddr + BID_BOARD_ID_BYTE); + + /* if Major version > 1.0 then + * return; + */ + if(IdByte & 0xF8) + return -1; + + r1 = inb(ioaddr + BID_REG_1); + r1 &= BID_ICR_MASK; + r1 |= BID_OTHER_BIT; + + outb(r1, ioaddr + BID_REG_1); + r1 = inb(ioaddr + BID_REG_3); + + r1 &= BID_EAR_MASK; + r1 |= BID_ENGR_PAGE; + + outb(r1, ioaddr + BID_REG_3); + r1 = inb(ioaddr + BID_REG_1); + r1 &= BID_ICR_MASK; + r1 |= (BID_RLA | BID_OTHER_BIT); + + outb(r1, ioaddr + BID_REG_1); + + r1 = inb(ioaddr + BID_REG_1); + while(r1 & BID_RECALL_DONE_MASK) + r1 = inb(ioaddr + BID_REG_1); + + r = inb(ioaddr + BID_LAR_0 + BID_REG_6); + + /* clear chip rev bits */ + tp->extra_info &= ~CHIP_REV_MASK; + tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6); + + r1 = inb(ioaddr + BID_REG_1); + r1 &= BID_ICR_MASK; + r1 |= BID_OTHER_BIT; + + outb(r1, ioaddr + BID_REG_1); + r1 = inb(ioaddr + BID_REG_3); + + r1 &= BID_EAR_MASK; + r1 |= BID_EA6; + + outb(r1, ioaddr + BID_REG_3); + r1 = inb(ioaddr + BID_REG_1); + + r1 &= BID_ICR_MASK; + r1 |= BID_RLA; + + outb(r1, ioaddr + BID_REG_1); + r1 = inb(ioaddr + BID_REG_1); + + while(r1 & BID_RECALL_DONE_MASK) + r1 = inb(ioaddr + BID_REG_1); + + return BoardIdMask; +} + +static int smctr_get_group_address(struct net_device *dev) +{ + smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR); + + return smctr_wait_cmd(dev); +} + +static int smctr_get_functional_address(struct net_device *dev) +{ + smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR); + + return smctr_wait_cmd(dev); +} + +/* Calculate number of Non-MAC receive BDB's and data buffers. + * This function must simulate allocateing shared memory exactly + * as the allocate_shared_memory function above. + */ +static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int mem_used = 0; + + /* Allocate System Control Blocks. */ + mem_used += sizeof(SCGBlock); + + mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); + mem_used += sizeof(SCLBlock); + + mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); + mem_used += sizeof(ACBlock) * tp->num_acbs; + + mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); + mem_used += sizeof(ISBlock); + + mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); + mem_used += MISC_DATA_SIZE; + + /* Allocate transmit FCB's. */ + mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); + + mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]; + mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]; + mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]; + + /* Allocate transmit BDBs. */ + mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]; + mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]; + mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]; + + /* Allocate receive FCBs. */ + mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]; + mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]; + + /* Allocate receive BDBs. */ + mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]; + + /* Allocate MAC transmit buffers. + * MAC transmit buffers don't have to be on an ODD Boundary. + */ + mem_used += tp->tx_buff_size[MAC_QUEUE]; + + /* Allocate BUG transmit buffers. */ + mem_used += tp->tx_buff_size[BUG_QUEUE]; + + /* Allocate MAC receive data buffers. 
+ * MAC receive buffers don't have to be on a 256 byte boundary. + */ + mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]; + + /* Allocate Non-MAC transmit buffers. + * For maximum Netware performance, put Tx Buffers on + * ODD Boundary,and then restore malloc to Even Boundrys. + */ + mem_used += 1L; + mem_used += tp->tx_buff_size[NON_MAC_QUEUE]; + mem_used += 1L; + + /* CALCULATE NUMBER OF NON-MAC RX BDB'S + * AND NON-MAC RX DATA BUFFERS + * + * Make sure the mem_used offset at this point is the + * same as in allocate_shared memory or the following + * boundary adjustment will be incorrect (i.e. not allocating + * the non-mac receive buffers above cannot change the 256 + * byte offset). + * + * Since this cannot be guaranteed, adding the full 256 bytes + * to the amount of shared memory used at this point will guaranteed + * that the rx data buffers do not overflow shared memory. + */ + mem_used += 0x100; + + return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)); +} + +static int smctr_get_physical_drop_number(struct net_device *dev) +{ + smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER); + + return smctr_wait_cmd(dev); +} + +static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue) +{ + struct net_local *tp = netdev_priv(dev); + BDBlock *bdb; + + bdb = (BDBlock *)((__u32)tp->ram_access + + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr)); + + tp->rx_fcb_curr[queue]->bdb_ptr = bdb; + + return (__u8 *)bdb->data_block_ptr; +} + +static int smctr_get_station_id(struct net_device *dev) +{ + smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS); + + return smctr_wait_cmd(dev); +} + +/* + * Get the current statistics. This may be called with the card open + * or closed. + */ +static struct net_device_stats *smctr_get_stats(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + return (struct net_device_stats *)&tp->MacStat; +} + +static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, + __u16 bytes_count) +{ + struct net_local *tp = netdev_priv(dev); + FCBlock *pFCB; + BDBlock *pbdb; + unsigned short alloc_size; + unsigned short *temp; + + if(smctr_debug > 20) + printk(KERN_DEBUG "smctr_get_tx_fcb\n"); + + /* check if there is enough FCB blocks */ + if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue]) + return (FCBlock *)(-1L); + + /* round off the input pkt size to the nearest even number */ + alloc_size = (bytes_count + 1) & 0xfffe; + + /* check if enough mem */ + if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) + return (FCBlock *)(-1L); + + /* check if past the end ; + * if exactly enough mem to end of ring, alloc from front. 
+ * this avoids update of curr when curr = end + */ + if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size) + >= (unsigned long)(tp->tx_buff_end[queue])) + { + /* check if enough memory from ring head */ + alloc_size = alloc_size + + (__u16)((__u32)tp->tx_buff_end[queue] + - (__u32)tp->tx_buff_curr[queue]); + + if((tp->tx_buff_used[queue] + alloc_size) + > tp->tx_buff_size[queue]) + { + return (FCBlock *)(-1L); + } + + /* ring wrap */ + tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; + } + + tp->tx_buff_used[queue] += alloc_size; + tp->num_tx_fcbs_used[queue]++; + tp->tx_fcb_curr[queue]->frame_length = bytes_count; + tp->tx_fcb_curr[queue]->memory_alloc = alloc_size; + temp = tp->tx_buff_curr[queue]; + tp->tx_buff_curr[queue] + = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe)); + + pbdb = tp->tx_fcb_curr[queue]->bdb_ptr; + pbdb->buffer_length = bytes_count; + pbdb->data_block_ptr = temp; + pbdb->trc_data_block_ptr = TRC_POINTER(temp); + + pFCB = tp->tx_fcb_curr[queue]; + tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr; + + return pFCB; +} + +static int smctr_get_upstream_neighbor_addr(struct net_device *dev) +{ + smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS); + + return smctr_wait_cmd(dev); +} + +static int smctr_hardware_send_packet(struct net_device *dev, + struct net_local *tp) +{ + struct tr_statistics *tstat = &tp->MacStat; + struct sk_buff *skb; + FCBlock *fcb; + + if(smctr_debug > 10) + printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name); + + if(tp->status != OPEN) + return -1; + + if(tp->monitor_state_ready != 1) + return -1; + + for(;;) + { + /* Send first buffer from queue */ + skb = skb_dequeue(&tp->SendSkbQueue); + if(skb == NULL) + return -1; + + tp->QueueSkb++; + + if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) + return -1; + + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len)) + == (FCBlock *)(-1L)) + { + smctr_disable_16bit(dev); + return -1; + } + + smctr_tx_move_frame(dev, skb, + (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len); + + smctr_set_page(dev, (__u8 *)fcb); + + smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE); + dev_kfree_skb(skb); + + tstat->tx_packets++; + + smctr_disable_16bit(dev); + } + + return 0; +} + +static int smctr_init_acbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i; + ACBlock *acb; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name); + + acb = tp->acb_head; + acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL); + acb->cmd_info = ACB_CHAIN_END; + acb->cmd = 0; + acb->subcmd = 0; + acb->data_offset_lo = 0; + acb->data_offset_hi = 0; + acb->next_ptr + = (ACBlock *)(((char *)acb) + sizeof(ACBlock)); + acb->trc_next_ptr = TRC_POINTER(acb->next_ptr); + + for(i = 1; i < tp->num_acbs; i++) + { + acb = acb->next_ptr; + acb->cmd_done_status + = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL); + acb->cmd_info = ACB_CHAIN_END; + acb->cmd = 0; + acb->subcmd = 0; + acb->data_offset_lo = 0; + acb->data_offset_hi = 0; + acb->next_ptr + = (ACBlock *)(((char *)acb) + sizeof(ACBlock)); + acb->trc_next_ptr = TRC_POINTER(acb->next_ptr); + } + + acb->next_ptr = tp->acb_head; + acb->trc_next_ptr = TRC_POINTER(tp->acb_head); + tp->acb_next = tp->acb_head->next_ptr; + tp->acb_curr = tp->acb_head->next_ptr; + tp->num_acbs_used = 0; + + return 0; +} + +static int smctr_init_adapter(struct net_device *dev) +{ + struct net_local *tp = 
netdev_priv(dev); + int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name); + + tp->status = CLOSED; + tp->page_offset_mask = (tp->ram_usable * 1024) - 1; + skb_queue_head_init(&tp->SendSkbQueue); + tp->QueueSkb = MAX_TX_QUEUE; + + if(!(tp->group_address_0 & 0x0080)) + tp->group_address_0 |= 0x00C0; + + if(!(tp->functional_address_0 & 0x00C0)) + tp->functional_address_0 |= 0x00C0; + + tp->functional_address[0] &= 0xFF7F; + + if(tp->authorized_function_classes == 0) + tp->authorized_function_classes = 0x7FFF; + + if(tp->authorized_access_priority == 0) + tp->authorized_access_priority = 0x06; + + smctr_disable_bic_int(dev); + smctr_set_trc_reset(dev->base_addr); + + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if(smctr_checksum_firmware(dev)) + { + printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); + return -ENOENT; + } + + if((err = smctr_ram_memory_test(dev))) + { + printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name); + return -EIO; + } + + smctr_set_rx_look_ahead(dev); + smctr_load_node_addr(dev); + + /* Initialize adapter for Internal Self Test. */ + smctr_reset_adapter(dev); + if((err = smctr_init_card_real(dev))) + { + printk(KERN_ERR "%s: Initialization of card failed (%d)\n", + dev->name, err); + return -EINVAL; + } + + /* This routine clobbers the TRC's internal registers. */ + if((err = smctr_internal_self_test(dev))) + { + printk(KERN_ERR "%s: Card failed internal self test (%d)\n", + dev->name, err); + return -EINVAL; + } + + /* Re-Initialize adapter's internal registers */ + smctr_reset_adapter(dev); + if((err = smctr_init_card_real(dev))) + { + printk(KERN_ERR "%s: Initialization of card failed (%d)\n", + dev->name, err); + return -EINVAL; + } + + smctr_enable_bic_int(dev); + + if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) + return err; + + smctr_disable_16bit(dev); + + return 0; +} + +static int smctr_init_card_real(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name); + + tp->sh_mem_used = 0; + tp->num_acbs = NUM_OF_ACBS; + + /* Range Check Max Packet Size */ + if(tp->max_packet_size < 256) + tp->max_packet_size = 256; + else + { + if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY) + tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY; + } + + tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY + / tp->max_packet_size) - 1; + + if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS) + tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS; + else + { + if(tp->num_of_tx_buffs == 0) + tp->num_of_tx_buffs = 1; + } + + /* Tx queue constants */ + tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS; + tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS; + tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY; + tp->tx_buff_used [BUG_QUEUE] = 0; + tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING; + + tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS; + tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS; + tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY; + tp->tx_buff_used [MAC_QUEUE] = 0; + tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING; + + tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS; + tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS; + tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY; + tp->tx_buff_used [NON_MAC_QUEUE] = 0; + tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING; + + /* Receive Queue Constants */ + tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS; 
+ tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS; + + if(tp->extra_info & CHIP_REV_MASK) + tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */ + else + tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */ + + tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev); + + smctr_alloc_shared_memory(dev); + smctr_init_shared_memory(dev); + + if((err = smctr_issue_init_timers_cmd(dev))) + return err; + + if((err = smctr_issue_init_txrx_cmd(dev))) + { + printk(KERN_ERR "%s: Hardware failure\n", dev->name); + return err; + } + + return 0; +} + +static int smctr_init_rx_bdbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, j; + BDBlock *bdb; + __u16 *buf; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name); + + for(i = 0; i < NUM_RX_QS_USED; i++) + { + bdb = tp->rx_bdb_head[i]; + buf = tp->rx_buff_head[i]; + bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING); + bdb->buffer_length = RX_DATA_BUFFER_SIZE; + bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); + bdb->data_block_ptr = buf; + bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); + + if(i == NON_MAC_QUEUE) + bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf); + else + bdb->trc_data_block_ptr = TRC_POINTER(buf); + + for(j = 1; j < tp->num_rx_bdbs[i]; j++) + { + bdb->next_ptr->back_ptr = bdb; + bdb = bdb->next_ptr; + buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE); + bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); + bdb->buffer_length = RX_DATA_BUFFER_SIZE; + bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); + bdb->data_block_ptr = buf; + bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); + + if(i == NON_MAC_QUEUE) + bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf); + else + bdb->trc_data_block_ptr = TRC_POINTER(buf); + } + + bdb->next_ptr = tp->rx_bdb_head[i]; + bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]); + + tp->rx_bdb_head[i]->back_ptr = bdb; + tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr; + } + + return 0; +} + +static int smctr_init_rx_fcbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, j; + FCBlock *fcb; + + for(i = 0; i < NUM_RX_QS_USED; i++) + { + fcb = tp->rx_fcb_head[i]; + fcb->frame_status = 0; + fcb->frame_length = 0; + fcb->info = FCB_CHAIN_END; + fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock)); + if(i == NON_MAC_QUEUE) + fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr); + else + fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); + + for(j = 1; j < tp->num_rx_fcbs[i]; j++) + { + fcb->next_ptr->back_ptr = fcb; + fcb = fcb->next_ptr; + fcb->frame_status = 0; + fcb->frame_length = 0; + fcb->info = FCB_WARNING; + fcb->next_ptr + = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); + + if(i == NON_MAC_QUEUE) + fcb->trc_next_ptr + = RX_FCB_TRC_POINTER(fcb->next_ptr); + else + fcb->trc_next_ptr + = TRC_POINTER(fcb->next_ptr); + } + + fcb->next_ptr = tp->rx_fcb_head[i]; + + if(i == NON_MAC_QUEUE) + fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr); + else + fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); + + tp->rx_fcb_head[i]->back_ptr = fcb; + tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr; + } + + return 0; +} + +static int smctr_init_shared_memory(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i; + __u32 *iscpb; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name); + + smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr); + + /* Initialize Initial System Configuration Point. 
(ISCP) */ + iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr); + *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr))); + + smctr_set_page(dev, (__u8 *)tp->ram_access); + + /* Initialize System Configuration Pointers. (SCP) */ + tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT + | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT + | SCGB_BURST_LENGTH); + + tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr); + tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head); + tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr); + tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2; + + /* Initialize System Control Block. (SCB) */ + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP; + tp->sclb_ptr->iack_code = 0; + tp->sclb_ptr->resume_control = 0; + tp->sclb_ptr->int_mask_control = 0; + tp->sclb_ptr->int_mask_state = 0; + + /* Initialize Interrupt Status Block. (ISB) */ + for(i = 0; i < NUM_OF_INTERRUPTS; i++) + { + tp->isb_ptr->IStatus[i].IType = 0xf0; + tp->isb_ptr->IStatus[i].ISubtype = 0; + } + + tp->current_isb_index = 0; + + /* Initialize Action Command Block. (ACB) */ + smctr_init_acbs(dev); + + /* Initialize transmit FCB's and BDB's. */ + smctr_link_tx_fcbs_to_bdbs(dev); + smctr_init_tx_bdbs(dev); + smctr_init_tx_fcbs(dev); + + /* Initialize receive FCB's and BDB's. */ + smctr_init_rx_bdbs(dev); + smctr_init_rx_fcbs(dev); + + return 0; +} + +static int smctr_init_tx_bdbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, j; + BDBlock *bdb; + + for(i = 0; i < NUM_TX_QS_USED; i++) + { + bdb = tp->tx_bdb_head[i]; + bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); + bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); + bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); + + for(j = 1; j < tp->num_tx_bdbs[i]; j++) + { + bdb->next_ptr->back_ptr = bdb; + bdb = bdb->next_ptr; + bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); + bdb->next_ptr + = (BDBlock *)(((char *)bdb) + sizeof( BDBlock)); bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); + } + + bdb->next_ptr = tp->tx_bdb_head[i]; + bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]); + tp->tx_bdb_head[i]->back_ptr = bdb; + } + + return 0; +} + +static int smctr_init_tx_fcbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, j; + FCBlock *fcb; + + for(i = 0; i < NUM_TX_QS_USED; i++) + { + fcb = tp->tx_fcb_head[i]; + fcb->frame_status = 0; + fcb->frame_length = 0; + fcb->info = FCB_CHAIN_END; + fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); + fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); + + for(j = 1; j < tp->num_tx_fcbs[i]; j++) + { + fcb->next_ptr->back_ptr = fcb; + fcb = fcb->next_ptr; + fcb->frame_status = 0; + fcb->frame_length = 0; + fcb->info = FCB_CHAIN_END; + fcb->next_ptr + = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); + fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); + } + + fcb->next_ptr = tp->tx_fcb_head[i]; + fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]); + + tp->tx_fcb_head[i]->back_ptr = fcb; + tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr; + tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr; + tp->num_tx_fcbs_used[i] = 0; + } + + return 0; +} + +static int smctr_internal_self_test(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if((err = smctr_issue_test_internal_rom_cmd(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + if(tp->acb_head->cmd_done_status & 0xff) + return -1; + + if((err = smctr_issue_test_hic_cmd(dev))) + return err; + + if((err = 
smctr_wait_cmd(dev))) + return err; + + if(tp->acb_head->cmd_done_status & 0xff) + return -1; + + if((err = smctr_issue_test_mac_reg_cmd(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + if(tp->acb_head->cmd_done_status & 0xff) + return -1; + + return 0; +} + +/* + * The typical workload of the driver: Handle the network interface interrupts. + */ +static irqreturn_t smctr_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *tp; + int ioaddr; + __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00; + __u16 err1, err = NOT_MY_INTERRUPT; + __u8 isb_type, isb_subtype; + __u16 isb_index; + + ioaddr = dev->base_addr; + tp = netdev_priv(dev); + + if(tp->status == NOT_INITIALIZED) + return IRQ_NONE; + + spin_lock(&tp->lock); + + smctr_disable_bic_int(dev); + smctr_enable_16bit(dev); + + smctr_clear_int(dev); + + /* First read the LSB */ + while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0) + { + isb_index = tp->current_isb_index; + isb_type = tp->isb_ptr->IStatus[isb_index].IType; + isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype; + + (tp->current_isb_index)++; + if(tp->current_isb_index == NUM_OF_INTERRUPTS) + tp->current_isb_index = 0; + + if(isb_type >= 0x10) + { + smctr_disable_16bit(dev); + spin_unlock(&tp->lock); + return IRQ_HANDLED; + } + + err = HARDWARE_FAILED; + interrupt_ack_code = isb_index; + tp->isb_ptr->IStatus[isb_index].IType |= 0xf0; + + interrupt_unmask_bits |= (1 << (__u16)isb_type); + + switch(isb_type) + { + case ISB_IMC_MAC_TYPE_3: + smctr_disable_16bit(dev); + + switch(isb_subtype) + { + case 0: + tp->monitor_state = MS_MONITOR_FSM_INACTIVE; + break; + + case 1: + tp->monitor_state = MS_REPEAT_BEACON_STATE; + break; + + case 2: + tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE; + break; + + case 3: + tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE; break; + + case 4: + tp->monitor_state = MS_STANDBY_MONITOR_STATE; + break; + + case 5: + tp->monitor_state = MS_TRANSMIT_BEACON_STATE; + break; + + case 6: + tp->monitor_state = MS_ACTIVE_MONITOR_STATE; + break; + + case 7: + tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE; + break; + + case 8: /* diagnostic state */ + break; + + case 9: + tp->monitor_state = MS_BEACON_TEST_STATE; + if(smctr_lobe_media_test(dev)) + { + tp->ring_status_flags = RING_STATUS_CHANGED; + tp->ring_status = AUTO_REMOVAL_ERROR; + smctr_ring_status_chg(dev); + smctr_bypass_state(dev); + } + else + smctr_issue_insert_cmd(dev); + break; + + /* case 0x0a-0xff, illegal states */ + default: + break; + } + + tp->ring_status_flags = MONITOR_STATE_CHANGED; + err = smctr_ring_status_chg(dev); + + smctr_enable_16bit(dev); + break; + + /* Type 0x02 - MAC Error Counters Interrupt + * One or more MAC Error Counter is half full + * MAC Error Counters + * Lost_FR_Error_Counter + * RCV_Congestion_Counter + * FR_copied_Error_Counter + * FREQ_Error_Counter + * Token_Error_Counter + * Line_Error_Counter + * Internal_Error_Count + */ + case ISB_IMC_MAC_ERROR_COUNTERS: + /* Read 802.5 Error Counters */ + err = smctr_issue_read_ring_status_cmd(dev); + break; + + /* Type 0x04 - MAC Type 2 Interrupt + * HOST needs to enqueue MAC Frame for transmission + * SubType Bit 15 - RQ_INIT_PDU( Request Initialization) * Changed from RQ_INIT_PDU to + * TRC_Status_Changed_Indicate + */ + case ISB_IMC_MAC_TYPE_2: + err = smctr_issue_read_ring_status_cmd(dev); + break; + + + /* Type 0x05 - TX Frame Interrupt (FI). 
*/ + case ISB_IMC_TX_FRAME: + /* BUG QUEUE for TRC stuck receive BUG */ + if(isb_subtype & TX_PENDING_PRIORITY_2) + { + if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS) + break; + } + + /* NON-MAC frames only */ + if(isb_subtype & TX_PENDING_PRIORITY_1) + { + if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS) + break; + } + + /* MAC frames only */ + if(isb_subtype & TX_PENDING_PRIORITY_0) + err = smctr_tx_complete(dev, MAC_QUEUE); break; + + /* Type 0x06 - TX END OF QUEUE (FE) */ + case ISB_IMC_END_OF_TX_QUEUE: + /* BUG queue */ + if(isb_subtype & TX_PENDING_PRIORITY_2) + { + /* ok to clear Receive FIFO overrun + * imask send_BUG now completes. + */ + interrupt_unmask_bits |= 0x800; + + tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING; + if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS) + break; + if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS) + break; + } + + /* NON-MAC queue only */ + if(isb_subtype & TX_PENDING_PRIORITY_1) + { + tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING; + if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS) + break; + if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS) + break; + } + + /* MAC queue only */ + if(isb_subtype & TX_PENDING_PRIORITY_0) + { + tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; + if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS) + break; + + err = smctr_restart_tx_chain(dev, MAC_QUEUE); + } + break; + + /* Type 0x07 - NON-MAC RX Resource Interrupt + * Subtype bit 12 - (BW) BDB warning + * Subtype bit 13 - (FW) FCB warning + * Subtype bit 14 - (BE) BDB End of chain + * Subtype bit 15 - (FE) FCB End of chain + */ + case ISB_IMC_NON_MAC_RX_RESOURCE: + tp->rx_fifo_overrun_count = 0; + tp->receive_queue_number = NON_MAC_QUEUE; + err1 = smctr_rx_frame(dev); + + if(isb_subtype & NON_MAC_RX_RESOURCE_FE) + { + if((err = smctr_issue_resume_rx_fcb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break; + + if(tp->ptr_rx_fcb_overruns) + (*tp->ptr_rx_fcb_overruns)++; + } + + if(isb_subtype & NON_MAC_RX_RESOURCE_BE) + { + if((err = smctr_issue_resume_rx_bdb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break; + + if(tp->ptr_rx_bdb_overruns) + (*tp->ptr_rx_bdb_overruns)++; + } + err = err1; + break; + + /* Type 0x08 - MAC RX Resource Interrupt + * Subtype bit 12 - (BW) BDB warning + * Subtype bit 13 - (FW) FCB warning + * Subtype bit 14 - (BE) BDB End of chain + * Subtype bit 15 - (FE) FCB End of chain + */ + case ISB_IMC_MAC_RX_RESOURCE: + tp->receive_queue_number = MAC_QUEUE; + err1 = smctr_rx_frame(dev); + + if(isb_subtype & MAC_RX_RESOURCE_FE) + { + if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS) + break; + + if(tp->ptr_rx_fcb_overruns) + (*tp->ptr_rx_fcb_overruns)++; + } + + if(isb_subtype & MAC_RX_RESOURCE_BE) + { + if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS) + break; + + if(tp->ptr_rx_bdb_overruns) + (*tp->ptr_rx_bdb_overruns)++; + } + err = err1; + break; + + /* Type 0x09 - NON_MAC RX Frame Interrupt */ + case ISB_IMC_NON_MAC_RX_FRAME: + tp->rx_fifo_overrun_count = 0; + tp->receive_queue_number = NON_MAC_QUEUE; + err = smctr_rx_frame(dev); + break; + + /* Type 0x0A - MAC RX Frame Interrupt */ + case ISB_IMC_MAC_RX_FRAME: + tp->receive_queue_number = MAC_QUEUE; + err = smctr_rx_frame(dev); + break; + + /* Type 0x0B - TRC status + * TRC has encountered an error condition + * subtype bit 14 - transmit FIFO underrun + * subtype bit 15 - receive FIFO overrun + */ + case ISB_IMC_TRC_FIFO_STATUS: + if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN) + { + 
if(tp->ptr_tx_fifo_underruns) + (*tp->ptr_tx_fifo_underruns)++; + } + + if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN) + { + /* update overrun stuck receive counter + * if >= 3, has to clear it by sending + * back to back frames. We pick + * DAT(duplicate address MAC frame) + */ + tp->rx_fifo_overrun_count++; + + if(tp->rx_fifo_overrun_count >= 3) + { + tp->rx_fifo_overrun_count = 0; + + /* delay clearing fifo overrun + * imask till send_BUG tx + * complete posted + */ + interrupt_unmask_bits &= (~0x800); + printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev); + } + + if(tp->ptr_rx_fifo_overruns) + (*tp->ptr_rx_fifo_overruns)++; + } + + err = SUCCESS; + break; + + /* Type 0x0C - Action Command Status Interrupt + * Subtype bit 14 - CB end of command chain (CE) + * Subtype bit 15 - CB command interrupt (CI) + */ + case ISB_IMC_COMMAND_STATUS: + err = SUCCESS; + if(tp->acb_head->cmd == ACB_CMD_HIC_NOP) + { + printk(KERN_ERR "i1\n"); + smctr_disable_16bit(dev); + + /* XXXXXXXXXXXXXXXXX */ + /* err = UM_Interrupt(dev); */ + + smctr_enable_16bit(dev); + } + else + { + if((tp->acb_head->cmd + == ACB_CMD_READ_TRC_STATUS) && + (tp->acb_head->subcmd + == RW_TRC_STATUS_BLOCK)) + { + if(tp->ptr_bcn_type) + { + *(tp->ptr_bcn_type) + = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type; + } + + if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED) + { + smctr_update_err_stats(dev); + } + + if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED) + { + tp->ring_status + = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status; + smctr_disable_16bit(dev); + err = smctr_ring_status_chg(dev); + smctr_enable_16bit(dev); + if((tp->ring_status & REMOVE_RECEIVED) && + (tp->config_word0 & NO_AUTOREMOVE)) + { + smctr_issue_remove_cmd(dev); + } + + if(err != SUCCESS) + { + tp->acb_pending = 0; + break; + } + } + + if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED) + { + if(tp->ptr_una) + { + tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]); + tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]); + tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]); + } + + } + + if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT) { + err = smctr_send_rq_init(dev); + } + } + } + + tp->acb_pending = 0; + break; + + /* Type 0x0D - MAC Type 1 interrupt + * Subtype -- 00 FR_BCN received at S12 + * 01 FR_BCN received at S21 + * 02 FR_DAT(DA=MA, A<>0) received at S21 + * 03 TSM_EXP at S21 + * 04 FR_REMOVE received at S42 + * 05 TBR_EXP, BR_FLAG_SET at S42 + * 06 TBT_EXP at S53 + */ + case ISB_IMC_MAC_TYPE_1: + if(isb_subtype > 8) + { + err = HARDWARE_FAILED; + break; + } + + err = SUCCESS; + switch(isb_subtype) + { + case 0: + tp->join_state = JS_BYPASS_STATE; + if(tp->status != CLOSED) + { + tp->status = CLOSED; + err = smctr_status_chg(dev); + } + break; + + case 1: + tp->join_state = JS_LOBE_TEST_STATE; + break; + + case 2: + tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE; + break; + + case 3: + tp->join_state = JS_AWAIT_NEW_MONITOR_STATE; + break; + + case 4: + tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE; + break; + + case 5: + tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE; + break; + + case 6: + tp->join_state = JS_REQUEST_INITIALIZATION_STATE; + break; + + case 7: + tp->join_state = JS_JOIN_COMPLETE_STATE; + tp->status = OPEN; + err = smctr_status_chg(dev); + break; + + case 8: + tp->join_state = JS_BYPASS_WAIT_STATE; + break; + } + break ; + 
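+			/* The MAC Type 1 subtypes handled above walk the 802.5
+			 * join sequence: each step is mirrored in tp->join_state,
+			 * the interface is only reported OPEN once the
+			 * join-complete state (subtype 7) is reached, and
+			 * subtype 0 drops the adapter back to bypass and
+			 * marks it CLOSED.
+			 */
+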
+ /* Type 0x0E - TRC Initialization Sequence Interrupt + * Subtype -- 00-FF Initializatin sequence complete + */ + case ISB_IMC_TRC_INTRNL_TST_STATUS: + tp->status = INITIALIZED; + smctr_disable_16bit(dev); + err = smctr_status_chg(dev); + smctr_enable_16bit(dev); + break; + + /* other interrupt types, illegal */ + default: + break; + } + + if(err != SUCCESS) + break; + } + + /* Checking the ack code instead of the unmask bits here is because : + * while fixing the stuck receive, DAT frame are sent and mask off + * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0) + * but we still want to issue ack to ISB + */ + if(!(interrupt_ack_code & 0xff00)) + smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits); + + smctr_disable_16bit(dev); + smctr_enable_bic_int(dev); + spin_unlock(&tp->lock); + + return IRQ_HANDLED; +} + +static int smctr_issue_enable_int_cmd(struct net_device *dev, + __u16 interrupt_enable_mask) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + tp->sclb_ptr->int_mask_control = interrupt_enable_mask; + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_wait_while_cbusy(dev)) + return -1; + + tp->sclb_ptr->int_mask_control = ibits; + tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0; + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_init_timers_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i; + int err; + __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE; + tp->config_word1 = 0; + + if((tp->media_type == MEDIA_STP_16) || + (tp->media_type == MEDIA_UTP_16) || + (tp->media_type == MEDIA_STP_16_UTP_16)) + { + tp->config_word0 |= FREQ_16MB_BIT; + } + + if(tp->mode_bits & EARLY_TOKEN_REL) + tp->config_word0 |= ETREN; + + if(tp->mode_bits & LOOPING_MODE_MASK) + tp->config_word0 |= RX_OWN_BIT; + else + tp->config_word0 &= ~RX_OWN_BIT; + + if(tp->receive_mask & PROMISCUOUS_MODE) + tp->config_word0 |= PROMISCUOUS_BIT; + else + tp->config_word0 &= ~PROMISCUOUS_BIT; + + if(tp->receive_mask & ACCEPT_ERR_PACKETS) + tp->config_word0 |= SAVBAD_BIT; + else + tp->config_word0 &= ~SAVBAD_BIT; + + if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) + tp->config_word0 |= RXATMAC; + else + tp->config_word0 &= ~RXATMAC; + + if(tp->receive_mask & ACCEPT_MULTI_PROM) + tp->config_word1 |= MULTICAST_ADDRESS_BIT; + else + tp->config_word1 &= ~MULTICAST_ADDRESS_BIT; + + if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING) + tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS; + else + { + if(tp->receive_mask & ACCEPT_SOURCE_ROUTING) + tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT; + else + tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS; + } + + if((tp->media_type == MEDIA_STP_16) || + (tp->media_type == MEDIA_UTP_16) || + (tp->media_type == MEDIA_STP_16_UTP_16)) + { + tp->config_word1 |= INTERFRAME_SPACING_16; + } + else + tp->config_word1 |= INTERFRAME_SPACING_4; + + *pTimer_Struc++ = 
tp->config_word0; + *pTimer_Struc++ = tp->config_word1; + + if((tp->media_type == MEDIA_STP_4) || + (tp->media_type == MEDIA_UTP_4) || + (tp->media_type == MEDIA_STP_4_UTP_4)) + { + *pTimer_Struc++ = 0x00FA; /* prescale */ + *pTimer_Struc++ = 0x2710; /* TPT_limit */ + *pTimer_Struc++ = 0x2710; /* TQP_limit */ + *pTimer_Struc++ = 0x0A28; /* TNT_limit */ + *pTimer_Struc++ = 0x3E80; /* TBT_limit */ + *pTimer_Struc++ = 0x3A98; /* TSM_limit */ + *pTimer_Struc++ = 0x1B58; /* TAM_limit */ + *pTimer_Struc++ = 0x00C8; /* TBR_limit */ + *pTimer_Struc++ = 0x07D0; /* TER_limit */ + *pTimer_Struc++ = 0x000A; /* TGT_limit */ + *pTimer_Struc++ = 0x1162; /* THT_limit */ + *pTimer_Struc++ = 0x07D0; /* TRR_limit */ + *pTimer_Struc++ = 0x1388; /* TVX_limit */ + *pTimer_Struc++ = 0x0000; /* reserved */ + } + else + { + *pTimer_Struc++ = 0x03E8; /* prescale */ + *pTimer_Struc++ = 0x9C40; /* TPT_limit */ + *pTimer_Struc++ = 0x9C40; /* TQP_limit */ + *pTimer_Struc++ = 0x0A28; /* TNT_limit */ + *pTimer_Struc++ = 0x3E80; /* TBT_limit */ + *pTimer_Struc++ = 0x3A98; /* TSM_limit */ + *pTimer_Struc++ = 0x1B58; /* TAM_limit */ + *pTimer_Struc++ = 0x00C8; /* TBR_limit */ + *pTimer_Struc++ = 0x07D0; /* TER_limit */ + *pTimer_Struc++ = 0x000A; /* TGT_limit */ + *pTimer_Struc++ = 0x4588; /* THT_limit */ + *pTimer_Struc++ = 0x1F40; /* TRR_limit */ + *pTimer_Struc++ = 0x4E20; /* TVX_limit */ + *pTimer_Struc++ = 0x0000; /* reserved */ + } + + /* Set node address. */ + *pTimer_Struc++ = dev->dev_addr[0] << 8 + | (dev->dev_addr[1] & 0xFF); + *pTimer_Struc++ = dev->dev_addr[2] << 8 + | (dev->dev_addr[3] & 0xFF); + *pTimer_Struc++ = dev->dev_addr[4] << 8 + | (dev->dev_addr[5] & 0xFF); + + /* Set group address. */ + *pTimer_Struc++ = tp->group_address_0 << 8 + | tp->group_address_0 >> 8; + *pTimer_Struc++ = tp->group_address[0] << 8 + | tp->group_address[0] >> 8; + *pTimer_Struc++ = tp->group_address[1] << 8 + | tp->group_address[1] >> 8; + + /* Set functional address. */ + *pTimer_Struc++ = tp->functional_address_0 << 8 + | tp->functional_address_0 >> 8; + *pTimer_Struc++ = tp->functional_address[0] << 8 + | tp->functional_address[0] >> 8; + *pTimer_Struc++ = tp->functional_address[1] << 8 + | tp->functional_address[1] >> 8; + + /* Set Bit-Wise group address. */ + *pTimer_Struc++ = tp->bitwise_group_address[0] << 8 + | tp->bitwise_group_address[0] >> 8; + *pTimer_Struc++ = tp->bitwise_group_address[1] << 8 + | tp->bitwise_group_address[1] >> 8; + + /* Set ring number address. */ + *pTimer_Struc++ = tp->source_ring_number; + *pTimer_Struc++ = tp->target_ring_number; + + /* Physical drop number. */ + *pTimer_Struc++ = (unsigned short)0; + *pTimer_Struc++ = (unsigned short)0; + + /* Product instance ID. */ + for(i = 0; i < 9; i++) + *pTimer_Struc++ = (unsigned short)0; + + err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0); + + return err; +} + +static int smctr_issue_init_txrx_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i; + int err; + void **txrx_ptrs = (void *)tp->misc_command_data; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + { + printk(KERN_ERR "%s: Hardware failure\n", dev->name); + return err; + } + + /* Initialize Transmit Queue Pointers that are used, to point to + * a single FCB. + */ + for(i = 0; i < NUM_TX_QS_USED; i++) + *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]); + + /* Initialize Transmit Queue Pointers that are NOT used to ZERO. 
*/ + for(; i < MAX_TX_QS; i++) + *txrx_ptrs++ = (void *)0; + + /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are + * used, to point to a single FCB and a BDB chain of buffers. + */ + for(i = 0; i < NUM_RX_QS_USED; i++) + { + *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]); + *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]); + } + + /* Initialize Receive Queue Pointers that are NOT used to ZERO. */ + for(; i < MAX_RX_QS; i++) + { + *txrx_ptrs++ = (void *)0; + *txrx_ptrs++ = (void *)0; + } + + err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0); + + return err; +} + +static int smctr_issue_insert_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP); + + return err; +} + +static int smctr_issue_read_ring_status_cmd(struct net_device *dev) +{ + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS, + RW_TRC_STATUS_BLOCK); + + return err; +} + +static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt) +{ + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE, + aword_cnt); + + return err; +} + +static int smctr_issue_remove_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + tp->sclb_ptr->resume_control = 0; + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_resume_acb_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + tp->sclb_ptr->resume_control = SCLB_RC_ACB; + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; + + tp->acb_pending = 1; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if(queue == MAC_QUEUE) + tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB; + else + tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB; + + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name); + + if(smctr_wait_while_cbusy(dev)) + return -1; + + if(queue == MAC_QUEUE) + tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB; + else + tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB; + + tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; + + smctr_set_ctrl_attention(dev); + + return 0; +} + +static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name); + + if(smctr_wait_while_cbusy(dev)) + return -1; + + tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue); + tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID; + + smctr_set_ctrl_attention(dev); + + return 0; +} 
+ +static int smctr_issue_test_internal_rom_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, + TRC_INTERNAL_ROM_TEST); + + return err; +} + +static int smctr_issue_test_hic_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST, + TRC_HOST_INTERFACE_REG_TEST); + + return err; +} + +static int smctr_issue_test_mac_reg_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, + TRC_MAC_REGISTERS_TEST); + + return err; +} + +static int smctr_issue_trc_loopback_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, + TRC_INTERNAL_LOOPBACK); + + return err; +} + +static int smctr_issue_tri_loopback_cmd(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, + TRC_TRI_LOOPBACK); + + return err; +} + +static int smctr_issue_write_byte_cmd(struct net_device *dev, + short aword_cnt, void *byte) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int iword, ibyte; + int err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff); + iword++, ibyte += 2) + { + tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8) + | (*((__u8 *)byte + ibyte + 1)); + } + + return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, + aword_cnt); +} + +static int smctr_issue_write_word_cmd(struct net_device *dev, + short aword_cnt, void *word) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, err; + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = smctr_wait_cmd(dev))) + return err; + + for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++) + tp->misc_command_data[i] = *((__u16 *)word + i); + + err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, + aword_cnt); + + return err; +} + +static int smctr_join_complete_state(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, + JS_JOIN_COMPLETE_STATE); + + return err; +} + +static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, j; + FCBlock *fcb; + BDBlock *bdb; + + for(i = 0; i < NUM_TX_QS_USED; i++) + { + fcb = tp->tx_fcb_head[i]; + bdb = tp->tx_bdb_head[i]; + + for(j = 0; j < tp->num_tx_fcbs[i]; j++) + { + fcb->bdb_ptr = bdb; + fcb->trc_bdb_ptr = TRC_POINTER(bdb); + fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock)); + bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock)); + } + } + + return 0; +} + +static int smctr_load_firmware(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + const struct firmware *fw; + __u16 i, checksum = 0; + int err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name); + + if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) { + printk(KERN_ERR "%s: firmware not found\n", dev->name); + return UCODE_NOT_PRESENT; + } + + tp->num_of_tx_buffs = 4; + tp->mode_bits |= UMAC; + tp->receive_mask = 0; + tp->max_packet_size = 4177; + + /* Can only upload the firmware once per adapter reset. */ + if (tp->microcode_version != 0) { + err = (UCODE_PRESENT); + goto out; + } + + /* Verify the firmware exists and is there in the right amount. 
*/ + if (!fw->data || + (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION)) + { + err = (UCODE_NOT_PRESENT); + goto out; + } + + /* UCODE_SIZE is not included in Checksum. */ + for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2) + checksum += *((__u16 *)(fw->data + 2 + i)); + if (checksum) { + err = (UCODE_NOT_PRESENT); + goto out; + } + + /* At this point we have a valid firmware image, lets kick it on up. */ + smctr_enable_adapter_ram(dev); + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if((smctr_checksum_firmware(dev)) || + (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version)) + { + smctr_enable_adapter_ctrl_store(dev); + + /* Zero out ram space for firmware. */ + for(i = 0; i < CS_RAM_SIZE; i += 2) + *((__u16 *)(tp->ram_access + i)) = 0; + + smctr_decode_firmware(dev, fw); + + tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET); *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET)) + = (tp->microcode_version << 8); + *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET)) + = ~(tp->microcode_version << 8) + 1; + + smctr_disable_adapter_ctrl_store(dev); + + if(smctr_checksum_firmware(dev)) + err = HARDWARE_FAILED; + } + else + err = UCODE_PRESENT; + + smctr_disable_16bit(dev); + out: + release_firmware(fw); + return err; +} + +static int smctr_load_node_addr(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + unsigned int i; + __u8 r; + + for(i = 0; i < 6; i++) + { + r = inb(ioaddr + LAR0 + i); + dev->dev_addr[i] = (char)r; + } + dev->addr_len = 6; + + return 0; +} + +/* Lobe Media Test. + * During the transmission of the initial 1500 lobe media MAC frames, + * the phase lock loop in the 805 chip may lock, and then un-lock, causing + * the 825 to go into a PURGE state. When performing a PURGE, the MCT + * microcode will not transmit any frames given to it by the host, and + * will consequently cause a timeout. + * + * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit + * queues other than the one used for the lobe_media_test should be + * disabled.!? + * + * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask + * has any multi-cast or promiscuous bits set, the receive_mask needs to + * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test + * run, and then the receive mask set back to its original value if the test + * is successful. + */ +static int smctr_lobe_media_test(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, perror = 0; + unsigned short saved_rcv_mask; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name); + + /* Clear receive mask for lobe test. */ + saved_rcv_mask = tp->receive_mask; + tp->receive_mask = 0; + + smctr_chg_rx_mask(dev); + + /* Setup the lobe media test. */ + smctr_lobe_media_test_cmd(dev); + if(smctr_wait_cmd(dev)) + goto err; + + /* Tx lobe media test frames. */ + for(i = 0; i < 1500; ++i) + { + if(smctr_send_lobe_media_test(dev)) + { + if(perror) + goto err; + else + { + perror = 1; + if(smctr_lobe_media_test_cmd(dev)) + goto err; + } + } + } + + if(smctr_send_dat(dev)) + { + if(smctr_send_dat(dev)) + goto err; + } + + /* Check if any frames received during test. */ + if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) || + (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status)) + goto err; + + /* Set receive mask to "Promisc" mode. 
*/ + tp->receive_mask = saved_rcv_mask; + + smctr_chg_rx_mask(dev); + + return 0; +err: + smctr_reset_adapter(dev); + tp->status = CLOSED; + return LOBE_MEDIA_TEST_FAILED; +} + +static int smctr_lobe_media_test_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name); + + /* Change to lobe media test state. */ + if(tp->monitor_state != MS_BEACON_TEST_STATE) + { + smctr_lobe_media_test_state(dev); + if(smctr_wait_cmd(dev)) + { + printk(KERN_ERR "Lobe Failed test state\n"); + return LOBE_MEDIA_TEST_FAILED; + } + } + + err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, + TRC_LOBE_MEDIA_TEST); + + return err; +} + +static int smctr_lobe_media_test_state(struct net_device *dev) +{ + int err; + + err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, + JS_LOBE_TEST_STATE); + + return err; +} + +static int smctr_make_8025_hdr(struct net_device *dev, + MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc) +{ + tmf->ac = MSB(ac_fc); /* msb is access control */ + tmf->fc = LSB(ac_fc); /* lsb is frame control */ + + tmf->sa[0] = dev->dev_addr[0]; + tmf->sa[1] = dev->dev_addr[1]; + tmf->sa[2] = dev->dev_addr[2]; + tmf->sa[3] = dev->dev_addr[3]; + tmf->sa[4] = dev->dev_addr[4]; + tmf->sa[5] = dev->dev_addr[5]; + + switch(tmf->vc) + { + /* Send RQ_INIT to RPS */ + case RQ_INIT: + tmf->da[0] = 0xc0; + tmf->da[1] = 0x00; + tmf->da[2] = 0x00; + tmf->da[3] = 0x00; + tmf->da[4] = 0x00; + tmf->da[5] = 0x02; + break; + + /* Send RPT_TX_FORWARD to CRS */ + case RPT_TX_FORWARD: + tmf->da[0] = 0xc0; + tmf->da[1] = 0x00; + tmf->da[2] = 0x00; + tmf->da[3] = 0x00; + tmf->da[4] = 0x00; + tmf->da[5] = 0x10; + break; + + /* Everything else goes to sender */ + default: + tmf->da[0] = rmf->sa[0]; + tmf->da[1] = rmf->sa[1]; + tmf->da[2] = rmf->sa[2]; + tmf->da[3] = rmf->sa[3]; + tmf->da[4] = rmf->sa[4]; + tmf->da[5] = rmf->sa[5]; + break; + } + + return 0; +} + +static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + tsv->svi = AUTHORIZED_ACCESS_PRIORITY; + tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY; + + tsv->svv[0] = MSB(tp->authorized_access_priority); + tsv->svv[1] = LSB(tp->authorized_access_priority); + + return 0; +} + +static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + tsv->svi = ADDRESS_MODIFER; + tsv->svl = S_ADDRESS_MODIFER; + + tsv->svv[0] = 0; + tsv->svv[1] = 0; + + return 0; +} + +static int smctr_make_auth_funct_class(struct net_device *dev, + MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + tsv->svi = AUTHORIZED_FUNCTION_CLASS; + tsv->svl = S_AUTHORIZED_FUNCTION_CLASS; + + tsv->svv[0] = MSB(tp->authorized_function_classes); + tsv->svv[1] = LSB(tp->authorized_function_classes); + + return 0; +} + +static int smctr_make_corr(struct net_device *dev, + MAC_SUB_VECTOR *tsv, __u16 correlator) +{ + tsv->svi = CORRELATOR; + tsv->svl = S_CORRELATOR; + + tsv->svv[0] = MSB(correlator); + tsv->svv[1] = LSB(correlator); + + return 0; +} + +static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + smctr_get_functional_address(dev); + + tsv->svi = FUNCTIONAL_ADDRESS; + tsv->svl = S_FUNCTIONAL_ADDRESS; + + tsv->svv[0] = MSB(tp->misc_command_data[0]); + tsv->svv[1] = LSB(tp->misc_command_data[0]); + + tsv->svv[2] = MSB(tp->misc_command_data[1]); + tsv->svv[3] = LSB(tp->misc_command_data[1]); + + return 0; +} + +static 
int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + smctr_get_group_address(dev); + + tsv->svi = GROUP_ADDRESS; + tsv->svl = S_GROUP_ADDRESS; + + tsv->svv[0] = MSB(tp->misc_command_data[0]); + tsv->svv[1] = LSB(tp->misc_command_data[0]); + + tsv->svv[2] = MSB(tp->misc_command_data[1]); + tsv->svv[3] = LSB(tp->misc_command_data[1]); + + /* Set Group Address Sub-vector to all zeros if only the + * Group Address/Functional Address Indicator is set. + */ + if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 && + tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00) + tsv->svv[0] = 0x00; + + return 0; +} + +static int smctr_make_phy_drop_num(struct net_device *dev, + MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + smctr_get_physical_drop_number(dev); + + tsv->svi = PHYSICAL_DROP; + tsv->svl = S_PHYSICAL_DROP; + + tsv->svv[0] = MSB(tp->misc_command_data[0]); + tsv->svv[1] = LSB(tp->misc_command_data[0]); + + tsv->svv[2] = MSB(tp->misc_command_data[1]); + tsv->svv[3] = LSB(tp->misc_command_data[1]); + + return 0; +} + +static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + int i; + + tsv->svi = PRODUCT_INSTANCE_ID; + tsv->svl = S_PRODUCT_INSTANCE_ID; + + for(i = 0; i < 18; i++) + tsv->svv[i] = 0xF0; + + return 0; +} + +static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + smctr_get_station_id(dev); + + tsv->svi = STATION_IDENTIFER; + tsv->svl = S_STATION_IDENTIFER; + + tsv->svv[0] = MSB(tp->misc_command_data[0]); + tsv->svv[1] = LSB(tp->misc_command_data[0]); + + tsv->svv[2] = MSB(tp->misc_command_data[1]); + tsv->svv[3] = LSB(tp->misc_command_data[1]); + + tsv->svv[4] = MSB(tp->misc_command_data[2]); + tsv->svv[5] = LSB(tp->misc_command_data[2]); + + return 0; +} + +static int smctr_make_ring_station_status(struct net_device *dev, + MAC_SUB_VECTOR * tsv) +{ + tsv->svi = RING_STATION_STATUS; + tsv->svl = S_RING_STATION_STATUS; + + tsv->svv[0] = 0; + tsv->svv[1] = 0; + tsv->svv[2] = 0; + tsv->svv[3] = 0; + tsv->svv[4] = 0; + tsv->svv[5] = 0; + + return 0; +} + +static int smctr_make_ring_station_version(struct net_device *dev, + MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + tsv->svi = RING_STATION_VERSION_NUMBER; + tsv->svl = S_RING_STATION_VERSION_NUMBER; + + tsv->svv[0] = 0xe2; /* EBCDIC - S */ + tsv->svv[1] = 0xd4; /* EBCDIC - M */ + tsv->svv[2] = 0xc3; /* EBCDIC - C */ + tsv->svv[3] = 0x40; /* EBCDIC - */ + tsv->svv[4] = 0xe5; /* EBCDIC - V */ + tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4); + tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f); + tsv->svv[7] = 0x40; /* EBCDIC - */ + tsv->svv[8] = 0xe7; /* EBCDIC - X */ + + if(tp->extra_info & CHIP_REV_MASK) + tsv->svv[9] = 0xc5; /* EBCDIC - E */ + else + tsv->svv[9] = 0xc4; /* EBCDIC - D */ + + return 0; +} + +static int smctr_make_tx_status_code(struct net_device *dev, + MAC_SUB_VECTOR *tsv, __u16 tx_fstatus) +{ + tsv->svi = TRANSMIT_STATUS_CODE; + tsv->svl = S_TRANSMIT_STATUS_CODE; + + tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) | IBM_PASS_SOURCE_ADDR); + + /* Stripped frame status of Transmitted Frame */ + tsv->svv[1] = tx_fstatus & 0xff; + + return 0; +} + +static int smctr_make_upstream_neighbor_addr(struct net_device *dev, + MAC_SUB_VECTOR *tsv) +{ + struct net_local *tp = netdev_priv(dev); + + smctr_get_upstream_neighbor_addr(dev); + + tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS; + tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS; + + tsv->svv[0] = 
MSB(tp->misc_command_data[0]); + tsv->svv[1] = LSB(tp->misc_command_data[0]); + + tsv->svv[2] = MSB(tp->misc_command_data[1]); + tsv->svv[3] = LSB(tp->misc_command_data[1]); + + tsv->svv[4] = MSB(tp->misc_command_data[2]); + tsv->svv[5] = LSB(tp->misc_command_data[2]); + + return 0; +} + +static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv) +{ + tsv->svi = WRAP_DATA; + tsv->svl = S_WRAP_DATA; + + return 0; +} + +/* + * Open/initialize the board. This is called sometime after + * booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int smctr_open(struct net_device *dev) +{ + int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_open\n", dev->name); + + err = smctr_init_adapter(dev); + if(err < 0) + return err; + + return err; +} + +/* Interrupt driven open of Token card. */ +static int smctr_open_tr(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned long flags; + int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name); + + /* Now we can actually open the adapter. */ + if(tp->status == OPEN) + return 0; + if(tp->status != INITIALIZED) + return -1; + + /* FIXME: it would work a lot better if we masked the irq sources + on the card here, then we could skip the locking and poll nicely */ + spin_lock_irqsave(&tp->lock, flags); + + smctr_set_page(dev, (__u8 *)tp->ram_access); + + if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE))) + goto out; + + if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE))) + goto out; + + if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE))) + goto out; + + if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE))) + goto out; + + tp->status = CLOSED; + + /* Insert into the Ring or Enter Loopback Mode. */ + if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1) + { + tp->status = CLOSED; + + if(!(err = smctr_issue_trc_loopback_cmd(dev))) + { + if(!(err = smctr_wait_cmd(dev))) + tp->status = OPEN; + } + + smctr_status_chg(dev); + } + else + { + if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2) + { + tp->status = CLOSED; + if(!(err = smctr_issue_tri_loopback_cmd(dev))) + { + if(!(err = smctr_wait_cmd(dev))) + tp->status = OPEN; + } + + smctr_status_chg(dev); + } + else + { + if((tp->mode_bits & LOOPING_MODE_MASK) + == LOOPBACK_MODE_3) + { + tp->status = CLOSED; + if(!(err = smctr_lobe_media_test_cmd(dev))) + { + if(!(err = smctr_wait_cmd(dev))) + tp->status = OPEN; + } + smctr_status_chg(dev); + } + else + { + if(!(err = smctr_lobe_media_test(dev))) + err = smctr_issue_insert_cmd(dev); + else + { + if(err == LOBE_MEDIA_TEST_FAILED) + printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name); + } + } + } + } + +out: + spin_unlock_irqrestore(&tp->lock, flags); + + return err; +} + +/* Check for a network adapter of this type, + * and return device structure if one exists. 
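+ * A caller-supplied dev->base_addr is probed directly; a nonzero value
+ * below 0x200 disables probing, and otherwise the standard list of SMC
+ * I/O ports is scanned until an adapter is found.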
+ */ +struct net_device __init *smctr_probe(int unit) +{ + struct net_device *dev = alloc_trdev(sizeof(struct net_local)); + static const unsigned ports[] = { + 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300, + 0x320, 0x340, 0x360, 0x380, 0 + }; + const unsigned *port; + int err = 0; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "tr%d", unit); + netdev_boot_setup_check(dev); + } + + if (dev->base_addr > 0x1ff) /* Check a single specified location. */ + err = smctr_probe1(dev, dev->base_addr); + else if(dev->base_addr != 0) /* Don't probe at all. */ + err =-ENXIO; + else { + for (port = ports; *port; port++) { + err = smctr_probe1(dev, *port); + if (!err) + break; + } + } + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: +#ifdef CONFIG_MCA_LEGACY + { struct net_local *tp = netdev_priv(dev); + if (tp->slot_num) + mca_mark_as_unused(tp->slot_num); + } +#endif + release_region(dev->base_addr, SMCTR_IO_EXTENT); + free_irq(dev->irq, dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops smctr_netdev_ops = { + .ndo_open = smctr_open, + .ndo_stop = smctr_close, + .ndo_start_xmit = smctr_send_packet, + .ndo_tx_timeout = smctr_timeout, + .ndo_get_stats = smctr_get_stats, + .ndo_set_multicast_list = smctr_set_multicast_list, +}; + +static int __init smctr_probe1(struct net_device *dev, int ioaddr) +{ + static unsigned version_printed; + struct net_local *tp = netdev_priv(dev); + int err; + __u32 *ram; + + if(smctr_debug && version_printed++ == 0) + printk(version); + + spin_lock_init(&tp->lock); + dev->base_addr = ioaddr; + + /* Actually detect an adapter now. */ + err = smctr_chk_isa(dev); + if(err < 0) + { + if ((err = smctr_chk_mca(dev)) < 0) { + err = -ENODEV; + goto out; + } + } + + tp = netdev_priv(dev); + dev->mem_start = tp->ram_base; + dev->mem_end = dev->mem_start + 0x10000; + ram = (__u32 *)phys_to_virt(dev->mem_start); + tp->ram_access = *(__u32 *)&ram; + tp->status = NOT_INITIALIZED; + + err = smctr_load_firmware(dev); + if(err != UCODE_PRESENT && err != SUCCESS) + { + printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err); + err = -EIO; + goto out; + } + + /* Allow user to specify ring speed on module insert. */ + if(ringspeed == 4) + tp->media_type = MEDIA_UTP_4; + else + tp->media_type = MEDIA_UTP_16; + + printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n", + dev->name, smctr_name, smctr_model, + (unsigned int)dev->base_addr, + dev->irq, tp->rom_base, tp->ram_base); + + dev->netdev_ops = &smctr_netdev_ops; + dev->watchdog_timeo = HZ; + return 0; + +out: + return err; +} + +static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, + struct net_device *dev, __u16 rx_status) +{ + struct net_local *tp = netdev_priv(dev); + struct sk_buff *skb; + __u16 rcode, correlator; + int err = 0; + __u8 xframe = 1; + + rmf->vl = SWAP_BYTES(rmf->vl); + if(rx_status & FCB_RX_STATUS_DA_MATCHED) + { + switch(rmf->vc) + { + /* Received MAC Frames Processed by RS. 
*/ + case INIT: + if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED) + { + return rcode; + } + + if((err = smctr_send_rsp(dev, rmf, rcode, + correlator))) + { + return err; + } + break; + + case CHG_PARM: + if((rcode = smctr_rcv_chg_param(dev, rmf, + &correlator)) ==HARDWARE_FAILED) + { + return rcode; + } + + if((err = smctr_send_rsp(dev, rmf, rcode, + correlator))) + { + return err; + } + break; + + case RQ_ADDR: + if((rcode = smctr_rcv_rq_addr_state_attch(dev, + rmf, &correlator)) != POSITIVE_ACK) + { + if(rcode == HARDWARE_FAILED) + return rcode; + else + return smctr_send_rsp(dev, rmf, + rcode, correlator); + } + + if((err = smctr_send_rpt_addr(dev, rmf, + correlator))) + { + return err; + } + break; + + case RQ_ATTCH: + if((rcode = smctr_rcv_rq_addr_state_attch(dev, + rmf, &correlator)) != POSITIVE_ACK) + { + if(rcode == HARDWARE_FAILED) + return rcode; + else + return smctr_send_rsp(dev, rmf, + rcode, + correlator); + } + + if((err = smctr_send_rpt_attch(dev, rmf, + correlator))) + { + return err; + } + break; + + case RQ_STATE: + if((rcode = smctr_rcv_rq_addr_state_attch(dev, + rmf, &correlator)) != POSITIVE_ACK) + { + if(rcode == HARDWARE_FAILED) + return rcode; + else + return smctr_send_rsp(dev, rmf, + rcode, + correlator); + } + + if((err = smctr_send_rpt_state(dev, rmf, + correlator))) + { + return err; + } + break; + + case TX_FORWARD: { + __u16 uninitialized_var(tx_fstatus); + + if((rcode = smctr_rcv_tx_forward(dev, rmf)) + != POSITIVE_ACK) + { + if(rcode == HARDWARE_FAILED) + return rcode; + else + return smctr_send_rsp(dev, rmf, + rcode, + correlator); + } + + if((err = smctr_send_tx_forward(dev, rmf, + &tx_fstatus)) == HARDWARE_FAILED) + { + return err; + } + + if(err == A_FRAME_WAS_FORWARDED) + { + if((err = smctr_send_rpt_tx_forward(dev, + rmf, tx_fstatus)) + == HARDWARE_FAILED) + { + return err; + } + } + break; + } + + /* Received MAC Frames Processed by CRS/REM/RPS. */ + case RSP: + case RQ_INIT: + case RPT_NEW_MON: + case RPT_SUA_CHG: + case RPT_ACTIVE_ERR: + case RPT_NN_INCMP: + case RPT_ERROR: + case RPT_ATTCH: + case RPT_STATE: + case RPT_ADDR: + break; + + /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */ + default: + xframe = 0; + if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)) + { + rcode = smctr_rcv_unknown(dev, rmf, + &correlator); + if((err = smctr_send_rsp(dev, rmf,rcode, + correlator))) + { + return err; + } + } + + break; + } + } + else + { + /* 1. DA doesn't match (Promiscuous Mode). + * 2. Parse for Extended MAC Frame Type. + */ + switch(rmf->vc) + { + case RSP: + case INIT: + case RQ_INIT: + case RQ_ADDR: + case RQ_ATTCH: + case RQ_STATE: + case CHG_PARM: + case RPT_ADDR: + case RPT_ERROR: + case RPT_ATTCH: + case RPT_STATE: + case RPT_NEW_MON: + case RPT_SUA_CHG: + case RPT_NN_INCMP: + case RPT_ACTIVE_ERR: + break; + + default: + xframe = 0; + break; + } + } + + /* NOTE: UNKNOWN MAC frames will NOT be passed up unless + * ACCEPT_ATT_MAC_FRAMES is set. + */ + if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) && + (xframe == (__u8)0)) || + ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) && + (xframe == (__u8)1))) + { + rmf->vl = SWAP_BYTES(rmf->vl); + + if (!(skb = dev_alloc_skb(size))) + return -ENOMEM; + skb->len = size; + + /* Slide data into a sleek skb. */ + skb_put(skb, skb->len); + skb_copy_to_linear_data(skb, rmf, skb->len); + + /* Update Counters */ + tp->MacStat.rx_packets++; + tp->MacStat.rx_bytes += skb->len; + + /* Kick the packet on up. 
*/ + skb->protocol = tr_type_trans(skb, dev); + netif_rx(skb); + err = 0; + } + + return err; +} + +/* Adapter RAM test. Incremental word ODD boundary data test. */ +static int smctr_ram_memory_test(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0, + word_read = 0, err_word = 0, err_pattern = 0; + unsigned int err_offset; + __u32 j, pword; + __u8 err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name); + + start_pattern = 0x0001; + pages_of_ram = tp->ram_size / tp->ram_usable; + pword = tp->ram_access; + + /* Incremental word ODD boundary test. */ + for(page = 0; (page < pages_of_ram) && (~err); + page++, start_pattern += 0x8000) + { + smctr_set_page(dev, (__u8 *)(tp->ram_access + + (page * tp->ram_usable * 1024) + 1)); + word_pattern = start_pattern; + + for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2) + *(__u16 *)(pword + j) = word_pattern++; + + word_pattern = start_pattern; + + for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err); + j += 2, word_pattern++) + { + word_read = *(__u16 *)(pword + j); + if(word_read != word_pattern) + { + err = (__u8)1; + err_offset = j; + err_word = word_read; + err_pattern = word_pattern; + return RAM_TEST_FAILED; + } + } + } + + /* Zero out memory. */ + for(page = 0; page < pages_of_ram && (~err); page++) + { + smctr_set_page(dev, (__u8 *)(tp->ram_access + + (page * tp->ram_usable * 1024))); + word_pattern = 0; + + for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2) + *(__u16 *)(pword + j) = word_pattern; + + for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2) + { + word_read = *(__u16 *)(pword + j); + if(word_read != word_pattern) + { + err = (__u8)1; + err_offset = j; + err_word = word_read; + err_pattern = word_pattern; + return RAM_TEST_FAILED; + } + } + } + + smctr_set_page(dev, (__u8 *)tp->ram_access); + + return 0; +} + +static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator) +{ + MAC_SUB_VECTOR *rsv; + signed short vlen; + __u16 rcode = POSITIVE_ACK; + unsigned int svectors = F_NO_SUB_VECTORS_FOUND; + + /* This Frame can only come from a CRS */ + if((rmf->dc_sc & SC_MASK) != SC_CRS) + return E_INAPPROPRIATE_SOURCE_CLASS; + + /* Remove MVID Length from total length. */ + vlen = (signed short)rmf->vl - 4; + + /* Point to First SVID */ + rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); + + /* Search for Appropriate SVID's. 
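+ * Each recognised SVID updates the corresponding adapter parameter; an
+ * unknown SVID or a sub-vector/MVID length mismatch yields an error
+ * response code instead of POSITIVE_ACK.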
*/ + while((vlen > 0) && (rcode == POSITIVE_ACK)) + { + switch(rsv->svi) + { + case CORRELATOR: + svectors |= F_CORRELATOR; + rcode = smctr_set_corr(dev, rsv, correlator); + break; + + case LOCAL_RING_NUMBER: + svectors |= F_LOCAL_RING_NUMBER; + rcode = smctr_set_local_ring_num(dev, rsv); + break; + + case ASSIGN_PHYSICAL_DROP: + svectors |= F_ASSIGN_PHYSICAL_DROP; + rcode = smctr_set_phy_drop(dev, rsv); + break; + + case ERROR_TIMER_VALUE: + svectors |= F_ERROR_TIMER_VALUE; + rcode = smctr_set_error_timer_value(dev, rsv); + break; + + case AUTHORIZED_FUNCTION_CLASS: + svectors |= F_AUTHORIZED_FUNCTION_CLASS; + rcode = smctr_set_auth_funct_class(dev, rsv); + break; + + case AUTHORIZED_ACCESS_PRIORITY: + svectors |= F_AUTHORIZED_ACCESS_PRIORITY; + rcode = smctr_set_auth_access_pri(dev, rsv); + break; + + default: + rcode = E_SUB_VECTOR_UNKNOWN; + break; + } + + /* Let Sender Know if SUM of SV length's is + * larger then length in MVID length field + */ + if((vlen -= rsv->svl) < 0) + rcode = E_VECTOR_LENGTH_ERROR; + + rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); + } + + if(rcode == POSITIVE_ACK) + { + /* Let Sender Know if MVID length field + * is larger then SUM of SV length's + */ + if(vlen != 0) + rcode = E_VECTOR_LENGTH_ERROR; + else + { + /* Let Sender Know if Expected SVID Missing */ + if((svectors & R_CHG_PARM) ^ R_CHG_PARM) + rcode = E_MISSING_SUB_VECTOR; + } + } + + return rcode; +} + +static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator) +{ + MAC_SUB_VECTOR *rsv; + signed short vlen; + __u16 rcode = POSITIVE_ACK; + unsigned int svectors = F_NO_SUB_VECTORS_FOUND; + + /* This Frame can only come from a RPS */ + if((rmf->dc_sc & SC_MASK) != SC_RPS) + return E_INAPPROPRIATE_SOURCE_CLASS; + + /* Remove MVID Length from total length. 
*/ + vlen = (signed short)rmf->vl - 4; + + /* Point to First SVID */ + rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); + + /* Search for Appropriate SVID's */ + while((vlen > 0) && (rcode == POSITIVE_ACK)) + { + switch(rsv->svi) + { + case CORRELATOR: + svectors |= F_CORRELATOR; + rcode = smctr_set_corr(dev, rsv, correlator); + break; + + case LOCAL_RING_NUMBER: + svectors |= F_LOCAL_RING_NUMBER; + rcode = smctr_set_local_ring_num(dev, rsv); + break; + + case ASSIGN_PHYSICAL_DROP: + svectors |= F_ASSIGN_PHYSICAL_DROP; + rcode = smctr_set_phy_drop(dev, rsv); + break; + + case ERROR_TIMER_VALUE: + svectors |= F_ERROR_TIMER_VALUE; + rcode = smctr_set_error_timer_value(dev, rsv); + break; + + default: + rcode = E_SUB_VECTOR_UNKNOWN; + break; + } + + /* Let Sender Know if SUM of SV length's is + * larger then length in MVID length field + */ + if((vlen -= rsv->svl) < 0) + rcode = E_VECTOR_LENGTH_ERROR; + + rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); + } + + if(rcode == POSITIVE_ACK) + { + /* Let Sender Know if MVID length field + * is larger then SUM of SV length's + */ + if(vlen != 0) + rcode = E_VECTOR_LENGTH_ERROR; + else + { + /* Let Sender Know if Expected SV Missing */ + if((svectors & R_INIT) ^ R_INIT) + rcode = E_MISSING_SUB_VECTOR; + } + } + + return rcode; +} + +static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf) +{ + MAC_SUB_VECTOR *rsv; + signed short vlen; + __u16 rcode = POSITIVE_ACK; + unsigned int svectors = F_NO_SUB_VECTORS_FOUND; + + /* This Frame can only come from a CRS */ + if((rmf->dc_sc & SC_MASK) != SC_CRS) + return E_INAPPROPRIATE_SOURCE_CLASS; + + /* Remove MVID Length from total length */ + vlen = (signed short)rmf->vl - 4; + + /* Point to First SVID */ + rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); + + /* Search for Appropriate SVID's */ + while((vlen > 0) && (rcode == POSITIVE_ACK)) + { + switch(rsv->svi) + { + case FRAME_FORWARD: + svectors |= F_FRAME_FORWARD; + rcode = smctr_set_frame_forward(dev, rsv, + rmf->dc_sc); + break; + + default: + rcode = E_SUB_VECTOR_UNKNOWN; + break; + } + + /* Let Sender Know if SUM of SV length's is + * larger then length in MVID length field + */ + if((vlen -= rsv->svl) < 0) + rcode = E_VECTOR_LENGTH_ERROR; + + rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); + } + + if(rcode == POSITIVE_ACK) + { + /* Let Sender Know if MVID length field + * is larger then SUM of SV length's + */ + if(vlen != 0) + rcode = E_VECTOR_LENGTH_ERROR; + else + { + /* Let Sender Know if Expected SV Missing */ + if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD) + rcode = E_MISSING_SUB_VECTOR; + } + } + + return rcode; +} + +static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, + MAC_HEADER *rmf, __u16 *correlator) +{ + MAC_SUB_VECTOR *rsv; + signed short vlen; + __u16 rcode = POSITIVE_ACK; + unsigned int svectors = F_NO_SUB_VECTORS_FOUND; + + /* Remove MVID Length from total length */ + vlen = (signed short)rmf->vl - 4; + + /* Point to First SVID */ + rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); + + /* Search for Appropriate SVID's */ + while((vlen > 0) && (rcode == POSITIVE_ACK)) + { + switch(rsv->svi) + { + case CORRELATOR: + svectors |= F_CORRELATOR; + rcode = smctr_set_corr(dev, rsv, correlator); + break; + + default: + rcode = E_SUB_VECTOR_UNKNOWN; + break; + } + + /* Let Sender Know if SUM of SV length's is + * larger then length in MVID length field + */ + if((vlen -= rsv->svl) < 0) + rcode = E_VECTOR_LENGTH_ERROR; + + rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); + } + + 
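+ /* Final consistency check: the summed sub-vector lengths must match
+ * the MVID length exactly and the expected sub-vector(s) must have been
+ * seen, otherwise an error response code is returned.
+ */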
if(rcode == POSITIVE_ACK) + { + /* Let Sender Know if MVID length field + * is larger then SUM of SV length's + */ + if(vlen != 0) + rcode = E_VECTOR_LENGTH_ERROR; + else + { + /* Let Sender Know if Expected SVID Missing */ + if((svectors & R_RQ_ATTCH_STATE_ADDR) + ^ R_RQ_ATTCH_STATE_ADDR) + rcode = E_MISSING_SUB_VECTOR; + } + } + + return rcode; +} + +static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, + __u16 *correlator) +{ + MAC_SUB_VECTOR *rsv; + signed short vlen; + + *correlator = 0; + + /* Remove MVID Length from total length */ + vlen = (signed short)rmf->vl - 4; + + /* Point to First SVID */ + rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); + + /* Search for CORRELATOR for RSP to UNKNOWN */ + while((vlen > 0) && (*correlator == 0)) + { + switch(rsv->svi) + { + case CORRELATOR: + smctr_set_corr(dev, rsv, correlator); + break; + + default: + break; + } + + vlen -= rsv->svl; + rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); + } + + return E_UNRECOGNIZED_VECTOR_ID; +} + +/* + * Reset the 825 NIC and exit w: + * 1. The NIC reset cleared (non-reset state), halted and un-initialized. + * 2. TINT masked. + * 3. CBUSY masked. + * 4. TINT clear. + * 5. CBUSY clear. + */ +static int smctr_reset_adapter(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + /* Reseting the NIC will put it in a halted and un-initialized state. */ smctr_set_trc_reset(ioaddr); + mdelay(200); /* ~2 ms */ + + smctr_clear_trc_reset(ioaddr); + mdelay(200); /* ~2 ms */ + + /* Remove any latched interrupts that occurred prior to reseting the + * adapter or possibily caused by line glitches due to the reset. + */ + outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR); + + return 0; +} + +static int smctr_restart_tx_chain(struct net_device *dev, short queue) +{ + struct net_local *tp = netdev_priv(dev); + int err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name); + + if(tp->num_tx_fcbs_used[queue] != 0 && + tp->tx_queue_status[queue] == NOT_TRANSMITING) + { + tp->tx_queue_status[queue] = TRANSMITING; + err = smctr_issue_resume_tx_fcb_cmd(dev, queue); + } + + return err; +} + +static int smctr_ring_status_chg(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name); + + /* Check for ring_status_flag: whenever MONITOR_STATE_BIT + * Bit is set, check value of monitor_state, only then we + * enable and start transmit/receive timeout (if and only + * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE) + */ + if(tp->ring_status_flags == MONITOR_STATE_CHANGED) + { + if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) || + (tp->monitor_state == MS_STANDBY_MONITOR_STATE)) + { + tp->monitor_state_ready = 1; + } + else + { + /* if adapter is NOT in either active monitor + * or standby monitor state => Disable + * transmit/receive timeout. + */ + tp->monitor_state_ready = 0; + + /* Ring speed problem, switching to auto mode. 
*/ + if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE && + !tp->cleanup) + { + printk(KERN_INFO "%s: Incorrect ring speed switching.\n", + dev->name); + smctr_set_ring_speed(dev); + } + } + } + + if(!(tp->ring_status_flags & RING_STATUS_CHANGED)) + return 0; + + switch(tp->ring_status) + { + case RING_RECOVERY: + printk(KERN_INFO "%s: Ring Recovery\n", dev->name); + break; + + case SINGLE_STATION: + printk(KERN_INFO "%s: Single Statinon\n", dev->name); + break; + + case COUNTER_OVERFLOW: + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); + break; + + case REMOVE_RECEIVED: + printk(KERN_INFO "%s: Remove Received\n", dev->name); + break; + + case AUTO_REMOVAL_ERROR: + printk(KERN_INFO "%s: Auto Remove Error\n", dev->name); + break; + + case LOBE_WIRE_FAULT: + printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name); + break; + + case TRANSMIT_BEACON: + printk(KERN_INFO "%s: Transmit Beacon\n", dev->name); + break; + + case SOFT_ERROR: + printk(KERN_INFO "%s: Soft Error\n", dev->name); + break; + + case HARD_ERROR: + printk(KERN_INFO "%s: Hard Error\n", dev->name); + break; + + case SIGNAL_LOSS: + printk(KERN_INFO "%s: Signal Loss\n", dev->name); + break; + + default: + printk(KERN_INFO "%s: Unknown ring status change\n", + dev->name); + break; + } + + return 0; +} + +static int smctr_rx_frame(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + __u16 queue, status, rx_size, err = 0; + __u8 *pbuff; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name); + + queue = tp->receive_queue_number; + + while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS) + { + err = HARDWARE_FAILED; + + if(((status & 0x007f) == 0) || + ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0)) + { + /* frame length less the CRC (4 bytes) + FS (1 byte) */ + rx_size = tp->rx_fcb_curr[queue]->frame_length - 5; + + pbuff = smctr_get_rx_pointer(dev, queue); + + smctr_set_page(dev, pbuff); + smctr_disable_16bit(dev); + + /* pbuff points to addr within one page */ + pbuff = (__u8 *)PAGE_POINTER(pbuff); + + if(queue == NON_MAC_QUEUE) + { + struct sk_buff *skb; + + skb = dev_alloc_skb(rx_size); + if (skb) { + skb_put(skb, rx_size); + + skb_copy_to_linear_data(skb, pbuff, rx_size); + + /* Update Counters */ + tp->MacStat.rx_packets++; + tp->MacStat.rx_bytes += skb->len; + + /* Kick the packet on up. */ + skb->protocol = tr_type_trans(skb, dev); + netif_rx(skb); + } else { + } + } + else + smctr_process_rx_packet((MAC_HEADER *)pbuff, + rx_size, dev, status); + } + + smctr_enable_16bit(dev); + smctr_set_page(dev, (__u8 *)tp->ram_access); + smctr_update_rx_chain(dev, queue); + + if(err != SUCCESS) + break; + } + + return err; +} + +static int smctr_send_dat(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int i, err; + MAC_HEADER *tmf; + FCBlock *fcb; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name); + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, + sizeof(MAC_HEADER))) == (FCBlock *)(-1L)) + { + return OUT_OF_RESOURCES; + } + + /* Initialize DAT Data Fields. */ + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->ac = MSB(AC_FC_DAT); + tmf->fc = LSB(AC_FC_DAT); + + for(i = 0; i < 6; i++) + { + tmf->sa[i] = dev->dev_addr[i]; + tmf->da[i] = dev->dev_addr[i]; + + } + + tmf->vc = DAT; + tmf->dc_sc = DC_RS | SC_RS; + tmf->vl = 4; + tmf->vl = SWAP_BYTES(tmf->vl); + + /* Start Transmit. 
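+ * Hand the DAT frame's FCB to the TRC, poll for FCB_COMMAND_DONE, and
+ * verify the stripped AC/status bits before freeing the FCB by hand.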
*/ + if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) + return err; + + /* Wait for Transmit to Complete */ + for(i = 0; i < 10000; i++) + { + if(fcb->frame_status & FCB_COMMAND_DONE) + break; + mdelay(1); + } + + /* Check if GOOD frame Tx'ed. */ + if(!(fcb->frame_status & FCB_COMMAND_DONE) || + fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) + { + return INITIALIZE_FAILED; + } + + /* De-allocated Tx FCB and Frame Buffer + * The FCB must be de-allocated manually if executing with + * interrupts disabled, other wise the ISR (LM_Service_Events) + * will de-allocate it when the interrupt occurs. + */ + tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; + smctr_update_tx_chain(dev, fcb, MAC_QUEUE); + + return 0; +} + +static void smctr_timeout(struct net_device *dev) +{ + /* + * If we get here, some higher level has decided we are broken. + * There should really be a "kick me" function call instead. + * + * Resetting the token ring adapter takes a long time so just + * fake transmission time and go on trying. Our own timeout + * routine is in sktr_timer_chk() + */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * Gets skb from system, queues it and checks if it can be sent + */ +static netdev_tx_t smctr_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name); + + /* + * Block a transmit overlap + */ + + netif_stop_queue(dev); + + if(tp->QueueSkb == 0) + return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */ + + tp->QueueSkb--; + skb_queue_tail(&tp->SendSkbQueue, skb); + smctr_hardware_send_packet(dev, tp); + if(tp->QueueSkb > 0) + netif_wake_queue(dev); + + return NETDEV_TX_OK; +} + +static int smctr_send_lobe_media_test(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + MAC_SUB_VECTOR *tsv; + MAC_HEADER *tmf; + FCBlock *fcb; + __u32 i; + int err; + + if(smctr_debug > 15) + printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name); + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr) + + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L)) + { + return OUT_OF_RESOURCES; + } + + /* Initialize DAT Data Fields. */ + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST); + tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST); + + for(i = 0; i < 6; i++) + { + tmf->da[i] = 0; + tmf->sa[i] = dev->dev_addr[i]; + } + + tmf->vc = LOBE_MEDIA_TEST; + tmf->dc_sc = DC_RS | SC_RS; + tmf->vl = 4; + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_wrap_data(dev, tsv); + tmf->vl += tsv->svl; + + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_wrap_data(dev, tsv); + tmf->vl += tsv->svl; + + /* Start Transmit. */ + tmf->vl = SWAP_BYTES(tmf->vl); + if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) + return err; + + /* Wait for Transmit to Complete. (10 ms). */ + for(i=0; i < 10000; i++) + { + if(fcb->frame_status & FCB_COMMAND_DONE) + break; + mdelay(1); + } + + /* Check if GOOD frame Tx'ed */ + if(!(fcb->frame_status & FCB_COMMAND_DONE) || + fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) + { + return LOBE_MEDIA_TEST_FAILED; + } + + /* De-allocated Tx FCB and Frame Buffer + * The FCB must be de-allocated manually if executing with + * interrupts disabled, other wise the ISR (LM_Service_Events) + * will de-allocate it when the interrupt occurs. 
+ */ + tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; + smctr_update_tx_chain(dev, fcb, MAC_QUEUE); + + return 0; +} + +static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, + __u16 correlator) +{ + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS + + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS)) + == (FCBlock *)(-1L)) + { + return 0; + } + + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RPT_ADDR; + tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_corr(dev, tsv, correlator); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_phy_drop_num(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_upstream_neighbor_addr(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_addr_mod(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_group_addr(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_funct_addr(dev, tsv); + + tmf->vl += tsv->svl; + + /* Subtract out MVID and MVL which is + * include in both vl and MAC_HEADER + */ +/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; + fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; +*/ + tmf->vl = SWAP_BYTES(tmf->vl); + + return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); +} + +static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, + __u16 correlator) +{ + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS + + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY)) + == (FCBlock *)(-1L)) + { + return 0; + } + + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RPT_ATTCH; + tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_corr(dev, tsv, correlator); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_product_id(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_funct_addr(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_auth_funct_class(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_access_pri(dev, tsv); + + tmf->vl += tsv->svl; + + /* Subtract out MVID and MVL which is + * include in both vl and MAC_HEADER + */ +/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; + fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; +*/ + tmf->vl = SWAP_BYTES(tmf->vl); + + return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); +} + +static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf, + __u16 correlator) +{ + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER + + S_RING_STATION_STATUS + S_STATION_IDENTIFER)) + == (FCBlock *)(-1L)) + { + return 0; + } + + tmf = 
(MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RPT_STATE; + tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_corr(dev, tsv, correlator); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_ring_station_version(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_ring_station_status(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_station_id(dev, tsv); + + tmf->vl += tsv->svl; + + /* Subtract out MVID and MVL which is + * include in both vl and MAC_HEADER + */ +/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; + fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; +*/ + tmf->vl = SWAP_BYTES(tmf->vl); + + return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); +} + +static int smctr_send_rpt_tx_forward(struct net_device *dev, + MAC_HEADER *rmf, __u16 tx_fstatus) +{ + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L)) + { + return 0; + } + + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RPT_TX_FORWARD; + tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_tx_status_code(dev, tsv, tx_fstatus); + + tmf->vl += tsv->svl; + + /* Subtract out MVID and MVL which is + * include in both vl and MAC_HEADER + */ +/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; + fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; +*/ + tmf->vl = SWAP_BYTES(tmf->vl); + + return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); +} + +static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, + __u16 rcode, __u16 correlator) +{ + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L)) + { + return 0; + } + + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RSP; + tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_corr(dev, tsv, correlator); + + return 0; +} + +static int smctr_send_rq_init(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + MAC_HEADER *tmf; + MAC_SUB_VECTOR *tsv; + FCBlock *fcb; + unsigned int i, count = 0; + __u16 fstatus; + int err; + + do { + if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) + + S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS + + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER)) + == (FCBlock *)(-1L))) + { + return 0; + } + + tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; + tmf->vc = RQ_INIT; + tmf->dc_sc = DC_RPS | SC_RS; + tmf->vl = 4; + + smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT); + + tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); + smctr_make_product_id(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_upstream_neighbor_addr(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); + smctr_make_ring_station_version(dev, tsv); + + tmf->vl += tsv->svl; + tsv = (MAC_SUB_VECTOR *)((__u32)tsv + 
tsv->svl); + smctr_make_addr_mod(dev, tsv); + + tmf->vl += tsv->svl; + + /* Subtract out MVID and MVL which is + * include in both vl and MAC_HEADER + */ +/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; + fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; +*/ + tmf->vl = SWAP_BYTES(tmf->vl); + + if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) + return err; + + /* Wait for Transmit to Complete */ + for(i = 0; i < 10000; i++) + { + if(fcb->frame_status & FCB_COMMAND_DONE) + break; + mdelay(1); + } + + /* Check if GOOD frame Tx'ed */ + fstatus = fcb->frame_status; + + if(!(fstatus & FCB_COMMAND_DONE)) + return HARDWARE_FAILED; + + if(!(fstatus & FCB_TX_STATUS_E)) + count++; + + /* De-allocated Tx FCB and Frame Buffer + * The FCB must be de-allocated manually if executing with + * interrupts disabled, other wise the ISR (LM_Service_Events) + * will de-allocate it when the interrupt occurs. + */ + tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; + smctr_update_tx_chain(dev, fcb, MAC_QUEUE); + } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS)); + + return smctr_join_complete_state(dev); +} + +static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, + __u16 *tx_fstatus) +{ + struct net_local *tp = netdev_priv(dev); + FCBlock *fcb; + unsigned int i; + int err; + + /* Check if this is the END POINT of the Transmit Forward Chain. */ + if(rmf->vl <= 18) + return 0; + + /* Allocate Transmit FCB only by requesting 0 bytes + * of data buffer. + */ + if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L)) + return 0; + + /* Set pointer to Transmit Frame Buffer to the data + * portion of the received TX Forward frame, making + * sure to skip over the Vector Code (vc) and Vector + * length (vl). 
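+ * (hence the extra 2-byte offset past the MAC header and the matching
+ * "vl - 4 - 2" adjustment of the forwarded frame length below)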
+ */ + fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf + + sizeof(MAC_HEADER) + 2); + fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf + + sizeof(MAC_HEADER) + 2); + + fcb->frame_length = rmf->vl - 4 - 2; + fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2; + + if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) + return err; + + /* Wait for Transmit to Complete */ + for(i = 0; i < 10000; i++) + { + if(fcb->frame_status & FCB_COMMAND_DONE) + break; + mdelay(1); + } + + /* Check if GOOD frame Tx'ed */ + if(!(fcb->frame_status & FCB_COMMAND_DONE)) + { + if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE))) + return err; + + for(i = 0; i < 10000; i++) + { + if(fcb->frame_status & FCB_COMMAND_DONE) + break; + mdelay(1); + } + + if(!(fcb->frame_status & FCB_COMMAND_DONE)) + return HARDWARE_FAILED; + } + + *tx_fstatus = fcb->frame_status; + + return A_FRAME_WAS_FORWARDED; +} + +static int smctr_set_auth_access_pri(struct net_device *dev, + MAC_SUB_VECTOR *rsv) +{ + struct net_local *tp = netdev_priv(dev); + + if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY) + return E_SUB_VECTOR_LENGTH_ERROR; + + tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]); + + return POSITIVE_ACK; +} + +static int smctr_set_auth_funct_class(struct net_device *dev, + MAC_SUB_VECTOR *rsv) +{ + struct net_local *tp = netdev_priv(dev); + + if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS) + return E_SUB_VECTOR_LENGTH_ERROR; + + tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]); + + return POSITIVE_ACK; +} + +static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, + __u16 *correlator) +{ + if(rsv->svl != S_CORRELATOR) + return E_SUB_VECTOR_LENGTH_ERROR; + + *correlator = (rsv->svv[0] << 8 | rsv->svv[1]); + + return POSITIVE_ACK; +} + +static int smctr_set_error_timer_value(struct net_device *dev, + MAC_SUB_VECTOR *rsv) +{ + __u16 err_tval; + int err; + + if(rsv->svl != S_ERROR_TIMER_VALUE) + return E_SUB_VECTOR_LENGTH_ERROR; + + err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10; + + smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval); + + if((err = smctr_wait_cmd(dev))) + return err; + + return POSITIVE_ACK; +} + +static int smctr_set_frame_forward(struct net_device *dev, + MAC_SUB_VECTOR *rsv, __u8 dc_sc) +{ + if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD)) + return E_SUB_VECTOR_LENGTH_ERROR; + + if((dc_sc & DC_MASK) != DC_CRS) + { + if(rsv->svl >= 2 && rsv->svl < 20) + return E_TRANSMIT_FORWARD_INVALID; + + if((rsv->svv[0] != 0) || (rsv->svv[1] != 0)) + return E_TRANSMIT_FORWARD_INVALID; + } + + return POSITIVE_ACK; +} + +static int smctr_set_local_ring_num(struct net_device *dev, + MAC_SUB_VECTOR *rsv) +{ + struct net_local *tp = netdev_priv(dev); + + if(rsv->svl != S_LOCAL_RING_NUMBER) + return E_SUB_VECTOR_LENGTH_ERROR; + + if(tp->ptr_local_ring_num) + *(__u16 *)(tp->ptr_local_ring_num) + = (rsv->svv[0] << 8 | rsv->svv[1]); + + return POSITIVE_ACK; +} + +static unsigned short smctr_set_ctrl_attention(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + if(tp->bic_type == BIC_585_CHIP) + outb((tp->trc_mask | HWR_CA), ioaddr + HWR); + else + { + outb((tp->trc_mask | CSR_CA), ioaddr + CSR); + outb(tp->trc_mask, ioaddr + CSR); + } + + return 0; +} + +static void smctr_set_multicast_list(struct net_device *dev) +{ + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name); +} + +static int smctr_set_page(struct net_device *dev, __u8 *buf) +{ + struct net_local *tp = netdev_priv(dev); + 
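+ /* Translate the buffer's offset within shared RAM into a page number
+ * and latch it into the page register (PR) below.
+ */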
__u8 amask; + __u32 tptr; + + tptr = (__u32)buf - (__u32)tp->ram_access; + amask = (__u8)((tptr & PR_PAGE_MASK) >> 8); + outb(amask, dev->base_addr + PR); + + return 0; +} + +static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv) +{ + int err; + + if(rsv->svl != S_PHYSICAL_DROP) + return E_SUB_VECTOR_LENGTH_ERROR; + + smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]); + if((err = smctr_wait_cmd(dev))) + return err; + + return POSITIVE_ACK; +} + +/* Reset the ring speed to the opposite of what it was. This auto-pilot + * mode requires a complete reset and re-init of the adapter. + */ +static int smctr_set_ring_speed(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + if(tp->media_type == MEDIA_UTP_16) + tp->media_type = MEDIA_UTP_4; + else + tp->media_type = MEDIA_UTP_16; + + smctr_enable_16bit(dev); + + /* Re-Initialize adapter's internal registers */ + smctr_reset_adapter(dev); + + if((err = smctr_init_card_real(dev))) + return err; + + smctr_enable_bic_int(dev); + + if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) + return err; + + smctr_disable_16bit(dev); + + return 0; +} + +static int smctr_set_rx_look_ahead(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + __u16 sword, rword; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name); + + tp->adapter_flags &= ~(FORCED_16BIT_MODE); + tp->adapter_flags |= RX_VALID_LOOKAHEAD; + + if(tp->adapter_bus == BUS_ISA16_TYPE) + { + sword = *((__u16 *)(tp->ram_access)); + *((__u16 *)(tp->ram_access)) = 0x1234; + + smctr_disable_16bit(dev); + rword = *((__u16 *)(tp->ram_access)); + smctr_enable_16bit(dev); + + if(rword != 0x1234) + tp->adapter_flags |= FORCED_16BIT_MODE; + + *((__u16 *)(tp->ram_access)) = sword; + } + + return 0; +} + +static int smctr_set_trc_reset(int ioaddr) +{ + __u8 r; + + r = inb(ioaddr + MSR); + outb(MSR_RST | r, ioaddr + MSR); + + return 0; +} + +/* + * This function can be called if the adapter is busy or not. + */ +static int smctr_setup_single_cmd(struct net_device *dev, + __u16 command, __u16 subcommand) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int err; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name); + + if((err = smctr_wait_while_cbusy(dev))) + return err; + + if((err = (unsigned int)smctr_wait_cmd(dev))) + return err; + + tp->acb_head->cmd_done_status = 0; + tp->acb_head->cmd = command; + tp->acb_head->subcmd = subcommand; + + err = smctr_issue_resume_acb_cmd(dev); + + return err; +} + +/* + * This function can not be called with the adapter busy. 
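+ * Callers are expected to have already waited for CBUSY to clear and for
+ * any previous ACB command to complete.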
+ */ +static int smctr_setup_single_cmd_w_data(struct net_device *dev, + __u16 command, __u16 subcommand) +{ + struct net_local *tp = netdev_priv(dev); + + tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE; + tp->acb_head->cmd = command; + tp->acb_head->subcmd = subcommand; + tp->acb_head->data_offset_lo + = (__u16)TRC_POINTER(tp->misc_command_data); + + return smctr_issue_resume_acb_cmd(dev); +} + +static char *smctr_malloc(struct net_device *dev, __u16 size) +{ + struct net_local *tp = netdev_priv(dev); + char *m; + + m = (char *)(tp->ram_access + tp->sh_mem_used); + tp->sh_mem_used += (__u32)size; + + return m; +} + +static int smctr_status_chg(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name); + + switch(tp->status) + { + case OPEN: + break; + + case CLOSED: + break; + + /* Interrupt driven open() completion. XXX */ + case INITIALIZED: + tp->group_address_0 = 0; + tp->group_address[0] = 0; + tp->group_address[1] = 0; + tp->functional_address_0 = 0; + tp->functional_address[0] = 0; + tp->functional_address[1] = 0; + smctr_open_tr(dev); + break; + + default: + printk(KERN_INFO "%s: status change unknown %x\n", + dev->name, tp->status); + break; + } + + return 0; +} + +static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, + __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + int err = 0; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name); + + fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS; + if(tp->num_tx_fcbs[queue] != 1) + fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS; + + if(tp->tx_queue_status[queue] == NOT_TRANSMITING) + { + tp->tx_queue_status[queue] = TRANSMITING; + err = smctr_issue_resume_tx_fcb_cmd(dev, queue); + } + + return err; +} + +static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + __u16 status, err = 0; + int cstatus; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name); + + while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS) + { + if(status & 0x7e00 ) + { + err = HARDWARE_FAILED; + break; + } + + if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue], + queue)) != SUCCESS) + break; + + smctr_disable_16bit(dev); + + if(tp->mode_bits & UMAC) + { + if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2))) + cstatus = NO_SUCH_DESTINATION; + else + { + if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2))) + cstatus = DEST_OUT_OF_RESOURCES; + else + { + if(status & FCB_TX_STATUS_E) + cstatus = MAX_COLLISIONS; + else + cstatus = SUCCESS; + } + } + } + else + cstatus = SUCCESS; + + if(queue == BUG_QUEUE) + err = SUCCESS; + + smctr_enable_16bit(dev); + if(err != SUCCESS) + break; + } + + return err; +} + +static unsigned short smctr_tx_move_frame(struct net_device *dev, + struct sk_buff *skb, __u8 *pbuff, unsigned int bytes) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int ram_usable; + __u32 flen, len, offset = 0; + __u8 *frag, *page; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name); + + ram_usable = ((unsigned int)tp->ram_usable) << 10; + frag = skb->data; + flen = skb->len; + + while(flen > 0 && bytes > 0) + { + smctr_set_page(dev, pbuff); + + offset = SMC_PAGE_OFFSET(pbuff); + + if(offset + flen > ram_usable) + len = ram_usable - offset; + else + len = flen; + + if(len > bytes) + len = bytes; + + page = (char *) (offset + tp->ram_access); + 
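+ /* Copy only as much of this fragment as fits in the page currently
+ * mapped at ram_access; the next loop iteration re-pages and continues.
+ */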
memcpy(page, frag, len); + + flen -=len; + bytes -= len; + frag += len; + pbuff += len; + } + + return 0; +} + +/* Update the error statistic counters for this adapter. */ +static int smctr_update_err_stats(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + struct tr_statistics *tstat = &tp->MacStat; + + if(tstat->internal_errors) + tstat->internal_errors + += *(tp->misc_command_data + 0) & 0x00ff; + + if(tstat->line_errors) + tstat->line_errors += *(tp->misc_command_data + 0) >> 8; + + if(tstat->A_C_errors) + tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff; + + if(tstat->burst_errors) + tstat->burst_errors += *(tp->misc_command_data + 1) >> 8; + + if(tstat->abort_delimiters) + tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8; + + if(tstat->recv_congest_count) + tstat->recv_congest_count + += *(tp->misc_command_data + 3) & 0x00ff; + + if(tstat->lost_frames) + tstat->lost_frames + += *(tp->misc_command_data + 3) >> 8; + + if(tstat->frequency_errors) + tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff; + + if(tstat->frame_copied_errors) + tstat->frame_copied_errors + += *(tp->misc_command_data + 4) >> 8; + + if(tstat->token_errors) + tstat->token_errors += *(tp->misc_command_data + 5) >> 8; + + return 0; +} + +static int smctr_update_rx_chain(struct net_device *dev, __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + FCBlock *fcb; + BDBlock *bdb; + __u16 size, len; + + fcb = tp->rx_fcb_curr[queue]; + len = fcb->frame_length; + + fcb->frame_status = 0; + fcb->info = FCB_CHAIN_END; + fcb->back_ptr->info = FCB_WARNING; + + tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr; + + /* update RX BDBs */ + size = (len >> RX_BDB_SIZE_SHIFT); + if(len & RX_DATA_BUFFER_SIZE_MASK) + size += sizeof(BDBlock); + size &= (~RX_BDB_SIZE_MASK); + + /* check if wrap around */ + bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size)); + if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue]) + { + bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue]) + + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue])); + } + + bdb->back_ptr->info = BDB_CHAIN_END; + tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END; + tp->rx_bdb_curr[queue] = bdb; + + return 0; +} + +static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, + __u16 queue) +{ + struct net_local *tp = netdev_priv(dev); + + if(smctr_debug > 20) + printk(KERN_DEBUG "smctr_update_tx_chain\n"); + + if(tp->num_tx_fcbs_used[queue] <= 0) + return HARDWARE_FAILED; + else + { + if(tp->tx_buff_used[queue] < fcb->memory_alloc) + { + tp->tx_buff_used[queue] = 0; + return HARDWARE_FAILED; + } + + tp->tx_buff_used[queue] -= fcb->memory_alloc; + + /* if all transmit buffer are cleared + * need to set the tx_buff_curr[] to tx_buff_head[] + * otherwise, tx buffer will be segregate and cannot + * accommodate and buffer greater than (curr - head) and + * (end - curr) since we do not allow wrap around allocation. 
+ */ + if(tp->tx_buff_used[queue] == 0) + tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; + + tp->num_tx_fcbs_used[queue]--; + fcb->frame_status = 0; + tp->tx_fcb_end[queue] = fcb->next_ptr; + netif_wake_queue(dev); + return 0; + } +} + +static int smctr_wait_cmd(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int loop_count = 0x20000; + + if(smctr_debug > 10) + printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name); + + while(loop_count) + { + if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE) + break; + udelay(1); + loop_count--; + } + + if(loop_count == 0) + return HARDWARE_FAILED; + + if(tp->acb_head->cmd_done_status & 0xff) + return HARDWARE_FAILED; + + return 0; +} + +static int smctr_wait_while_cbusy(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int timeout = 0x20000; + int ioaddr = dev->base_addr; + __u8 r; + + if(tp->bic_type == BIC_585_CHIP) + { + while(timeout) + { + r = inb(ioaddr + HWR); + if((r & HWR_CBUSY) == 0) + break; + timeout--; + } + } + else + { + while(timeout) + { + r = inb(ioaddr + CSR); + if((r & CSR_CBUSY) == 0) + break; + timeout--; + } + } + + if(timeout) + return 0; + else + return HARDWARE_FAILED; +} + +#ifdef MODULE + +static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS]; +static int io[SMCTR_MAX_ADAPTERS]; +static int irq[SMCTR_MAX_ADAPTERS]; + +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("tr_smctr.bin"); + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param(ringspeed, int, 0); + +static struct net_device * __init setup_card(int n) +{ + struct net_device *dev = alloc_trdev(sizeof(struct net_local)); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + dev->irq = irq[n]; + err = smctr_probe1(dev, io[n]); + if (err) + goto out; + + err = register_netdev(dev); + if (err) + goto out1; + return dev; + out1: +#ifdef CONFIG_MCA_LEGACY + { struct net_local *tp = netdev_priv(dev); + if (tp->slot_num) + mca_mark_as_unused(tp->slot_num); + } +#endif + release_region(dev->base_addr, SMCTR_IO_EXTENT); + free_irq(dev->irq, dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +int __init init_module(void) +{ + int i, found = 0; + struct net_device *dev; + + for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { + dev = io[0]? setup_card(i) : smctr_probe(-1); + if (!IS_ERR(dev)) { + ++found; + dev_smctr[i] = dev; + } + } + + return found ? 
0 : -ENODEV; +} + +void __exit cleanup_module(void) +{ + int i; + + for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { + struct net_device *dev = dev_smctr[i]; + + if (dev) { + + unregister_netdev(dev); +#ifdef CONFIG_MCA_LEGACY + { struct net_local *tp = netdev_priv(dev); + if (tp->slot_num) + mca_mark_as_unused(tp->slot_num); + } +#endif + release_region(dev->base_addr, SMCTR_IO_EXTENT); + if (dev->irq) + free_irq(dev->irq, dev); + + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h new file mode 100644 index 00000000..6e5700ab --- /dev/null +++ b/drivers/net/tokenring/smctr.h @@ -0,0 +1,1585 @@ +/* smctr.h: SMC Token Ring driver header for Linux + * + * Authors: + * - Jay Schulist + */ + +#ifndef __LINUX_SMCTR_H +#define __LINUX_SMCTR_H + +#ifdef __KERNEL__ + +#define MAX_TX_QUEUE 10 + +#define SMC_HEADER_SIZE 14 + +#define SMC_PAGE_OFFSET(X) (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + +#define INIT 0x0D +#define RQ_ATTCH 0x10 +#define RQ_STATE 0x0F +#define RQ_ADDR 0x0E +#define CHG_PARM 0x0C +#define RSP 0x00 +#define TX_FORWARD 0x09 + +#define AC_FC_DAT ((3<<13) | 1) +#define DAT 0x07 + +#define RPT_NEW_MON 0x25 +#define RPT_SUA_CHG 0x26 +#define RPT_ACTIVE_ERR 0x28 +#define RPT_NN_INCMP 0x27 +#define RPT_ERROR 0x29 + +#define RQ_INIT 0x20 +#define RPT_ATTCH 0x24 +#define RPT_STATE 0x23 +#define RPT_ADDR 0x22 + +#define POSITIVE_ACK 0x0001 +#define A_FRAME_WAS_FORWARDED 0x8888 + +#define GROUP_ADDRESS 0x2B +#define PHYSICAL_DROP 0x0B +#define AUTHORIZED_ACCESS_PRIORITY 0x07 +#define AUTHORIZED_FUNCTION_CLASS 0x06 +#define FUNCTIONAL_ADDRESS 0x2C +#define RING_STATION_STATUS 0x29 +#define TRANSMIT_STATUS_CODE 0x2A +#define IBM_PASS_SOURCE_ADDR 0x01 +#define AC_FC_RPT_TX_FORWARD ((0<<13) | 0) +#define AC_FC_RPT_STATE ((0<<13) | 0) +#define AC_FC_RPT_ADDR ((0<<13) | 0) +#define CORRELATOR 0x09 + +#define POSITIVE_ACK 0x0001 /* */ +#define E_MAC_DATA_INCOMPLETE 0x8001 /* not used */ +#define E_VECTOR_LENGTH_ERROR 0x8002 /* */ +#define E_UNRECOGNIZED_VECTOR_ID 0x8003 /* */ +#define E_INAPPROPRIATE_SOURCE_CLASS 0x8004 /* */ +#define E_SUB_VECTOR_LENGTH_ERROR 0x8005 /* */ +#define E_TRANSMIT_FORWARD_INVALID 0x8006 /* def. 
by IBM */ +#define E_MISSING_SUB_VECTOR 0x8007 /* */ +#define E_SUB_VECTOR_UNKNOWN 0x8008 /* */ +#define E_MAC_HEADER_TOO_LONG 0x8009 /* */ +#define E_FUNCTION_DISABLED 0x800A /* not used */ + +#define A_FRAME_WAS_FORWARDED 0x8888 /* used by send_TX_FORWARD */ + +#define UPSTREAM_NEIGHBOR_ADDRESS 0x02 +#define LOCAL_RING_NUMBER 0x03 +#define ASSIGN_PHYSICAL_DROP 0x04 +#define ERROR_TIMER_VALUE 0x05 +#define AUTHORIZED_FUNCTION_CLASS 0x06 +#define AUTHORIZED_ACCESS_PRIORITY 0x07 +#define CORRELATOR 0x09 +#define PHYSICAL_DROP 0x0B +#define RESPONSE_CODE 0x20 +#define ADDRESS_MODIFER 0x21 +#define PRODUCT_INSTANCE_ID 0x22 +#define RING_STATION_VERSION_NUMBER 0x23 +#define WRAP_DATA 0x26 +#define FRAME_FORWARD 0x27 +#define STATION_IDENTIFER 0x28 +#define RING_STATION_STATUS 0x29 +#define TRANSMIT_STATUS_CODE 0x2A +#define GROUP_ADDRESS 0x2B +#define FUNCTIONAL_ADDRESS 0x2C + +#define F_NO_SUB_VECTORS_FOUND 0x0000 +#define F_UPSTREAM_NEIGHBOR_ADDRESS 0x0001 +#define F_LOCAL_RING_NUMBER 0x0002 +#define F_ASSIGN_PHYSICAL_DROP 0x0004 +#define F_ERROR_TIMER_VALUE 0x0008 +#define F_AUTHORIZED_FUNCTION_CLASS 0x0010 +#define F_AUTHORIZED_ACCESS_PRIORITY 0x0020 +#define F_CORRELATOR 0x0040 +#define F_PHYSICAL_DROP 0x0080 +#define F_RESPONSE_CODE 0x0100 +#define F_PRODUCT_INSTANCE_ID 0x0200 +#define F_RING_STATION_VERSION_NUMBER 0x0400 +#define F_STATION_IDENTIFER 0x0800 +#define F_RING_STATION_STATUS 0x1000 +#define F_GROUP_ADDRESS 0x2000 +#define F_FUNCTIONAL_ADDRESS 0x4000 +#define F_FRAME_FORWARD 0x8000 + +#define R_INIT 0x00 +#define R_RQ_ATTCH_STATE_ADDR 0x00 +#define R_CHG_PARM 0x00 +#define R_TX_FORWARD F_FRAME_FORWARD + + +#define UPSTREAM_NEIGHBOR_ADDRESS 0x02 +#define ADDRESS_MODIFER 0x21 +#define RING_STATION_VERSION_NUMBER 0x23 +#define PRODUCT_INSTANCE_ID 0x22 + +#define RPT_TX_FORWARD 0x2A + +#define AC_FC_INIT (3<<13) | 0 /* */ +#define AC_FC_RQ_INIT ((3<<13) | 0) /* */ +#define AC_FC_RQ_ATTCH (3<<13) | 0 /* DC = SC of rx frame */ +#define AC_FC_RQ_STATE (3<<13) | 0 /* DC = SC of rx frame */ +#define AC_FC_RQ_ADDR (3<<13) | 0 /* DC = SC of rx frame */ +#define AC_FC_CHG_PARM (3<<13) | 0 /* */ +#define AC_FC_RSP (0<<13) | 0 /* DC = SC of rx frame */ +#define AC_FC_RPT_ATTCH (0<<13) | 0 + +#define S_UPSTREAM_NEIGHBOR_ADDRESS 6 + 2 +#define S_LOCAL_RING_NUMBER 2 + 2 +#define S_ASSIGN_PHYSICAL_DROP 4 + 2 +#define S_ERROR_TIMER_VALUE 2 + 2 +#define S_AUTHORIZED_FUNCTION_CLASS 2 + 2 +#define S_AUTHORIZED_ACCESS_PRIORITY 2 + 2 +#define S_CORRELATOR 2 + 2 +#define S_PHYSICAL_DROP 4 + 2 +#define S_RESPONSE_CODE 4 + 2 +#define S_ADDRESS_MODIFER 2 + 2 +#define S_PRODUCT_INSTANCE_ID 18 + 2 +#define S_RING_STATION_VERSION_NUMBER 10 + 2 +#define S_STATION_IDENTIFER 6 + 2 +#define S_RING_STATION_STATUS 6 + 2 +#define S_GROUP_ADDRESS 4 + 2 +#define S_FUNCTIONAL_ADDRESS 4 + 2 +#define S_FRAME_FORWARD 252 + 2 +#define S_TRANSMIT_STATUS_CODE 2 + 2 + +#define ISB_IMC_RES0 0x0000 /* */ +#define ISB_IMC_MAC_TYPE_3 0x0001 /* MAC_ARC_INDICATE */ +#define ISB_IMC_MAC_ERROR_COUNTERS 0x0002 /* */ +#define ISB_IMC_RES1 0x0003 /* */ +#define ISB_IMC_MAC_TYPE_2 0x0004 /* QUE_MAC_INDICATE */ +#define ISB_IMC_TX_FRAME 0x0005 /* */ +#define ISB_IMC_END_OF_TX_QUEUE 0x0006 /* */ +#define ISB_IMC_NON_MAC_RX_RESOURCE 0x0007 /* */ +#define ISB_IMC_MAC_RX_RESOURCE 0x0008 /* */ +#define ISB_IMC_NON_MAC_RX_FRAME 0x0009 /* */ +#define ISB_IMC_MAC_RX_FRAME 0x000A /* */ +#define ISB_IMC_TRC_FIFO_STATUS 0x000B /* */ +#define ISB_IMC_COMMAND_STATUS 0x000C /* */ +#define ISB_IMC_MAC_TYPE_1 0x000D /* Self Removed */ +#define 
ISB_IMC_TRC_INTRNL_TST_STATUS 0x000E /* */ +#define ISB_IMC_RES2 0x000F /* */ + +#define NON_MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */ +#define NON_MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */ +#define NON_MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */ +#define NON_MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */ +#define RAW_NON_MAC_RX_RESOURCE_BW 0x1000 /* */ +#define RAW_NON_MAC_RX_RESOURCE_FW 0x2000 /* */ +#define RAW_NON_MAC_RX_RESOURCE_BE 0x4000 /* */ +#define RAW_NON_MAC_RX_RESOURCE_FE 0x8000 /* */ + +#define MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */ +#define MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */ +#define MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */ +#define MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */ +#define RAW_MAC_RX_RESOURCE_BW 0x1000 /* */ +#define RAW_MAC_RX_RESOURCE_FW 0x2000 /* */ +#define RAW_MAC_RX_RESOURCE_BE 0x4000 /* */ +#define RAW_MAC_RX_RESOURCE_FE 0x8000 /* */ + +#define TRC_FIFO_STATUS_TX_UNDERRUN 0x40 /* shifted right 8 bits */ +#define TRC_FIFO_STATUS_RX_OVERRUN 0x80 /* shifted right 8 bits */ +#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000 /* */ +#define RAW_TRC_FIFO_STATUS_RX_OVERRUN 0x8000 /* */ + +#define CSR_CLRTINT 0x08 + +#define MSB(X) ((__u8)((__u16) X >> 8)) +#define LSB(X) ((__u8)((__u16) X & 0xff)) + +#define AC_FC_LOBE_MEDIA_TEST ((3<<13) | 0) +#define S_WRAP_DATA 248 + 2 /* 500 + 2 */ +#define WRAP_DATA 0x26 +#define LOBE_MEDIA_TEST 0x08 + +/* Destination Class (dc) */ + +#define DC_MASK 0xF0 +#define DC_RS 0x00 +#define DC_CRS 0x40 +#define DC_RPS 0x50 +#define DC_REM 0x60 + +/* Source Classes (sc) */ + +#define SC_MASK 0x0F +#define SC_RS 0x00 +#define SC_CRS 0x04 +#define SC_RPS 0x05 +#define SC_REM 0x06 + +#define PR 0x11 +#define PR_PAGE_MASK 0x0C000 + +#define MICROCHANNEL 0x0008 +#define INTERFACE_CHIP 0x0010 +#define BOARD_16BIT 0x0040 +#define PAGED_RAM 0x0080 +#define WD8115TA (TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM) +#define WD8115T (TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM) + +#define BRD_ID_8316 0x50 + +#define r587_SER 0x001 +#define SER_DIN 0x80 +#define SER_DOUT 0x40 +#define SER_CLK 0x20 +#define SER_ECS 0x10 +#define SER_E806 0x08 +#define SER_PNP 0x04 +#define SER_BIO 0x02 +#define SER_16B 0x01 + +#define r587_IDR 0x004 +#define IDR_IRQ_MASK 0x0F0 +#define IDR_DCS_MASK 0x007 +#define IDR_RWS 0x008 + + +#define r587_BIO 0x003 +#define BIO_ENB 0x080 +#define BIO_MASK 0x03F + +#define r587_PCR 0x005 +#define PCR_RAMS 0x040 + + + +#define NUM_ADDR_BITS 8 + +#define ISA_MAX_ADDRESS 0x00ffffff + +#define SMCTR_MAX_ADAPTERS 7 + +#define MC_TABLE_ENTRIES 16 + +#define MAXFRAGMENTS 32 + +#define CHIP_REV_MASK 0x3000 + +#define MAX_TX_QS 8 +#define NUM_TX_QS_USED 3 + +#define MAX_RX_QS 2 +#define NUM_RX_QS_USED 2 + +#define INTEL_DATA_FORMAT 0x4000 +#define INTEL_ADDRESS_POINTER_FORMAT 0x8000 +#define PAGE_POINTER(X) ((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access) +#define SWAP_WORDS(X) (((X & 0xFFFF) << 16) | (X >> 16)) + +#define INTERFACE_CHIP 0x0010 /* Soft Config Adapter */ +#define ADVANCED_FEATURES 0x0020 /* Adv. netw. 
interface features */ +#define BOARD_16BIT 0x0040 /* 16 bit capability */ +#define PAGED_RAM 0x0080 /* Adapter has paged RAM */ + +#define PAGED_ROM 0x0100 /* Adapter has paged ROM */ + +#define RAM_SIZE_UNKNOWN 0x0000 /* Unknown RAM size */ +#define RAM_SIZE_0K 0x0001 /* 0K RAM */ +#define RAM_SIZE_8K 0x0002 /* 8k RAM */ +#define RAM_SIZE_16K 0x0003 /* 16k RAM */ +#define RAM_SIZE_32K 0x0004 /* 32k RAM */ +#define RAM_SIZE_64K 0x0005 /* 64k RAM */ +#define RAM_SIZE_RESERVED_6 0x0006 /* Reserved RAM size */ +#define RAM_SIZE_RESERVED_7 0x0007 /* Reserved RAM size */ +#define RAM_SIZE_MASK 0x0007 /* Isolates RAM Size */ + +#define TOKEN_MEDIA 0x0005 + +#define BID_REG_0 0x00 +#define BID_REG_1 0x01 +#define BID_REG_2 0x02 +#define BID_REG_3 0x03 +#define BID_REG_4 0x04 +#define BID_REG_5 0x05 +#define BID_REG_6 0x06 +#define BID_REG_7 0x07 +#define BID_LAR_0 0x08 +#define BID_LAR_1 0x09 +#define BID_LAR_2 0x0A +#define BID_LAR_3 0x0B +#define BID_LAR_4 0x0C +#define BID_LAR_5 0x0D + +#define BID_BOARD_ID_BYTE 0x0E +#define BID_CHCKSM_BYTE 0x0F +#define BID_LAR_OFFSET 0x08 + +#define BID_MSZ_583_BIT 0x08 +#define BID_SIXTEEN_BIT_BIT 0x01 + +#define BID_BOARD_REV_MASK 0x1E + +#define BID_MEDIA_TYPE_BIT 0x01 +#define BID_SOFT_CONFIG_BIT 0x20 +#define BID_RAM_SIZE_BIT 0x40 +#define BID_BUS_TYPE_BIT 0x80 + +#define BID_CR 0x10 + +#define BID_TXP 0x04 /* Transmit Packet Command */ + +#define BID_TCR_DIFF 0x0D /* Transmit Configuration Register */ + +#define BID_TCR_VAL 0x18 /* Value to Test 8390 or 690 */ +#define BID_PS0 0x00 /* Register Page Select 0 */ +#define BID_PS1 0x40 /* Register Page Select 1 */ +#define BID_PS2 0x80 /* Register Page Select 2 */ +#define BID_PS_MASK 0x3F /* For Masking Off Page Select Bits */ + +#define BID_EEPROM_0 0x08 +#define BID_EEPROM_1 0x09 +#define BID_EEPROM_2 0x0A +#define BID_EEPROM_3 0x0B +#define BID_EEPROM_4 0x0C +#define BID_EEPROM_5 0x0D +#define BID_EEPROM_6 0x0E +#define BID_EEPROM_7 0x0F + +#define BID_OTHER_BIT 0x02 +#define BID_ICR_MASK 0x0C +#define BID_EAR_MASK 0x0F +#define BID_ENGR_PAGE 0x0A0 +#define BID_RLA 0x10 +#define BID_EA6 0x80 +#define BID_RECALL_DONE_MASK 0x10 +#define BID_BID_EEPROM_OVERRIDE 0xFFB0 +#define BID_EXTRA_EEPROM_OVERRIDE 0xFFD0 +#define BID_EEPROM_MEDIA_MASK 0x07 +#define BID_STARLAN_TYPE 0x00 +#define BID_ETHERNET_TYPE 0x01 +#define BID_TP_TYPE 0x02 +#define BID_EW_TYPE 0x03 +#define BID_TOKEN_RING_TYPE 0x04 +#define BID_UTP2_TYPE 0x05 +#define BID_EEPROM_IRQ_MASK 0x18 +#define BID_PRIMARY_IRQ 0x00 +#define BID_ALTERNATE_IRQ_1 0x08 +#define BID_ALTERNATE_IRQ_2 0x10 +#define BID_ALTERNATE_IRQ_3 0x18 +#define BID_EEPROM_RAM_SIZE_MASK 0xE0 +#define BID_EEPROM_RAM_SIZE_RES1 0x00 +#define BID_EEPROM_RAM_SIZE_RES2 0x20 +#define BID_EEPROM_RAM_SIZE_8K 0x40 +#define BID_EEPROM_RAM_SIZE_16K 0x60 +#define BID_EEPROM_RAM_SIZE_32K 0x80 +#define BID_EEPROM_RAM_SIZE_64K 0xA0 +#define BID_EEPROM_RAM_SIZE_RES3 0xC0 +#define BID_EEPROM_RAM_SIZE_RES4 0xE0 +#define BID_EEPROM_BUS_TYPE_MASK 0x07 +#define BID_EEPROM_BUS_TYPE_AT 0x00 +#define BID_EEPROM_BUS_TYPE_MCA 0x01 +#define BID_EEPROM_BUS_TYPE_EISA 0x02 +#define BID_EEPROM_BUS_TYPE_NEC 0x03 +#define BID_EEPROM_BUS_SIZE_MASK 0x18 +#define BID_EEPROM_BUS_SIZE_8BIT 0x00 +#define BID_EEPROM_BUS_SIZE_16BIT 0x08 +#define BID_EEPROM_BUS_SIZE_32BIT 0x10 +#define BID_EEPROM_BUS_SIZE_64BIT 0x18 +#define BID_EEPROM_BUS_MASTER 0x20 +#define BID_EEPROM_RAM_PAGING 0x40 +#define BID_EEPROM_ROM_PAGING 0x80 +#define BID_EEPROM_PAGING_MASK 0xC0 +#define BID_EEPROM_LOW_COST 0x08 +#define 
BID_EEPROM_IO_MAPPED 0x10 +#define BID_EEPROM_HMI 0x01 +#define BID_EEPROM_AUTO_MEDIA_DETECT 0x01 +#define BID_EEPROM_CHIP_REV_MASK 0x0C + +#define BID_EEPROM_LAN_ADDR 0x30 + +#define BID_EEPROM_MEDIA_OPTION 0x54 +#define BID_EEPROM_MEDIA_UTP 0x01 +#define BID_EEPROM_4MB_RING 0x08 +#define BID_EEPROM_16MB_RING 0x10 +#define BID_EEPROM_MEDIA_STP 0x40 + +#define BID_EEPROM_MISC_DATA 0x56 +#define BID_EEPROM_EARLY_TOKEN_RELEASE 0x02 + +#define CNFG_ID_8003E 0x6fc0 +#define CNFG_ID_8003S 0x6fc1 +#define CNFG_ID_8003W 0x6fc2 +#define CNFG_ID_8115TRA 0x6ec6 +#define CNFG_ID_8013E 0x61C8 +#define CNFG_ID_8013W 0x61C9 +#define CNFG_ID_BISTRO03E 0xEFE5 +#define CNFG_ID_BISTRO13E 0xEFD5 +#define CNFG_ID_BISTRO13W 0xEFD4 +#define CNFG_MSR_583 0x0 +#define CNFG_ICR_583 0x1 +#define CNFG_IAR_583 0x2 +#define CNFG_BIO_583 0x3 +#define CNFG_EAR_583 0x3 +#define CNFG_IRR_583 0x4 +#define CNFG_LAAR_584 0x5 +#define CNFG_GP2 0x7 +#define CNFG_LAAR_MASK 0x1F +#define CNFG_LAAR_ZWS 0x20 +#define CNFG_LAAR_L16E 0x40 +#define CNFG_ICR_IR2_584 0x04 +#define CNFG_ICR_MASK 0x08 +#define CNFG_ICR_MSZ 0x08 +#define CNFG_ICR_RLA 0x10 +#define CNFG_ICR_STO 0x80 +#define CNFG_IRR_IRQS 0x60 +#define CNFG_IRR_IEN 0x80 +#define CNFG_IRR_ZWS 0x01 +#define CNFG_GP2_BOOT_NIBBLE 0x0F +#define CNFG_IRR_OUT2 0x04 +#define CNFG_IRR_OUT1 0x02 + +#define CNFG_SIZE_8KB 8 +#define CNFG_SIZE_16KB 16 +#define CNFG_SIZE_32KB 32 +#define CNFG_SIZE_64KB 64 +#define CNFG_SIZE_128KB 128 +#define CNFG_SIZE_256KB 256 +#define ROM_DISABLE 0x0 + +#define CNFG_SLOT_ENABLE_BIT 0x08 + +#define CNFG_POS_CONTROL_REG 0x096 +#define CNFG_POS_REG0 0x100 +#define CNFG_POS_REG1 0x101 +#define CNFG_POS_REG2 0x102 +#define CNFG_POS_REG3 0x103 +#define CNFG_POS_REG4 0x104 +#define CNFG_POS_REG5 0x105 + +#define CNFG_ADAPTER_TYPE_MASK 0x0e + +#define SLOT_16BIT 0x0008 +#define INTERFACE_5X3_CHIP 0x0000 /* 0000 = 583 or 593 chips */ +#define NIC_690_BIT 0x0010 /* NIC is 690 */ +#define ALTERNATE_IRQ_BIT 0x0020 /* Alternate IRQ is used */ +#define INTERFACE_584_CHIP 0x0040 /* 0001 = 584 chip */ +#define INTERFACE_594_CHIP 0x0080 /* 0010 = 594 chip */ +#define INTERFACE_585_CHIP 0x0100 /* 0100 = 585/790 chip */ +#define INTERFACE_CHIP_MASK 0x03C0 /* Isolates Intfc Chip Type */ + +#define BOARD_16BIT 0x0040 +#define NODE_ADDR_CKSUM 0xEE +#define BRD_ID_8115T 0x04 + +#define NIC_825_BIT 0x0400 /* TRC 83C825 NIC */ +#define NIC_790_BIT 0x0800 /* NIC is 83C790 Ethernet */ + +#define CHIP_REV_MASK 0x3000 + +#define HWR_CBUSY 0x02 +#define HWR_CA 0x01 + +#define MAC_QUEUE 0 +#define NON_MAC_QUEUE 1 +#define BUG_QUEUE 2 /* NO RECEIVE QUEUE, ONLY TX */ + +#define NUM_MAC_TX_FCBS 8 +#define NUM_MAC_TX_BDBS NUM_MAC_TX_FCBS +#define NUM_MAC_RX_FCBS 7 +#define NUM_MAC_RX_BDBS 8 + +#define NUM_NON_MAC_TX_FCBS 6 +#define NUM_NON_MAC_TX_BDBS NUM_NON_MAC_TX_FCBS + +#define NUM_NON_MAC_RX_BDBS 0 /* CALCULATED DYNAMICALLY */ + +#define NUM_BUG_TX_FCBS 8 +#define NUM_BUG_TX_BDBS NUM_BUG_TX_FCBS + +#define MAC_TX_BUFFER_MEMORY 1024 +#define NON_MAC_TX_BUFFER_MEMORY (20 * 1024) +#define BUG_TX_BUFFER_MEMORY (NUM_BUG_TX_FCBS * 32) + +#define RX_BUFFER_MEMORY 0 /* CALCULATED DYNAMICALLY */ +#define RX_DATA_BUFFER_SIZE 256 +#define RX_BDB_SIZE_SHIFT 3 /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */ +#define RX_BDB_SIZE_MASK (sizeof(BDBlock) - 1) +#define RX_DATA_BUFFER_SIZE_MASK (RX_DATA_BUFFER_SIZE-1) + +#define NUM_OF_INTERRUPTS 0x20 + +#define NOT_TRANSMITING 0 +#define TRANSMITING 1 + +#define TRC_INTERRUPT_ENABLE_MASK 0x7FF6 + +#define UCODE_VERSION 0x58 + +#define 
UCODE_SIZE_OFFSET 0x0000 /* WORD */ +#define UCODE_CHECKSUM_OFFSET 0x0002 /* WORD */ +#define UCODE_VERSION_OFFSET 0x0004 /* BYTE */ + +#define CS_RAM_SIZE 0X2000 +#define CS_RAM_CHECKSUM_OFFSET 0x1FFE /* WORD 1FFE(MSB)-1FFF(LSB)*/ +#define CS_RAM_VERSION_OFFSET 0x1FFC /* WORD 1FFC(MSB)-1FFD(LSB)*/ + +#define MISC_DATA_SIZE 128 +#define NUM_OF_ACBS 1 + +#define ACB_COMMAND_NOT_DONE 0x0000 /* Init, command not done */ +#define ACB_COMMAND_DONE 0x8000 /* TRC says command done */ +#define ACB_COMMAND_STATUS_MASK 0x00FF /* low byte is status */ +#define ACB_COMMAND_SUCCESSFUL 0x0000 /* means cmd was successful */ +#define ACB_NOT_CHAIN_END 0x0000 /* tell TRC more CBs in chain */ +#define ACB_CHAIN_END 0x8000 /* tell TRC last CB in chain */ +#define ACB_COMMAND_NO_INTERRUPT 0x0000 /* tell TRC no INT after CB */ +#define ACB_COMMAND_INTERRUPT 0x2000 /* tell TRC to INT after CB */ +#define ACB_SUB_CMD_NOP 0x0000 +#define ACB_CMD_HIC_NOP 0x0080 +#define ACB_CMD_MCT_NOP 0x0000 +#define ACB_CMD_MCT_TEST 0x0001 +#define ACB_CMD_HIC_TEST 0x0081 +#define ACB_CMD_INSERT 0x0002 +#define ACB_CMD_REMOVE 0x0003 +#define ACB_CMD_MCT_WRITE_VALUE 0x0004 +#define ACB_CMD_HIC_WRITE_VALUE 0x0084 +#define ACB_CMD_MCT_READ_VALUE 0x0005 +#define ACB_CMD_HIC_READ_VALUE 0x0085 +#define ACB_CMD_INIT_TX_RX 0x0086 +#define ACB_CMD_INIT_TRC_TIMERS 0x0006 +#define ACB_CMD_READ_TRC_STATUS 0x0007 +#define ACB_CMD_CHANGE_JOIN_STATE 0x0008 +#define ACB_CMD_RESERVED_9 0x0009 +#define ACB_CMD_RESERVED_A 0x000A +#define ACB_CMD_RESERVED_B 0x000B +#define ACB_CMD_RESERVED_C 0x000C +#define ACB_CMD_RESERVED_D 0x000D +#define ACB_CMD_RESERVED_E 0x000E +#define ACB_CMD_RESERVED_F 0x000F + +#define TRC_MAC_REGISTERS_TEST 0x0000 +#define TRC_INTERNAL_LOOPBACK 0x0001 +#define TRC_TRI_LOOPBACK 0x0002 +#define TRC_INTERNAL_ROM_TEST 0x0003 +#define TRC_LOBE_MEDIA_TEST 0x0004 +#define TRC_ANALOG_TEST 0x0005 +#define TRC_HOST_INTERFACE_REG_TEST 0x0003 + +#define TEST_DMA_1 0x0000 +#define TEST_DMA_2 0x0001 +#define TEST_MCT_ROM 0x0002 +#define HIC_INTERNAL_DIAG 0x0003 + +#define ABORT_TRANSMIT_PRIORITY_0 0x0001 +#define ABORT_TRANSMIT_PRIORITY_1 0x0002 +#define ABORT_TRANSMIT_PRIORITY_2 0x0004 +#define ABORT_TRANSMIT_PRIORITY_3 0x0008 +#define ABORT_TRANSMIT_PRIORITY_4 0x0010 +#define ABORT_TRANSMIT_PRIORITY_5 0x0020 +#define ABORT_TRANSMIT_PRIORITY_6 0x0040 +#define ABORT_TRANSMIT_PRIORITY_7 0x0080 + +#define TX_PENDING_PRIORITY_0 0x0001 +#define TX_PENDING_PRIORITY_1 0x0002 +#define TX_PENDING_PRIORITY_2 0x0004 +#define TX_PENDING_PRIORITY_3 0x0008 +#define TX_PENDING_PRIORITY_4 0x0010 +#define TX_PENDING_PRIORITY_5 0x0020 +#define TX_PENDING_PRIORITY_6 0x0040 +#define TX_PENDING_PRIORITY_7 0x0080 + +#define FCB_FRAME_LENGTH 0x100 +#define FCB_COMMAND_DONE 0x8000 /* FCB Word 0 */ +#define FCB_NOT_CHAIN_END 0x0000 /* FCB Word 1 */ +#define FCB_CHAIN_END 0x8000 +#define FCB_NO_WARNING 0x0000 +#define FCB_WARNING 0x4000 +#define FCB_INTERRUPT_DISABLE 0x0000 +#define FCB_INTERRUPT_ENABLE 0x2000 + +#define FCB_ENABLE_IMA 0x0008 +#define FCB_ENABLE_TES 0x0004 /* Guarantee Tx before Int */ +#define FCB_ENABLE_TFS 0x0002 /* Post Tx Frame Status */ +#define FCB_ENABLE_NTC 0x0001 /* No Tx CRC */ + +#define FCB_TX_STATUS_CR2 0x0004 +#define FCB_TX_STATUS_AR2 0x0008 +#define FCB_TX_STATUS_CR1 0x0040 +#define FCB_TX_STATUS_AR1 0x0080 +#define FCB_TX_AC_BITS (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2) +#define FCB_TX_STATUS_E 0x0100 + +#define FCB_RX_STATUS_ANY_ERROR 0x0001 +#define FCB_RX_STATUS_FCS_ERROR 0x0002 + +#define 
FCB_RX_STATUS_IA_MATCHED 0x0400 +#define FCB_RX_STATUS_IGA_BSGA_MATCHED 0x0500 +#define FCB_RX_STATUS_FA_MATCHED 0x0600 +#define FCB_RX_STATUS_BA_MATCHED 0x0700 +#define FCB_RX_STATUS_DA_MATCHED 0x0400 +#define FCB_RX_STATUS_SOURCE_ROUTING 0x0800 + +#define BDB_BUFFER_SIZE 0x100 +#define BDB_NOT_CHAIN_END 0x0000 +#define BDB_CHAIN_END 0x8000 +#define BDB_NO_WARNING 0x0000 +#define BDB_WARNING 0x4000 + +#define ERROR_COUNTERS_CHANGED 0x0001 +#define TI_NDIS_RING_STATUS_CHANGED 0x0002 +#define UNA_CHANGED 0x0004 +#define READY_TO_SEND_RQ_INIT 0x0008 + +#define SCGB_ADDRESS_POINTER_FORMAT INTEL_ADDRESS_POINTER_FORMAT +#define SCGB_DATA_FORMAT INTEL_DATA_FORMAT +#define SCGB_MULTI_WORD_CONTROL 0 +#define SCGB_BURST_LENGTH 0x000E /* DMA Burst Length */ + +#define SCGB_CONFIG (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH) + +#define ISCP_BLOCK_SIZE 0x0A +#define RAM_SIZE 0x10000 +#define INIT_SYS_CONFIG_PTR_OFFSET (RAM_SIZE-ISCP_BLOCK_SIZE) +#define SCGP_BLOCK_OFFSET 0 + +#define SCLB_NOT_VALID 0x0000 /* Initially, SCLB not valid */ +#define SCLB_VALID 0x8000 /* Host tells TRC SCLB valid */ +#define SCLB_PROCESSED 0x0000 /* TRC says SCLB processed */ +#define SCLB_RESUME_CONTROL_NOT_VALID 0x0000 /* Initially, RC not valid */ +#define SCLB_RESUME_CONTROL_VALID 0x4000 /* Host tells TRC RC valid */ +#define SCLB_IACK_CODE_NOT_VALID 0x0000 /* Initially, IACK not valid */ +#define SCLB_IACK_CODE_VALID 0x2000 /* Host tells TRC IACK valid */ +#define SCLB_CMD_NOP 0x0000 +#define SCLB_CMD_REMOVE 0x0001 +#define SCLB_CMD_SUSPEND_ACB_CHAIN 0x0002 +#define SCLB_CMD_SET_INTERRUPT_MASK 0x0003 +#define SCLB_CMD_CLEAR_INTERRUPT_MASK 0x0004 +#define SCLB_CMD_RESERVED_5 0x0005 +#define SCLB_CMD_RESERVED_6 0x0006 +#define SCLB_CMD_RESERVED_7 0x0007 +#define SCLB_CMD_RESERVED_8 0x0008 +#define SCLB_CMD_RESERVED_9 0x0009 +#define SCLB_CMD_RESERVED_A 0x000A +#define SCLB_CMD_RESERVED_B 0x000B +#define SCLB_CMD_RESERVED_C 0x000C +#define SCLB_CMD_RESERVED_D 0x000D +#define SCLB_CMD_RESERVED_E 0x000E +#define SCLB_CMD_RESERVED_F 0x000F + +#define SCLB_RC_ACB 0x0001 /* Action Command Block Chain */ +#define SCLB_RC_RES0 0x0002 /* Always Zero */ +#define SCLB_RC_RES1 0x0004 /* Always Zero */ +#define SCLB_RC_RES2 0x0008 /* Always Zero */ +#define SCLB_RC_RX_MAC_FCB 0x0010 /* RX_MAC_FCB Chain */ +#define SCLB_RC_RX_MAC_BDB 0x0020 /* RX_MAC_BDB Chain */ +#define SCLB_RC_RX_NON_MAC_FCB 0x0040 /* RX_NON_MAC_FCB Chain */ +#define SCLB_RC_RX_NON_MAC_BDB 0x0080 /* RX_NON_MAC_BDB Chain */ +#define SCLB_RC_TFCB0 0x0100 /* TX Priority 0 FCB Chain */ +#define SCLB_RC_TFCB1 0x0200 /* TX Priority 1 FCB Chain */ +#define SCLB_RC_TFCB2 0x0400 /* TX Priority 2 FCB Chain */ +#define SCLB_RC_TFCB3 0x0800 /* TX Priority 3 FCB Chain */ +#define SCLB_RC_TFCB4 0x1000 /* TX Priority 4 FCB Chain */ +#define SCLB_RC_TFCB5 0x2000 /* TX Priority 5 FCB Chain */ +#define SCLB_RC_TFCB6 0x4000 /* TX Priority 6 FCB Chain */ +#define SCLB_RC_TFCB7 0x8000 /* TX Priority 7 FCB Chain */ + +#define SCLB_IMC_RES0 0x0001 /* */ +#define SCLB_IMC_MAC_TYPE_3 0x0002 /* MAC_ARC_INDICATE */ +#define SCLB_IMC_MAC_ERROR_COUNTERS 0x0004 /* */ +#define SCLB_IMC_RES1 0x0008 /* */ +#define SCLB_IMC_MAC_TYPE_2 0x0010 /* QUE_MAC_INDICATE */ +#define SCLB_IMC_TX_FRAME 0x0020 /* */ +#define SCLB_IMC_END_OF_TX_QUEUE 0x0040 /* */ +#define SCLB_IMC_NON_MAC_RX_RESOURCE 0x0080 /* */ +#define SCLB_IMC_MAC_RX_RESOURCE 0x0100 /* */ +#define SCLB_IMC_NON_MAC_RX_FRAME 0x0200 /* */ +#define SCLB_IMC_MAC_RX_FRAME 0x0400 /* */ +#define SCLB_IMC_TRC_FIFO_STATUS 0x0800 
/* */ +#define SCLB_IMC_COMMAND_STATUS 0x1000 /* */ +#define SCLB_IMC_MAC_TYPE_1 0x2000 /* Self Removed */ +#define SCLB_IMC_TRC_INTRNL_TST_STATUS 0x4000 /* */ +#define SCLB_IMC_RES2 0x8000 /* */ + +#define DMA_TRIGGER 0x0004 +#define FREQ_16MB_BIT 0x0010 +#define THDREN 0x0020 +#define CFG0_RSV1 0x0040 +#define CFG0_RSV2 0x0080 +#define ETREN 0x0100 +#define RX_OWN_BIT 0x0200 +#define RXATMAC 0x0400 +#define PROMISCUOUS_BIT 0x0800 +#define USETPT 0x1000 +#define SAVBAD_BIT 0x2000 +#define ONEQUE 0x4000 +#define NO_AUTOREMOVE 0x8000 + +#define RX_FCB_AREA_8316 0x00000000 +#define RX_BUFF_AREA_8316 0x00000000 + +#define TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access) +#define RX_FCB_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316) +#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316) + +// Offset 0: MSR - Memory Select Register +// +#define r587_MSR 0x000 // Register Offset +//#define MSR_RST 0x080 // LAN Controller Reset +#define MSR_MENB 0x040 // Shared Memory Enable +#define MSR_RA18 0x020 // Ram Address bit 18 (583, 584, 587) +#define MSR_RA17 0x010 // Ram Address bit 17 (583, 584, 585/790) +#define MSR_RA16 0x008 // Ram Address bit 16 (583, 584, 585/790) +#define MSR_RA15 0x004 // Ram Address bit 15 (583, 584, 585/790) +#define MSR_RA14 0x002 // Ram Address bit 14 (583, 584, 585/790) +#define MSR_RA13 0x001 // Ram Address bit 13 (583, 584, 585/790) + +#define MSR_MASK 0x03F // Mask for Address bits RA18-RA13 (583, 584, 587) + +#define MSR 0x00 +#define IRR 0x04 +#define HWR 0x04 +#define LAAR 0x05 +#define IMCCR 0x05 +#define LAR0 0x08 +#define BDID 0x0E // Adapter ID byte register offset +#define CSR 0x10 +#define PR 0x11 + +#define MSR_RST 0x80 +#define MSR_MEMB 0x40 +#define MSR_0WS 0x20 + +#define FORCED_16BIT_MODE 0x0002 + +#define INTERFRAME_SPACING_16 0x0003 /* 6 bytes */ +#define INTERFRAME_SPACING_4 0x0001 /* 2 bytes */ +#define MULTICAST_ADDRESS_BIT 0x0010 +#define NON_SRC_ROUTING_BIT 0x0020 + +#define LOOPING_MODE_MASK 0x0007 + +/* + * Decode firmware defines. + */ +#define SWAP_BYTES(X) ((X & 0xff) << 8) | (X >> 8) +#define WEIGHT_OFFSET 5 +#define TREE_SIZE_OFFSET 9 +#define TREE_OFFSET 11 + +/* The Huffman Encoding Tree is constructed of these nodes. */ +typedef struct { + __u8 llink; /* Short version of above node. */ + __u8 tag; + __u8 info; /* This node is used on decodes. */ + __u8 rlink; +} DECODE_TREE_NODE; + +#define ROOT 0 /* Branch value. */ +#define LEAF 0 /* Tag field value. */ +#define BRANCH 1 /* Tag field value. 
*/ + +/* + * Multicast Table Structure + */ +typedef struct { + __u8 address[6]; + __u8 instance_count; +} McTable; + +/* + * Fragment Descriptor Definition + */ +typedef struct { + __u8 *fragment_ptr; + __u32 fragment_length; +} FragmentStructure; + +/* + * Data Buffer Structure Definition + */ +typedef struct { + __u32 fragment_count; + FragmentStructure fragment_list[MAXFRAGMENTS]; +} DataBufferStructure; + +#pragma pack(1) +typedef struct { + __u8 IType; + __u8 ISubtype; +} Interrupt_Status_Word; + +#pragma pack(1) +typedef struct BDBlockType { + __u16 info; /* 02 */ + __u32 trc_next_ptr; /* 06 */ + __u32 trc_data_block_ptr; /* 10 */ + __u16 buffer_length; /* 12 */ + + __u16 *data_block_ptr; /* 16 */ + struct BDBlockType *next_ptr; /* 20 */ + struct BDBlockType *back_ptr; /* 24 */ + __u8 filler[8]; /* 32 */ +} BDBlock; + +#pragma pack(1) +typedef struct FCBlockType { + __u16 frame_status; /* 02 */ + __u16 info; /* 04 */ + __u32 trc_next_ptr; /* 08 */ + __u32 trc_bdb_ptr; /* 12 */ + __u16 frame_length; /* 14 */ + + BDBlock *bdb_ptr; /* 18 */ + struct FCBlockType *next_ptr; /* 22 */ + struct FCBlockType *back_ptr; /* 26 */ + __u16 memory_alloc; /* 28 */ + __u8 filler[4]; /* 32 */ + +} FCBlock; + +#pragma pack(1) +typedef struct SBlockType{ + __u8 Internal_Error_Count; + __u8 Line_Error_Count; + __u8 AC_Error_Count; + __u8 Burst_Error_Count; + __u8 RESERVED_COUNTER_0; + __u8 AD_TRANS_Count; + __u8 RCV_Congestion_Count; + __u8 Lost_FR_Error_Count; + __u8 FREQ_Error_Count; + __u8 FR_Copied_Error_Count; + __u8 RESERVED_COUNTER_1; + __u8 Token_Error_Count; + + __u16 TI_NDIS_Ring_Status; + __u16 BCN_Type; + __u16 Error_Code; + __u16 SA_of_Last_AMP_SMP[3]; + __u16 UNA[3]; + __u16 Ucode_Version_Number; + __u16 Status_CHG_Indicate; + __u16 RESERVED_STATUS_0; +} SBlock; + +#pragma pack(1) +typedef struct ACBlockType { + __u16 cmd_done_status; /* 02 */ + __u16 cmd_info; /* 04 */ + __u32 trc_next_ptr; /* 08 */ + __u16 cmd; /* 10 */ + __u16 subcmd; /* 12 */ + __u16 data_offset_lo; /* 14 */ + __u16 data_offset_hi; /* 16 */ + + struct ACBlockType *next_ptr; /* 20 */ + + __u8 filler[12]; /* 32 */ +} ACBlock; + +#define NUM_OF_INTERRUPTS 0x20 + +#pragma pack(1) +typedef struct { + Interrupt_Status_Word IStatus[NUM_OF_INTERRUPTS]; +} ISBlock; + +#pragma pack(1) +typedef struct { + __u16 valid_command; /* 02 */ + __u16 iack_code; /* 04 */ + __u16 resume_control; /* 06 */ + __u16 int_mask_control; /* 08 */ + __u16 int_mask_state; /* 10 */ + + __u8 filler[6]; /* 16 */ +} SCLBlock; + +#pragma pack(1) +typedef struct +{ + __u16 config; /* 02 */ + __u32 trc_sclb_ptr; /* 06 */ + __u32 trc_acb_ptr; /* 10 */ + __u32 trc_isb_ptr; /* 14 */ + __u16 isbsiz; /* 16 */ + + SCLBlock *sclb_ptr; /* 20 */ + ACBlock *acb_ptr; /* 24 */ + ISBlock *isb_ptr; /* 28 */ + + __u16 Non_Mac_Rx_Bdbs; /* 30 DEBUG */ + __u8 filler[2]; /* 32 */ + +} SCGBlock; + +#pragma pack(1) +typedef struct +{ + __u32 trc_scgb_ptr; + SCGBlock *scgb_ptr; +} ISCPBlock; +#pragma pack() + +typedef struct net_local { + ISCPBlock *iscpb_ptr; + SCGBlock *scgb_ptr; + SCLBlock *sclb_ptr; + ISBlock *isb_ptr; + + ACBlock *acb_head; + ACBlock *acb_curr; + ACBlock *acb_next; + + __u8 adapter_name[12]; + + __u16 num_rx_bdbs [NUM_RX_QS_USED]; + __u16 num_rx_fcbs [NUM_RX_QS_USED]; + + __u16 num_tx_bdbs [NUM_TX_QS_USED]; + __u16 num_tx_fcbs [NUM_TX_QS_USED]; + + __u16 num_of_tx_buffs; + + __u16 tx_buff_size [NUM_TX_QS_USED]; + __u16 tx_buff_used [NUM_TX_QS_USED]; + __u16 tx_queue_status [NUM_TX_QS_USED]; + + FCBlock *tx_fcb_head[NUM_TX_QS_USED]; + FCBlock 
*tx_fcb_curr[NUM_TX_QS_USED]; + FCBlock *tx_fcb_end[NUM_TX_QS_USED]; + BDBlock *tx_bdb_head[NUM_TX_QS_USED]; + __u16 *tx_buff_head[NUM_TX_QS_USED]; + __u16 *tx_buff_end[NUM_TX_QS_USED]; + __u16 *tx_buff_curr[NUM_TX_QS_USED]; + __u16 num_tx_fcbs_used[NUM_TX_QS_USED]; + + FCBlock *rx_fcb_head[NUM_RX_QS_USED]; + FCBlock *rx_fcb_curr[NUM_RX_QS_USED]; + BDBlock *rx_bdb_head[NUM_RX_QS_USED]; + BDBlock *rx_bdb_curr[NUM_RX_QS_USED]; + BDBlock *rx_bdb_end[NUM_RX_QS_USED]; + __u16 *rx_buff_head[NUM_RX_QS_USED]; + __u16 *rx_buff_end[NUM_RX_QS_USED]; + + __u32 *ptr_local_ring_num; + + __u32 sh_mem_used; + + __u16 page_offset_mask; + + __u16 authorized_function_classes; + __u16 authorized_access_priority; + + __u16 num_acbs; + __u16 num_acbs_used; + __u16 acb_pending; + + __u16 current_isb_index; + + __u8 monitor_state; + __u8 monitor_state_ready; + __u16 ring_status; + __u8 ring_status_flags; + __u8 state; + + __u8 join_state; + + __u8 slot_num; + __u16 pos_id; + + __u32 *ptr_una; + __u32 *ptr_bcn_type; + __u32 *ptr_tx_fifo_underruns; + __u32 *ptr_rx_fifo_underruns; + __u32 *ptr_rx_fifo_overruns; + __u32 *ptr_tx_fifo_overruns; + __u32 *ptr_tx_fcb_overruns; + __u32 *ptr_rx_fcb_overruns; + __u32 *ptr_tx_bdb_overruns; + __u32 *ptr_rx_bdb_overruns; + + __u16 receive_queue_number; + + __u8 rx_fifo_overrun_count; + __u8 tx_fifo_overrun_count; + + __u16 adapter_flags; + __u16 adapter_flags1; + __u16 *misc_command_data; + __u16 max_packet_size; + + __u16 config_word0; + __u16 config_word1; + + __u8 trc_mask; + + __u16 source_ring_number; + __u16 target_ring_number; + + __u16 microcode_version; + + __u16 bic_type; + __u16 nic_type; + __u16 board_id; + + __u16 rom_size; + __u32 rom_base; + __u16 ram_size; + __u16 ram_usable; + __u32 ram_base; + __u32 ram_access; + + __u16 extra_info; + __u16 mode_bits; + __u16 media_menu; + __u16 media_type; + __u16 adapter_bus; + + __u16 status; + __u16 receive_mask; + + __u16 group_address_0; + __u16 group_address[2]; + __u16 functional_address_0; + __u16 functional_address[2]; + __u16 bitwise_group_address[2]; + + __u8 cleanup; + + struct sk_buff_head SendSkbQueue; + __u16 QueueSkb; + + struct tr_statistics MacStat; /* MAC statistics structure */ + + spinlock_t lock; +} NET_LOCAL; + +/************************************ + * SNMP-ON-BOARD Agent Link Structure + ************************************/ + +typedef struct { + __u8 LnkSigStr[12]; /* signature string "SmcLinkTable" */ + __u8 LnkDrvTyp; /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */ + __u8 LnkFlg; /* 0 if no agent linked, 1 if agent linked */ + void *LnkNfo; /* routine which returns pointer to NIC info */ + void *LnkAgtRcv; /* pointer to agent receive trap entry */ + void *LnkAgtXmt; /* pointer to agent transmit trap +entry */ +void *LnkGet; /* pointer to NIC receive data +copy routine */ + void *LnkSnd; /* pointer to NIC send routine +*/ + void *LnkRst; /* pointer to NIC driver reset +routine */ + void *LnkMib; /* pointer to MIB data base */ + void *LnkMibAct; /* pointer to MIB action routine list */ + __u16 LnkCntOffset; /* offset to error counters */ + __u16 LnkCntNum; /* number of error counters */ + __u16 LnkCntSize; /* size of error counters i.e. 
32 = 32 bits */ + void *LnkISR; /* pointer to interrupt vector */ + __u8 LnkFrmTyp; /* 1=Ethernet, 2=Token Ring */ + __u8 LnkDrvVer1 ; /* driver major version */ + __u8 LnkDrvVer2 ; /* driver minor version */ +} AgentLink; + +/* + * Definitions for pcm_card_flags(bit_mapped) + */ +#define REG_COMPLETE 0x0001 +#define INSERTED 0x0002 +#define PCC_INSERTED 0x0004 /* 1=currently inserted, 0=cur removed */ + +/* + * Adapter RAM test patterns + */ +#define RAM_PATTERN_1 0x55AA +#define RAM_PATTERN_2 0x9249 +#define RAM_PATTERN_3 0xDB6D + +/* + * definitions for RAM test + */ +#define ROM_SIGNATURE 0xAA55 +#define MIN_ROM_SIZE 0x2000 + +/* + * Return Codes + */ +#define SUCCESS 0x0000 +#define ADAPTER_AND_CONFIG 0x0001 +#define ADAPTER_NO_CONFIG 0x0002 +#define NOT_MY_INTERRUPT 0x0003 +#define FRAME_REJECTED 0x0004 +#define EVENTS_DISABLED 0x0005 +#define OUT_OF_RESOURCES 0x0006 +#define INVALID_PARAMETER 0x0007 +#define INVALID_FUNCTION 0x0008 +#define INITIALIZE_FAILED 0x0009 +#define CLOSE_FAILED 0x000A +#define MAX_COLLISIONS 0x000B +#define NO_SUCH_DESTINATION 0x000C +#define BUFFER_TOO_SMALL_ERROR 0x000D +#define ADAPTER_CLOSED 0x000E +#define UCODE_NOT_PRESENT 0x000F +#define FIFO_UNDERRUN 0x0010 +#define DEST_OUT_OF_RESOURCES 0x0011 +#define ADAPTER_NOT_INITIALIZED 0x0012 +#define PENDING 0x0013 +#define UCODE_PRESENT 0x0014 +#define NOT_INIT_BY_BRIDGE 0x0015 + +#define OPEN_FAILED 0x0080 +#define HARDWARE_FAILED 0x0081 +#define SELF_TEST_FAILED 0x0082 +#define RAM_TEST_FAILED 0x0083 +#define RAM_CONFLICT 0x0084 +#define ROM_CONFLICT 0x0085 +#define UNKNOWN_ADAPTER 0x0086 +#define CONFIG_ERROR 0x0087 +#define CONFIG_WARNING 0x0088 +#define NO_FIXED_CNFG 0x0089 +#define EEROM_CKSUM_ERROR 0x008A +#define ROM_SIGNATURE_ERROR 0x008B +#define ROM_CHECKSUM_ERROR 0x008C +#define ROM_SIZE_ERROR 0x008D +#define UNSUPPORTED_NIC_CHIP 0x008E +#define NIC_REG_ERROR 0x008F +#define BIC_REG_ERROR 0x0090 +#define MICROCODE_TEST_ERROR 0x0091 +#define LOBE_MEDIA_TEST_FAILED 0x0092 + +#define ADAPTER_FOUND_LAN_CORRUPT 0x009B + +#define ADAPTER_NOT_FOUND 0xFFFF + +#define ILLEGAL_FUNCTION INVALID_FUNCTION + +/* Errors */ +#define IO_BASE_INVALID 0x0001 +#define IO_BASE_RANGE 0x0002 +#define IRQ_INVALID 0x0004 +#define IRQ_RANGE 0x0008 +#define RAM_BASE_INVALID 0x0010 +#define RAM_BASE_RANGE 0x0020 +#define RAM_SIZE_RANGE 0x0040 +#define MEDIA_INVALID 0x0800 + +/* Warnings */ +#define IRQ_MISMATCH 0x0080 +#define RAM_BASE_MISMATCH 0x0100 +#define RAM_SIZE_MISMATCH 0x0200 +#define BUS_MODE_MISMATCH 0x0400 + +#define RX_CRC_ERROR 0x01 +#define RX_ALIGNMENT_ERROR 0x02 +#define RX_HW_FAILED 0x80 + +/* + * Definitions for the field RING_STATUS_FLAGS + */ +#define RING_STATUS_CHANGED 0X01 +#define MONITOR_STATE_CHANGED 0X02 +#define JOIN_STATE_CHANGED 0X04 + +/* + * Definitions for the field JOIN_STATE + */ +#define JS_BYPASS_STATE 0x00 +#define JS_LOBE_TEST_STATE 0x01 +#define JS_DETECT_MONITOR_PRESENT_STATE 0x02 +#define JS_AWAIT_NEW_MONITOR_STATE 0x03 +#define JS_DUPLICATE_ADDRESS_TEST_STATE 0x04 +#define JS_NEIGHBOR_NOTIFICATION_STATE 0x05 +#define JS_REQUEST_INITIALIZATION_STATE 0x06 +#define JS_JOIN_COMPLETE_STATE 0x07 +#define JS_BYPASS_WAIT_STATE 0x08 + +/* + * Definitions for the field MONITOR_STATE + */ +#define MS_MONITOR_FSM_INACTIVE 0x00 +#define MS_REPEAT_BEACON_STATE 0x01 +#define MS_REPEAT_CLAIM_TOKEN_STATE 0x02 +#define MS_TRANSMIT_CLAIM_TOKEN_STATE 0x03 +#define MS_STANDBY_MONITOR_STATE 0x04 +#define MS_TRANSMIT_BEACON_STATE 0x05 +#define MS_ACTIVE_MONITOR_STATE 0x06 +#define 
MS_TRANSMIT_RING_PURGE_STATE 0x07 +#define MS_BEACON_TEST_STATE 0x09 + +/* + * Definitions for the bit-field RING_STATUS + */ +#define SIGNAL_LOSS 0x8000 +#define HARD_ERROR 0x4000 +#define SOFT_ERROR 0x2000 +#define TRANSMIT_BEACON 0x1000 +#define LOBE_WIRE_FAULT 0x0800 +#define AUTO_REMOVAL_ERROR 0x0400 +#define REMOVE_RECEIVED 0x0100 +#define COUNTER_OVERFLOW 0x0080 +#define SINGLE_STATION 0x0040 +#define RING_RECOVERY 0x0020 + +/* + * Definitions for the field BUS_TYPE + */ +#define AT_BUS 0x00 +#define MCA_BUS 0x01 +#define EISA_BUS 0x02 +#define PCI_BUS 0x03 +#define PCMCIA_BUS 0x04 + +/* + * Definitions for adapter_flags + */ +#define RX_VALID_LOOKAHEAD 0x0001 +#define FORCED_16BIT_MODE 0x0002 +#define ADAPTER_DISABLED 0x0004 +#define TRANSMIT_CHAIN_INT 0x0008 +#define EARLY_RX_FRAME 0x0010 +#define EARLY_TX 0x0020 +#define EARLY_RX_COPY 0x0040 +#define USES_PHYSICAL_ADDR 0x0080 /* Rsvd for DEC PCI and 9232 */ +#define NEEDS_PHYSICAL_ADDR 0x0100 /* Reserved*/ +#define RX_STATUS_PENDING 0x0200 +#define ERX_DISABLED 0x0400 /* EARLY_RX_ENABLE rcv_mask */ +#define ENABLE_TX_PENDING 0x0800 +#define ENABLE_RX_PENDING 0x1000 +#define PERM_CLOSE 0x2000 +#define IO_MAPPED 0x4000 /* IOmapped bus interface 795 */ +#define ETX_DISABLED 0x8000 + + +/* + * Definitions for adapter_flags1 + */ +#define TX_PHY_RX_VIRT 0x0001 +#define NEEDS_HOST_RAM 0x0002 +#define NEEDS_MEDIA_TYPE 0x0004 +#define EARLY_RX_DONE 0x0008 +#define PNP_BOOT_BIT 0x0010 /* activates PnP & config on power-up */ + /* clear => regular PnP operation */ +#define PNP_ENABLE 0x0020 /* regular PnP operation clear => */ + /* no PnP, overrides PNP_BOOT_BIT */ +#define SATURN_ENABLE 0x0040 + +#define ADAPTER_REMOVABLE 0x0080 /* adapter is hot swappable */ +#define TX_PHY 0x0100 /* Uses physical address for tx bufs */ +#define RX_PHY 0x0200 /* Uses physical address for rx bufs */ +#define TX_VIRT 0x0400 /* Uses virtual addr for tx bufs */ +#define RX_VIRT 0x0800 +#define NEEDS_SERVICE 0x1000 + +/* + * Adapter Status Codes + */ +#define OPEN 0x0001 +#define INITIALIZED 0x0002 +#define CLOSED 0x0003 +#define FAILED 0x0005 +#define NOT_INITIALIZED 0x0006 +#define IO_CONFLICT 0x0007 +#define CARD_REMOVED 0x0008 +#define CARD_INSERTED 0x0009 + +/* + * Mode Bit Definitions + */ +#define INTERRUPT_STATUS_BIT 0x8000 /* PC Interrupt Line: 0 = Not Enabled */ +#define BOOT_STATUS_MASK 0x6000 /* Mask to isolate BOOT_STATUS */ +#define BOOT_INHIBIT 0x0000 /* BOOT_STATUS is 'inhibited' */ +#define BOOT_TYPE_1 0x2000 /* Unused BOOT_STATUS value */ +#define BOOT_TYPE_2 0x4000 /* Unused BOOT_STATUS value */ +#define BOOT_TYPE_3 0x6000 /* Unused BOOT_STATUS value */ +#define ZERO_WAIT_STATE_MASK 0x1800 /* Mask to isolate Wait State flags */ +#define ZERO_WAIT_STATE_8_BIT 0x1000 /* 0 = Disabled (Inserts Wait States) */ +#define ZERO_WAIT_STATE_16_BIT 0x0800 /* 0 = Disabled (Inserts Wait States) */ +#define LOOPING_MODE_MASK 0x0007 +#define LOOPBACK_MODE_0 0x0000 +#define LOOPBACK_MODE_1 0x0001 +#define LOOPBACK_MODE_2 0x0002 +#define LOOPBACK_MODE_3 0x0003 +#define LOOPBACK_MODE_4 0x0004 +#define LOOPBACK_MODE_5 0x0005 +#define LOOPBACK_MODE_6 0x0006 +#define LOOPBACK_MODE_7 0x0007 +#define AUTO_MEDIA_DETECT 0x0008 +#define MANUAL_CRC 0x0010 +#define EARLY_TOKEN_REL 0x0020 /* Early Token Release for Token Ring */ +#define UMAC 0x0040 +#define UTP2_PORT 0x0080 /* For 8216T2, 0=port A, 1=Port B. */ +#define BNC_10BT_INTERFACE 0x0600 /* BNC and UTP current media set */ +#define UTP_INTERFACE 0x0500 /* Ethernet UTP Only. 
*/ +#define BNC_INTERFACE 0x0400 +#define AUI_INTERFACE 0x0300 +#define AUI_10BT_INTERFACE 0x0200 +#define STARLAN_10_INTERFACE 0x0100 +#define INTERFACE_TYPE_MASK 0x0700 + +/* + * Media Type Bit Definitions + * + * legend: TP = Twisted Pair + * STP = Shielded twisted pair + * UTP = Unshielded twisted pair + */ + +#define CNFG_MEDIA_TYPE_MASK 0x001e /* POS Register 3 Mask */ + +#define MEDIA_S10 0x0000 /* Ethernet adapter, TP. */ +#define MEDIA_AUI_UTP 0x0001 /* Ethernet adapter, AUI/UTP media */ +#define MEDIA_BNC 0x0002 /* Ethernet adapter, BNC media. */ +#define MEDIA_AUI 0x0003 /* Ethernet Adapter, AUI media. */ +#define MEDIA_STP_16 0x0004 /* TokenRing adap, 16Mbit STP. */ +#define MEDIA_STP_4 0x0005 /* TokenRing adap, 4Mbit STP. */ +#define MEDIA_UTP_16 0x0006 /* TokenRing adap, 16Mbit UTP. */ +#define MEDIA_UTP_4 0x0007 /* TokenRing adap, 4Mbit UTP. */ +#define MEDIA_UTP 0x0008 /* Ethernet adapter, UTP media (no AUI) +*/ +#define MEDIA_BNC_UTP 0x0010 /* Ethernet adapter, BNC/UTP media */ +#define MEDIA_UTPFD 0x0011 /* Ethernet adapter, TP full duplex */ +#define MEDIA_UTPNL 0x0012 /* Ethernet adapter, TP with link integrity test disabled */ +#define MEDIA_AUI_BNC 0x0013 /* Ethernet adapter, AUI/BNC media */ +#define MEDIA_AUI_BNC_UTP 0x0014 /* Ethernet adapter, AUI_BNC/UTP */ +#define MEDIA_UTPA 0x0015 /* Ethernet UTP-10Mbps Ports A */ +#define MEDIA_UTPB 0x0016 /* Ethernet UTP-10Mbps Ports B */ +#define MEDIA_STP_16_UTP_16 0x0017 /* Token Ring STP-16Mbps/UTP-16Mbps */ +#define MEDIA_STP_4_UTP_4 0x0018 /* Token Ring STP-4Mbps/UTP-4Mbps */ + +#define MEDIA_STP100_UTP100 0x0020 /* Ethernet STP-100Mbps/UTP-100Mbps */ +#define MEDIA_UTP100FD 0x0021 /* Ethernet UTP-100Mbps, full duplex */ +#define MEDIA_UTP100 0x0022 /* Ethernet UTP-100Mbps */ + + +#define MEDIA_UNKNOWN 0xFFFF /* Unknown adapter/media type */ + +/* + * Definitions for the field: + * media_type2 + */ +#define MEDIA_TYPE_MII 0x0001 +#define MEDIA_TYPE_UTP 0x0002 +#define MEDIA_TYPE_BNC 0x0004 +#define MEDIA_TYPE_AUI 0x0008 +#define MEDIA_TYPE_S10 0x0010 +#define MEDIA_TYPE_AUTO_SENSE 0x1000 +#define MEDIA_TYPE_AUTO_DETECT 0x4000 +#define MEDIA_TYPE_AUTO_NEGOTIATE 0x8000 + +/* + * Definitions for the field: + * line_speed + */ +#define LINE_SPEED_UNKNOWN 0x0000 +#define LINE_SPEED_4 0x0001 +#define LINE_SPEED_10 0x0002 +#define LINE_SPEED_16 0x0004 +#define LINE_SPEED_100 0x0008 +#define LINE_SPEED_T4 0x0008 /* 100BaseT4 aliased for 9332BVT */ +#define LINE_SPEED_FULL_DUPLEX 0x8000 + +/* + * Definitions for the field: + * bic_type (Bus interface chip type) + */ +#define BIC_NO_CHIP 0x0000 /* Bus interface chip not implemented */ +#define BIC_583_CHIP 0x0001 /* 83C583 bus interface chip */ +#define BIC_584_CHIP 0x0002 /* 83C584 bus interface chip */ +#define BIC_585_CHIP 0x0003 /* 83C585 bus interface chip */ +#define BIC_593_CHIP 0x0004 /* 83C593 bus interface chip */ +#define BIC_594_CHIP 0x0005 /* 83C594 bus interface chip */ +#define BIC_564_CHIP 0x0006 /* PCMCIA Bus interface chip */ +#define BIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */ +#define BIC_571_CHIP 0x0008 /* 83C571 EISA bus master i-face */ +#define BIC_587_CHIP 0x0009 /* Token Ring AT bus master i-face */ +#define BIC_574_CHIP 0x0010 /* FEAST bus interface chip */ +#define BIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */ +#define BIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */ +#define BIC_8432E_CHIP 0x0013 /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */ +#define BIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 
Mbps Ethernet BIC/NIC */ +#define BIC_C94_CHIP 0x0015 /* 91C94 bus i-face in PCMCIA mode */ +#define BIC_X8020_CHIP 0x0016 /* Xilinx PCMCIA multi-func i-face */ + +/* + * Definitions for the field: + * nic_type (Bus interface chip type) + */ +#define NIC_UNK_CHIP 0x0000 /* Unknown NIC chip */ +#define NIC_8390_CHIP 0x0001 /* DP8390 Ethernet NIC */ +#define NIC_690_CHIP 0x0002 /* 83C690 Ethernet NIC */ +#define NIC_825_CHIP 0x0003 /* 83C825 Token Ring NIC */ +/* #define NIC_???_CHIP 0x0004 */ /* Not used */ +/* #define NIC_???_CHIP 0x0005 */ /* Not used */ +/* #define NIC_???_CHIP 0x0006 */ /* Not used */ +#define NIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */ +#define NIC_C100_CHIP 0x0010 /* FEAST 100Mbps Ethernet NIC */ +#define NIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */ +#define NIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */ +#define NIC_8432E_CHIP 0x0013 /* 8432 enhanced bus iface/Ethernet NIC(DEC) */ +#define NIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */ +#define NIC_C94_CHIP 0x0015 /* 91C94 PC Card with multi func */ + +/* + * Definitions for the field: + * adapter_type The adapter_type field describes the adapter/bus + * configuration. + */ +#define BUS_ISA16_TYPE 0x0001 /* 16 bit adap in 16 bit (E)ISA slot */ +#define BUS_ISA8_TYPE 0x0002 /* 8/16b adap in 8 bit XT/(E)ISA slot */ +#define BUS_MCA_TYPE 0x0003 /* Micro Channel adapter */ + +/* + * Receive Mask definitions + */ +#define ACCEPT_MULTICAST 0x0001 +#define ACCEPT_BROADCAST 0x0002 +#define PROMISCUOUS_MODE 0x0004 +#define ACCEPT_SOURCE_ROUTING 0x0008 +#define ACCEPT_ERR_PACKETS 0x0010 +#define ACCEPT_ATT_MAC_FRAMES 0x0020 +#define ACCEPT_MULTI_PROM 0x0040 +#define TRANSMIT_ONLY 0x0080 +#define ACCEPT_EXT_MAC_FRAMES 0x0100 +#define EARLY_RX_ENABLE 0x0200 +#define PKT_SIZE_NOT_NEEDED 0x0400 +#define ACCEPT_SOURCE_ROUTING_SPANNING 0x0808 + +#define ACCEPT_ALL_MAC_FRAMES 0x0120 + +/* + * config_mode defs + */ +#define STORE_EEROM 0x0001 /* Store config in EEROM. */ +#define STORE_REGS 0x0002 /* Store config in register set. */ + +/* + * equates for lmac_flags in adapter structure (Ethernet) + */ +#define MEM_DISABLE 0x0001 +#define RX_STATUS_POLL 0x0002 +#define USE_RE_BIT 0x0004 +/*#define RESERVED 0x0008 */ +/*#define RESERVED 0x0010 */ +/*#define RESERVED 0x0020 */ +/*#define RESERVED 0x0040 */ +/*#define RESERVED 0x0080 */ +/*#define RESERVED 0x0100 */ +/*#define RESERVED 0x0200 */ +/*#define RESERVED 0x0400 */ +/*#define RESERVED 0x0800 */ +/*#define RESERVED 0x1000 */ +/*#define RESERVED 0x2000 */ +/*#define RESERVED 0x4000 */ +/*#define RESERVED 0x8000 */ + +/* media_opts & media_set Fields bit defs for Ethernet ... */ +#define MED_OPT_BNC 0x01 +#define MED_OPT_UTP 0x02 +#define MED_OPT_AUI 0x04 +#define MED_OPT_10MB 0x08 +#define MED_OPT_100MB 0x10 +#define MED_OPT_S10 0x20 + +/* media_opts & media_set Fields bit defs for Token Ring ... */ +#define MED_OPT_4MB 0x08 +#define MED_OPT_16MB 0x10 +#define MED_OPT_STP 0x40 + +#define MAX_8023_SIZE 1500 /* Max 802.3 size of frame. */ +#define DEFAULT_ERX_VALUE 4 /* Number of 16-byte blocks for 790B early Rx. */ +#define DEFAULT_ETX_VALUE 32 /* Number of bytes for 790B early Tx. */ +#define DEFAULT_TX_RETRIES 3 /* Number of transmit retries */ +#define LPBK_FRAME_SIZE 1024 /* Default loopback frame for Rx calibration test. */ +#define MAX_LOOKAHEAD_SIZE 252 /* Max lookahead size for ethernet. 
*/ + +#define RW_MAC_STATE 0x1101 +#define RW_SA_OF_LAST_AMP_OR_SMP 0x2803 +#define RW_PHYSICAL_DROP_NUMBER 0x3B02 +#define RW_UPSTREAM_NEIGHBOR_ADDRESS 0x3E03 +#define RW_PRODUCT_INSTANCE_ID 0x4B09 + +#define RW_TRC_STATUS_BLOCK 0x5412 + +#define RW_MAC_ERROR_COUNTERS_NO_CLEAR 0x8006 +#define RW_MAC_ERROR_COUNTER_CLEAR 0x7A06 +#define RW_CONFIG_REGISTER_0 0xA001 +#define RW_CONFIG_REGISTER_1 0xA101 +#define RW_PRESCALE_TIMER_THRESHOLD 0xA201 +#define RW_TPT_THRESHOLD 0xA301 +#define RW_TQP_THRESHOLD 0xA401 +#define RW_TNT_THRESHOLD 0xA501 +#define RW_TBT_THRESHOLD 0xA601 +#define RW_TSM_THRESHOLD 0xA701 +#define RW_TAM_THRESHOLD 0xA801 +#define RW_TBR_THRESHOLD 0xA901 +#define RW_TER_THRESHOLD 0xAA01 +#define RW_TGT_THRESHOLD 0xAB01 +#define RW_THT_THRESHOLD 0xAC01 +#define RW_TRR_THRESHOLD 0xAD01 +#define RW_TVX_THRESHOLD 0xAE01 +#define RW_INDIVIDUAL_MAC_ADDRESS 0xB003 + +#define RW_INDIVIDUAL_GROUP_ADDRESS 0xB303 /* all of group addr */ +#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301 /* 1st word of group addr */ +#define RW_INDIVIDUAL_GROUP_ADDR 0xB402 /* 2nd-3rd word of group addr */ +#define RW_FUNCTIONAL_ADDRESS 0xB603 /* all of functional addr */ +#define RW_FUNCTIONAL_ADDR_WORD_0 0xB601 /* 1st word of func addr */ +#define RW_FUNCTIONAL_ADDR 0xB702 /* 2nd-3rd word func addr */ + +#define RW_BIT_SIGNIFICANT_GROUP_ADDR 0xB902 +#define RW_SOURCE_RING_BRIDGE_NUMBER 0xBB01 +#define RW_TARGET_RING_NUMBER 0xBC01 + +#define RW_HIC_INTERRUPT_MASK 0xC601 + +#define SOURCE_ROUTING_SPANNING_BITS 0x00C0 /* Spanning Tree Frames */ +#define SOURCE_ROUTING_EXPLORER_BIT 0x0040 /* Explorer and Single Route */ + + /* write */ + +#define CSR_MSK_ALL 0x80 // Bic 587 Only +#define CSR_MSKTINT 0x20 +#define CSR_MSKCBUSY 0x10 +#define CSR_CLRTINT 0x08 +#define CSR_CLRCBUSY 0x04 +#define CSR_WCSS 0x02 +#define CSR_CA 0x01 + + /* read */ + +#define CSR_TINT 0x20 +#define CSR_CINT 0x10 +#define CSR_TSTAT 0x08 +#define CSR_CSTAT 0x04 +#define CSR_FAULT 0x02 +#define CSR_CBUSY 0x01 + +#define LAAR_MEM16ENB 0x80 +#define Zws16 0x20 + +#define IRR_IEN 0x80 +#define Zws8 0x01 + +#define IMCCR_EIL 0x04 + +typedef struct { + __u8 ac; /* Access Control */ + __u8 fc; /* Frame Control */ + __u8 da[6]; /* Dest Addr */ + __u8 sa[6]; /* Source Addr */ + + __u16 vl; /* Vector Length */ + __u8 dc_sc; /* Dest/Source Class */ + __u8 vc; /* Vector Code */ + } MAC_HEADER; + +#define MAX_SUB_VECTOR_INFO (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2) + +typedef struct + { + __u8 svl; /* Sub-vector Length */ + __u8 svi; /* Sub-vector Code */ + __u8 svv[MAX_SUB_VECTOR_INFO]; /* Sub-vector Info */ + } MAC_SUB_VECTOR; + +#endif /* __KERNEL__ */ +#endif /* __LINUX_SMCTR_H */ diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c new file mode 100644 index 00000000..79302034 --- /dev/null +++ b/drivers/net/tokenring/tms380tr.c @@ -0,0 +1,2352 @@ +/* + * tms380tr.c: A network driver library for Texas Instruments TMS380-based + * Token Ring Adapters. + * + * Originally sktr.c: Written 1997 by Christoph Goos + * + * A fine result of the Linux Systems Network Architecture Project. + * http://www.vanheusden.com/sna/ + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + * The following modules are currently available for card support: + * - tmspci (Generic PCI card support) + * - abyss (Madge PCI support) + * - tmsisa (SysKonnect TR4/16 ISA) + * + * Sources: + * - The hardware related parts of this driver are take from + * the SysKonnect Token Ring driver for Windows NT. + * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this + * driver, as well as the 'skeleton.c' driver by Donald Becker. + * - Also various other drivers in the linux source tree were taken + * as samples for some tasks. + * - TI TMS380 Second-Generation Token Ring User's Guide + * - TI datasheets for respective chips + * - David Hein at Texas Instruments + * - Various Madge employees + * + * Maintainer(s): + * JS Jay Schulist jschlst@samba.org + * CG Christoph Goos cgoos@syskonnect.de + * AF Adam Fritzler + * MLP Mike Phillips phillim@amtrak.com + * JF Jochen Friedrich jochen@scram.de + * + * Modification History: + * 29-Aug-97 CG Created + * 04-Apr-98 CG Fixed problems caused by tok_timer_check + * 10-Apr-98 CG Fixed lockups at cable disconnection + * 27-May-98 JS Formated to Linux Kernel Format + * 31-May-98 JS Hacked in PCI support + * 16-Jun-98 JS Modulized for multiple cards with one driver + * Sep-99 AF Renamed to tms380tr (supports more than SK's) + * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support + * Fixed a bug causing double copies on PCI + * Fixed for new multicast stuff (2.2/2.3) + * 25-Sep-99 AF Uped TPL_NUM from 3 to 9 + * Removed extraneous 'No free TPL' + * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized + * parts of the initilization procedure. + * 30-Dec-99 AF Turned tms380tr into a library ala 8390. + * Madge support is provided in the abyss module + * Generic PCI support is in the tmspci module. + * 30-Nov-00 JF Updated PCI code to support IO MMU via + * pci_map_static(). Alpha uses this MMU for ISA + * as well. + * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some + * cleanup. + * 13-Jan-02 JF Add spinlock to fix race condition. + * 09-Nov-02 JF Fixed printks to not SPAM the console during + * normal operation. + * 30-Dec-02 JF Removed incorrect __init from + * tms380tr_init_card. + * 22-Jul-05 JF Converted to dma-mapping. + * + * To do: + * 1. Multi/Broadcast packet handling (this may have fixed itself) + * 2. Write a sktrisa module that includes the old ISA support (done) + * 3. Allow modules to load their own microcode + * 4. Speed up the BUD process -- freezing the kernel for 3+sec is + * quite unacceptable. + * 5. Still a few remaining stalls when the cable is unplugged. + */ + +#ifdef MODULE +static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n"; +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "tms380tr.h" /* Our Stuff */ + +/* Use 0 for production, 1 for verification, 2 for debug, and + * 3 for very verbose debug. + */ +#ifndef TMS380TR_DEBUG +#define TMS380TR_DEBUG 0 +#endif +static unsigned int tms380tr_debug = TMS380TR_DEBUG; + +/* Index to functions, as function prototypes. + * Alphabetical by function name. 
+ */ + +/* "A" */ +/* "B" */ +static int tms380tr_bringup_diags(struct net_device *dev); +/* "C" */ +static void tms380tr_cancel_tx_queue(struct net_local* tp); +static int tms380tr_chipset_init(struct net_device *dev); +static void tms380tr_chk_irq(struct net_device *dev); +static void tms380tr_chk_outstanding_cmds(struct net_device *dev); +static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr); +static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType); +int tms380tr_close(struct net_device *dev); +static void tms380tr_cmd_status_irq(struct net_device *dev); +/* "D" */ +static void tms380tr_disable_interrupts(struct net_device *dev); +#if TMS380TR_DEBUG > 0 +static void tms380tr_dump(unsigned char *Data, int length); +#endif +/* "E" */ +static void tms380tr_enable_interrupts(struct net_device *dev); +static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command); +static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue); +/* "F" */ +/* "G" */ +static struct net_device_stats *tms380tr_get_stats(struct net_device *dev); +/* "H" */ +static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, + struct net_device *dev); +/* "I" */ +static int tms380tr_init_adapter(struct net_device *dev); +static void tms380tr_init_ipb(struct net_local *tp); +static void tms380tr_init_net_local(struct net_device *dev); +static void tms380tr_init_opb(struct net_device *dev); +/* "M" */ +/* "O" */ +int tms380tr_open(struct net_device *dev); +static void tms380tr_open_adapter(struct net_device *dev); +/* "P" */ +/* "R" */ +static void tms380tr_rcv_status_irq(struct net_device *dev); +static int tms380tr_read_ptr(struct net_device *dev); +static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data, + unsigned short Address, int Length); +static int tms380tr_reset_adapter(struct net_device *dev); +static void tms380tr_reset_interrupt(struct net_device *dev); +static void tms380tr_ring_status_irq(struct net_device *dev); +/* "S" */ +static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, + struct net_device *dev); +static void tms380tr_set_multicast_list(struct net_device *dev); +static int tms380tr_set_mac_address(struct net_device *dev, void *addr); +/* "T" */ +static void tms380tr_timer_chk(unsigned long data); +static void tms380tr_timer_end_wait(unsigned long data); +static void tms380tr_tx_status_irq(struct net_device *dev); +/* "U" */ +static void tms380tr_update_rcv_stats(struct net_local *tp, + unsigned char DataPtr[], unsigned int Length); +/* "W" */ +void tms380tr_wait(unsigned long time); +static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status); +static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status); + +#define SIFREADB(reg) \ + (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg)) +#define SIFWRITEB(val, reg) \ + (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg)) +#define SIFREADW(reg) \ + (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg)) +#define SIFWRITEW(val, reg) \ + (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg)) + + + +#if 0 /* TMS380TR_DEBUG > 0 */ +static int madgemc_sifprobe(struct net_device *dev) +{ + unsigned char old, chk1, chk2; + + old = SIFREADB(SIFADR); /* Get the old SIFADR value */ + + chk1 = 0; /* Begin with check value 0 */ + do { + madgemc_setregpage(dev, 0); + /* Write new SIFADR value */ + SIFWRITEB(chk1, SIFADR); + chk2 = SIFREADB(SIFADR); + if (chk2 != chk1) + return -1; + + 
madgemc_setregpage(dev, 1); + /* Read, invert and write */ + chk2 = SIFREADB(SIFADD); + if (chk2 != chk1) + return -1; + + madgemc_setregpage(dev, 0); + chk2 ^= 0x0FE; + SIFWRITEB(chk2, SIFADR); + + /* Read, invert and compare */ + madgemc_setregpage(dev, 1); + chk2 = SIFREADB(SIFADD); + madgemc_setregpage(dev, 0); + chk2 ^= 0x0FE; + + if(chk1 != chk2) + return -1; /* No adapter */ + chk1 -= 2; + } while(chk1 != 0); /* Repeat 128 times (all byte values) */ + + madgemc_setregpage(dev, 0); /* sanity */ + /* Restore the SIFADR value */ + SIFWRITEB(old, SIFADR); + + return 0; +} +#endif + +/* + * Open/initialize the board. This is called sometime after + * booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +int tms380tr_open(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + /* init the spinlock */ + spin_lock_init(&tp->lock); + init_timer(&tp->timer); + + /* Reset the hardware here. Don't forget to set the station address. */ + +#ifdef CONFIG_ISA + if(dev->dma > 0) + { + unsigned long flags=claim_dma_lock(); + disable_dma(dev->dma); + set_dma_mode(dev->dma, DMA_MODE_CASCADE); + enable_dma(dev->dma); + release_dma_lock(flags); + } +#endif + + err = tms380tr_chipset_init(dev); + if(err) + { + printk(KERN_INFO "%s: Chipset initialization error\n", + dev->name); + return -1; + } + + tp->timer.expires = jiffies + 30*HZ; + tp->timer.function = tms380tr_timer_end_wait; + tp->timer.data = (unsigned long)dev; + add_timer(&tp->timer); + + printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", + dev->name, tms380tr_read_ptr(dev)); + + tms380tr_enable_interrupts(dev); + tms380tr_open_adapter(dev); + + netif_start_queue(dev); + + /* Wait for interrupt from hardware. If interrupt does not come, + * there will be a timeout from the timer. 
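+	 * (tms380tr_timer_end_wait() only wakes the sleeper; success or
+	 * failure of the open is judged below from AdapterVirtOpenFlag.)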
+ */ + tp->Sleeping = 1; + interruptible_sleep_on(&tp->wait_for_tok_int); + del_timer(&tp->timer); + + /* If AdapterVirtOpenFlag is 1, the adapter is now open for use */ + if(tp->AdapterVirtOpenFlag == 0) + { + tms380tr_disable_interrupts(dev); + return -1; + } + + tp->StartTime = jiffies; + + /* Start function control timer */ + tp->timer.expires = jiffies + 2*HZ; + tp->timer.function = tms380tr_timer_chk; + tp->timer.data = (unsigned long)dev; + add_timer(&tp->timer); + + return 0; +} + +/* + * Timeout function while waiting for event + */ +static void tms380tr_timer_end_wait(unsigned long data) +{ + struct net_device *dev = (struct net_device*)data; + struct net_local *tp = netdev_priv(dev); + + if(tp->Sleeping) + { + tp->Sleeping = 0; + wake_up_interruptible(&tp->wait_for_tok_int); + } +} + +/* + * Initialize the chipset + */ +static int tms380tr_chipset_init(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int err; + + tms380tr_init_ipb(tp); + tms380tr_init_opb(dev); + tms380tr_init_net_local(dev); + + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name); + err = tms380tr_reset_adapter(dev); + if(err < 0) + return -1; + + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name); + err = tms380tr_bringup_diags(dev); + if(err < 0) + return -1; + + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Init adapter...\n", dev->name); + err = tms380tr_init_adapter(dev); + if(err < 0) + return -1; + + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Done!\n", dev->name); + return 0; +} + +/* + * Initializes the net_local structure. + */ +static void tms380tr_init_net_local(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + int i; + dma_addr_t dmabuf; + + tp->scb.CMD = 0; + tp->scb.Parm[0] = 0; + tp->scb.Parm[1] = 0; + + tp->ssb.STS = 0; + tp->ssb.Parm[0] = 0; + tp->ssb.Parm[1] = 0; + tp->ssb.Parm[2] = 0; + + tp->CMDqueue = 0; + + tp->AdapterOpenFlag = 0; + tp->AdapterVirtOpenFlag = 0; + tp->ScbInUse = 0; + tp->OpenCommandIssued = 0; + tp->ReOpenInProgress = 0; + tp->HaltInProgress = 0; + tp->TransmitHaltScheduled = 0; + tp->LobeWireFaultLogged = 0; + tp->LastOpenStatus = 0; + tp->MaxPacketSize = DEFAULT_PACKET_SIZE; + + /* Create circular chain of transmit lists */ + for (i = 0; i < TPL_NUM; i++) + { + tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ + tp->Tpl[i].Status = 0; + tp->Tpl[i].FrameSize = 0; + tp->Tpl[i].FragList[0].DataCount = 0; + tp->Tpl[i].FragList[0].DataAddr = 0; + tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM]; + tp->Tpl[i].MData = NULL; + tp->Tpl[i].TPLIndex = i; + tp->Tpl[i].DMABuff = 0; + tp->Tpl[i].BusyFlag = 0; + } + + tp->TplFree = tp->TplBusy = &tp->Tpl[0]; + + /* Create circular chain of receive lists */ + for (i = 0; i < RPL_NUM; i++) + { + tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ + tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); + tp->Rpl[i].FrameSize = 0; + tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); + + /* Alloc skb and point adapter to data area */ + tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize); + tp->Rpl[i].DMABuff = 0; + + /* skb == NULL ? 
then use local buffer */ + if(tp->Rpl[i].Skb == NULL) + { + tp->Rpl[i].SkbStat = SKB_UNAVAILABLE; + tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); + tp->Rpl[i].MData = tp->LocalRxBuffers[i]; + } + else /* SKB != NULL */ + { + tp->Rpl[i].Skb->dev = dev; + skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); + + /* data unreachable for DMA ? then use local buffer */ + dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); + if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) + { + tp->Rpl[i].SkbStat = SKB_DATA_COPY; + tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); + tp->Rpl[i].MData = tp->LocalRxBuffers[i]; + } + else /* DMA directly in skb->data */ + { + tp->Rpl[i].SkbStat = SKB_DMA_DIRECT; + tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf); + tp->Rpl[i].MData = tp->Rpl[i].Skb->data; + tp->Rpl[i].DMABuff = dmabuf; + } + } + + tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM]; + tp->Rpl[i].RPLIndex = i; + } + + tp->RplHead = &tp->Rpl[0]; + tp->RplTail = &tp->Rpl[RPL_NUM-1]; + tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); +} + +/* + * Initializes the initialisation parameter block. + */ +static void tms380tr_init_ipb(struct net_local *tp) +{ + tp->ipb.Init_Options = BURST_MODE; + tp->ipb.CMD_Status_IV = 0; + tp->ipb.TX_IV = 0; + tp->ipb.RX_IV = 0; + tp->ipb.Ring_Status_IV = 0; + tp->ipb.SCB_Clear_IV = 0; + tp->ipb.Adapter_CHK_IV = 0; + tp->ipb.RX_Burst_Size = BURST_SIZE; + tp->ipb.TX_Burst_Size = BURST_SIZE; + tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES; + tp->ipb.SCB_Addr = 0; + tp->ipb.SSB_Addr = 0; +} + +/* + * Initializes the open parameter block. + */ +static void tms380tr_init_opb(struct net_device *dev) +{ + struct net_local *tp; + unsigned long Addr; + unsigned short RplSize = RPL_SIZE; + unsigned short TplSize = TPL_SIZE; + unsigned short BufferSize = BUFFER_SIZE; + int i; + + tp = netdev_priv(dev); + + tp->ocpl.OPENOptions = 0; + tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION; + tp->ocpl.FullDuplex = 0; + tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF; + + /* + * Set node address + * + * We go ahead and put it in the OPB even though on + * most of the generic adapters this isn't required. + * Its simpler this way. -- ASF + */ + for (i=0;i<6;i++) + tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i]; + + tp->ocpl.GroupAddr = 0; + tp->ocpl.FunctAddr = 0; + tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize); + tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize); + tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize); + tp->ocpl.Reserved = 0; + tp->ocpl.TXBufMin = TX_BUF_MIN; + tp->ocpl.TXBufMax = TX_BUF_MAX; + + Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer); + + tp->ocpl.ProdIDAddr[0] = LOWORD(Addr); + tp->ocpl.ProdIDAddr[1] = HIWORD(Addr); +} + +/* + * Send OPEN command to adapter + */ +static void tms380tr_open_adapter(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + if(tp->OpenCommandIssued) + return; + + tp->OpenCommandIssued = 1; + tms380tr_exec_cmd(dev, OC_OPEN); +} + +/* + * Clear the adapter's interrupt flag. Clear system interrupt enable + * (SINTEN): disable adapter to system interrupts. + */ +static void tms380tr_disable_interrupts(struct net_device *dev) +{ + SIFWRITEB(0, SIFACL); +} + +/* + * Set the adapter's interrupt flag. Set system interrupt enable + * (SINTEN): enable adapter to system interrupts. 
+ */ +static void tms380tr_enable_interrupts(struct net_device *dev) +{ + SIFWRITEB(ACL_SINTEN, SIFACL); +} + +/* + * Put command in command queue, try to execute it. + */ +static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command) +{ + struct net_local *tp = netdev_priv(dev); + + tp->CMDqueue |= Command; + tms380tr_chk_outstanding_cmds(dev); +} + +static void tms380tr_timeout(struct net_device *dev) +{ + /* + * If we get here, some higher level has decided we are broken. + * There should really be a "kick me" function call instead. + * + * Resetting the token ring adapter takes a long time so just + * fake transmission time and go on trying. Our own timeout + * routine is in tms380tr_timer_chk() + */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * Gets skb from system, queues it and checks if it can be sent + */ +static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + netdev_tx_t rc; + + rc = tms380tr_hardware_send_packet(skb, dev); + if(tp->TplFree->NextTPLPtr->BusyFlag) + netif_stop_queue(dev); + return rc; +} + +/* + * Move frames into adapter tx queue + */ +static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + TPL *tpl; + short length; + unsigned char *buf; + unsigned long flags; + int i; + dma_addr_t dmabuf, newbuf; + struct net_local *tp = netdev_priv(dev); + + /* Try to get a free TPL from the chain. + * + * NOTE: We *must* always leave one unused TPL in the chain, + * because otherwise the adapter might send frames twice. + */ + spin_lock_irqsave(&tp->lock, flags); + if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */ + if (tms380tr_debug > 0) + printk(KERN_DEBUG "%s: No free TPL\n", dev->name); + spin_unlock_irqrestore(&tp->lock, flags); + return NETDEV_TX_BUSY; + } + + dmabuf = 0; + + /* Is buffer reachable for Busmaster-DMA? */ + + length = skb->len; + dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE); + if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { + /* Copy frame to local buffer */ + dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE); + dmabuf = 0; + i = tp->TplFree->TPLIndex; + buf = tp->LocalTxBuffers[i]; + skb_copy_from_linear_data(skb, buf, length); + newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; + } + else { + /* Send direct from skb->data */ + newbuf = dmabuf; + buf = skb->data; + } + /* Source address in packet? */ + tms380tr_chk_src_addr(buf, dev->dev_addr); + tp->LastSendTime = jiffies; + tpl = tp->TplFree; /* Get the "free" TPL */ + tpl->BusyFlag = 1; /* Mark TPL as busy */ + tp->TplFree = tpl->NextTPLPtr; + + /* Save the skb for delayed return of skb to system */ + tpl->Skb = skb; + tpl->DMABuff = dmabuf; + tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length); + tpl->FragList[0].DataAddr = htonl(newbuf); + + /* Write the data length in the transmit list. */ + tpl->FrameSize = cpu_to_be16((unsigned short)length); + tpl->MData = buf; + + /* Transmit the frame and set the status values. */ + tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME + | TX_END_FRAME | TX_PASS_SRC_ADDR + | TX_FRAME_IRQ); + + /* Let adapter send the frame. */ + tms380tr_exec_sifcmd(dev, CMD_TX_VALID); + spin_unlock_irqrestore(&tp->lock, flags); + + return NETDEV_TX_OK; +} + +/* + * Write the given value to the 'Status' field of the specified TPL. 
+ * NOTE: This function should be used whenever the status of any TPL must be + * modified by the driver, because the compiler may otherwise change the + * order of instructions such that writing the TPL status may be executed at + * an undesirable time. When this function is used, the status is always + * written when the function is called. + */ +static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status) +{ + tpl->Status = Status; +} + +static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr) +{ + unsigned char SRBit; + + if((((unsigned long)frame[8]) & ~0x80) != 0) /* Compare 4 bytes */ + return; + if((unsigned short)frame[12] != 0) /* Compare 2 bytes */ + return; + + SRBit = frame[8] & 0x80; + memcpy(&frame[8], hw_addr, 6); + frame[8] |= SRBit; +} + +/* + * The timer routine: Check if adapter still open and working, reopen if not. + */ +static void tms380tr_timer_chk(unsigned long data) +{ + struct net_device *dev = (struct net_device*)data; + struct net_local *tp = netdev_priv(dev); + + if(tp->HaltInProgress) + return; + + tms380tr_chk_outstanding_cmds(dev); + if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) && + (tp->TplFree != tp->TplBusy)) + { + /* Anything to send, but stalled too long */ + tp->LastSendTime = jiffies; + tms380tr_exec_cmd(dev, OC_CLOSE); /* Does reopen automatically */ + } + + tp->timer.expires = jiffies + 2*HZ; + add_timer(&tp->timer); + + if(tp->AdapterOpenFlag || tp->ReOpenInProgress) + return; + tp->ReOpenInProgress = 1; + tms380tr_open_adapter(dev); +} + +/* + * The typical workload of the driver: Handle the network interface interrupts. + */ +irqreturn_t tms380tr_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *tp; + unsigned short irq_type; + int handled = 0; + + tp = netdev_priv(dev); + + irq_type = SIFREADW(SIFSTS); + + while(irq_type & STS_SYSTEM_IRQ) { + handled = 1; + irq_type &= STS_IRQ_MASK; + + if(!tms380tr_chk_ssb(tp, irq_type)) { + printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name); + break; + } + + switch(irq_type) { + case STS_IRQ_RECEIVE_STATUS: + tms380tr_reset_interrupt(dev); + tms380tr_rcv_status_irq(dev); + break; + + case STS_IRQ_TRANSMIT_STATUS: + /* Check if TRANSMIT.HALT command is complete */ + if(tp->ssb.Parm[0] & COMMAND_COMPLETE) { + tp->TransmitCommandActive = 0; + tp->TransmitHaltScheduled = 0; + + /* Issue a new transmit command. */ + tms380tr_exec_cmd(dev, OC_TRANSMIT); + } + + tms380tr_reset_interrupt(dev); + tms380tr_tx_status_irq(dev); + break; + + case STS_IRQ_COMMAND_STATUS: + /* The SSB contains status of last command + * other than receive/transmit. + */ + tms380tr_cmd_status_irq(dev); + break; + + case STS_IRQ_SCB_CLEAR: + /* The SCB is free for another command. */ + tp->ScbInUse = 0; + tms380tr_chk_outstanding_cmds(dev); + break; + + case STS_IRQ_RING_STATUS: + tms380tr_ring_status_irq(dev); + break; + + case STS_IRQ_ADAPTER_CHECK: + tms380tr_chk_irq(dev); + break; + + case STS_IRQ_LLC_STATUS: + printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n"); + break; + + case STS_IRQ_TIMER: + printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n"); + break; + + case STS_IRQ_RECEIVE_PENDING: + printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n"); + break; + + default: + printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type); + break; + } + + /* Reset system interrupt if not already done. 
*/ + if(irq_type != STS_IRQ_TRANSMIT_STATUS && + irq_type != STS_IRQ_RECEIVE_STATUS) { + tms380tr_reset_interrupt(dev); + } + + irq_type = SIFREADW(SIFSTS); + } + + return IRQ_RETVAL(handled); +} + +/* + * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command. + */ +static void tms380tr_reset_interrupt(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + SSB *ssb = &tp->ssb; + + /* + * [Workaround for "Data Late"] + * Set all fields of the SSB to well-defined values so we can + * check if the adapter has written the SSB. + */ + + ssb->STS = (unsigned short) -1; + ssb->Parm[0] = (unsigned short) -1; + ssb->Parm[1] = (unsigned short) -1; + ssb->Parm[2] = (unsigned short) -1; + + /* Free SSB by issuing SSB_CLEAR command after reading IRQ code + * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts. + */ + tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ); +} + +/* + * Check if the SSB has actually been written by the adapter. + */ +static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType) +{ + SSB *ssb = &tp->ssb; /* The address of the SSB. */ + + /* C 0 1 2 INTERRUPT CODE + * - - - - -------------- + * 1 1 1 1 TRANSMIT STATUS + * 1 1 1 1 RECEIVE STATUS + * 1 ? ? 0 COMMAND STATUS + * 0 0 0 0 SCB CLEAR + * 1 1 0 0 RING STATUS + * 0 0 0 0 ADAPTER CHECK + * + * 0 = SSB field not affected by interrupt + * 1 = SSB field is affected by interrupt + * + * C = SSB ADDRESS +0: COMMAND + * 0 = SSB ADDRESS +2: STATUS 0 + * 1 = SSB ADDRESS +4: STATUS 1 + * 2 = SSB ADDRESS +6: STATUS 2 + */ + + /* Check if this interrupt does use the SSB. */ + + if(IrqType != STS_IRQ_TRANSMIT_STATUS && + IrqType != STS_IRQ_RECEIVE_STATUS && + IrqType != STS_IRQ_COMMAND_STATUS && + IrqType != STS_IRQ_RING_STATUS) + { + return 1; /* SSB not involved. */ + } + + /* Note: All fields of the SSB have been set to all ones (-1) after it + * has last been used by the software (see DriverIsr()). + * + * Check if the affected SSB fields are still unchanged. + */ + + if(ssb->STS == (unsigned short) -1) + return 0; /* Command field not yet available. */ + if(IrqType == STS_IRQ_COMMAND_STATUS) + return 1; /* Status fields not always affected. */ + if(ssb->Parm[0] == (unsigned short) -1) + return 0; /* Status 1 field not yet available. */ + if(IrqType == STS_IRQ_RING_STATUS) + return 1; /* Status 2 & 3 fields not affected. */ + + /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */ + if(ssb->Parm[1] == (unsigned short) -1) + return 0; /* Status 2 field not yet available. */ + if(ssb->Parm[2] == (unsigned short) -1) + return 0; /* Status 3 field not yet available. */ + + return 1; /* All SSB fields have been written by the adapter. */ +} + +/* + * Evaluates the command results status in the SSB status field. + */ +static void tms380tr_cmd_status_irq(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned short ssb_cmd, ssb_parm_0; + unsigned short ssb_parm_1; + char *open_err = "Open error -"; + char *code_err = "Open code -"; + + /* Copy the ssb values to local variables */ + ssb_cmd = tp->ssb.STS; + ssb_parm_0 = tp->ssb.Parm[0]; + ssb_parm_1 = tp->ssb.Parm[1]; + + if(ssb_cmd == OPEN) + { + tp->Sleeping = 0; + if(!tp->ReOpenInProgress) + wake_up_interruptible(&tp->wait_for_tok_int); + + tp->OpenCommandIssued = 0; + tp->ScbInUse = 0; + + if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION) + { + /* Success, the adapter is open. 
*/ + tp->LobeWireFaultLogged = 0; + tp->AdapterOpenFlag = 1; + tp->AdapterVirtOpenFlag = 1; + tp->TransmitCommandActive = 0; + tms380tr_exec_cmd(dev, OC_TRANSMIT); + tms380tr_exec_cmd(dev, OC_RECEIVE); + + if(tp->ReOpenInProgress) + tp->ReOpenInProgress = 0; + + return; + } + else /* The adapter did not open. */ + { + if(ssb_parm_0 & NODE_ADDR_ERROR) + printk(KERN_INFO "%s: Node address error\n", + dev->name); + if(ssb_parm_0 & LIST_SIZE_ERROR) + printk(KERN_INFO "%s: List size error\n", + dev->name); + if(ssb_parm_0 & BUF_SIZE_ERROR) + printk(KERN_INFO "%s: Buffer size error\n", + dev->name); + if(ssb_parm_0 & TX_BUF_COUNT_ERROR) + printk(KERN_INFO "%s: Tx buffer count error\n", + dev->name); + if(ssb_parm_0 & INVALID_OPEN_OPTION) + printk(KERN_INFO "%s: Invalid open option\n", + dev->name); + if(ssb_parm_0 & OPEN_ERROR) + { + /* Show the open phase. */ + switch(ssb_parm_0 & OPEN_PHASES_MASK) + { + case LOBE_MEDIA_TEST: + if(!tp->LobeWireFaultLogged) + { + tp->LobeWireFaultLogged = 1; + printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err); + } + tp->ReOpenInProgress = 1; + tp->AdapterOpenFlag = 0; + tp->AdapterVirtOpenFlag = 1; + tms380tr_open_adapter(dev); + return; + + case PHYSICAL_INSERTION: + printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err); + break; + + case ADDRESS_VERIFICATION: + printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err); + break; + + case PARTICIPATION_IN_RING_POLL: + printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err); + break; + + case REQUEST_INITIALISATION: + printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err); + break; + + case FULLDUPLEX_CHECK: + printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err); + break; + + default: + printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err); + break; + } + + /* Show the open errors. 
*/ + switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK) + { + case OPEN_FUNCTION_FAILURE: + printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err); + tp->LastOpenStatus = + OPEN_FUNCTION_FAILURE; + break; + + case OPEN_SIGNAL_LOSS: + printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_SIGNAL_LOSS; + break; + + case OPEN_TIMEOUT: + printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_TIMEOUT; + break; + + case OPEN_RING_FAILURE: + printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_RING_FAILURE; + break; + + case OPEN_RING_BEACONING: + printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_RING_BEACONING; + break; + + case OPEN_DUPLICATE_NODEADDR: + printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_DUPLICATE_NODEADDR; + break; + + case OPEN_REQUEST_INIT: + printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_REQUEST_INIT; + break; + + case OPEN_REMOVE_RECEIVED: + printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err); + tp->LastOpenStatus = + OPEN_REMOVE_RECEIVED; + break; + + case OPEN_FULLDUPLEX_SET: + printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err); + tp->LastOpenStatus = + OPEN_FULLDUPLEX_SET; + break; + + default: + printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err); + tp->LastOpenStatus = + OPEN_FUNCTION_FAILURE; + break; + } + } + + tp->AdapterOpenFlag = 0; + tp->AdapterVirtOpenFlag = 0; + + return; + } + } + else + { + if(ssb_cmd != READ_ERROR_LOG) + return; + + /* Add values from the error log table to the MAC + * statistics counters and update the errorlogtable + * memory. + */ + tp->MacStat.line_errors += tp->errorlogtable.Line_Error; + tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error; + tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error; + tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error; + tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error; + tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error; + tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error; + tp->MacStat.token_errors += tp->errorlogtable.Token_Error; + tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error; + tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error; + tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters; + tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error; + tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error; + } +} + +/* + * The inverse routine to tms380tr_open(). + */ +int tms380tr_close(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + netif_stop_queue(dev); + + del_timer(&tp->timer); + + /* Flush the Tx and disable Rx here. 
*/ + + tp->HaltInProgress = 1; + tms380tr_exec_cmd(dev, OC_CLOSE); + tp->timer.expires = jiffies + 1*HZ; + tp->timer.function = tms380tr_timer_end_wait; + tp->timer.data = (unsigned long)dev; + add_timer(&tp->timer); + + tms380tr_enable_interrupts(dev); + + tp->Sleeping = 1; + interruptible_sleep_on(&tp->wait_for_tok_int); + tp->TransmitCommandActive = 0; + + del_timer(&tp->timer); + tms380tr_disable_interrupts(dev); + +#ifdef CONFIG_ISA + if(dev->dma > 0) + { + unsigned long flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + } +#endif + + SIFWRITEW(0xFF00, SIFCMD); +#if 0 + if(dev->dma > 0) /* what the? */ + SIFWRITEB(0xff, POSREG); +#endif + tms380tr_cancel_tx_queue(tp); + + return 0; +} + +/* + * Get the current statistics. This may be called with the card open + * or closed. + */ +static struct net_device_stats *tms380tr_get_stats(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + return (struct net_device_stats *)&tp->MacStat; +} + +/* + * Set or clear the multicast filter for this adapter. + */ +static void tms380tr_set_multicast_list(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned int OpenOptions; + + OpenOptions = tp->ocpl.OPENOptions & + ~(PASS_ADAPTER_MAC_FRAMES + | PASS_ATTENTION_FRAMES + | PASS_BEACON_MAC_FRAMES + | COPY_ALL_MAC_FRAMES + | COPY_ALL_NON_MAC_FRAMES); + + tp->ocpl.FunctAddr = 0; + + if(dev->flags & IFF_PROMISC) + /* Enable promiscuous mode */ + OpenOptions |= COPY_ALL_NON_MAC_FRAMES | + COPY_ALL_MAC_FRAMES; + else + { + if(dev->flags & IFF_ALLMULTI) + { + /* Disable promiscuous mode, use normal mode. */ + tp->ocpl.FunctAddr = 0xFFFFFFFF; + } + else + { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + ((char *)(&tp->ocpl.FunctAddr))[0] |= + ha->addr[2]; + ((char *)(&tp->ocpl.FunctAddr))[1] |= + ha->addr[3]; + ((char *)(&tp->ocpl.FunctAddr))[2] |= + ha->addr[4]; + ((char *)(&tp->ocpl.FunctAddr))[3] |= + ha->addr[5]; + } + } + tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); + } + + tp->ocpl.OPENOptions = OpenOptions; + tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS); +} + +/* + * Wait for some time (microseconds) + */ +void tms380tr_wait(unsigned long time) +{ +#if 0 + long tmp; + + tmp = jiffies + time/(1000000/HZ); + do { + tmp = schedule_timeout_interruptible(tmp); + } while(time_after(tmp, jiffies)); +#else + mdelay(time / 1000); +#endif +} + +/* + * Write a command value to the SIFCMD register + */ +static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue) +{ + unsigned short cmd; + unsigned short SifStsValue; + unsigned long loop_counter; + + WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER); + cmd = (unsigned short)WriteValue; + loop_counter = 0,5 * 800000; + do { + SifStsValue = SIFREADW(SIFSTS); + } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--); + SIFWRITEW(cmd, SIFCMD); +} + +/* + * Processes adapter hardware reset, halts adapter and downloads firmware, + * clears the halt bit. 
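+ * The firmware image is consumed as a sequence of blocks of the form
+ *	<address high word> <address low word> <word count> <data words...>
+ * and a block with a word count of zero terminates the download, clears
+ * CPHALT and lets the adapter start its bring-up diagnostics (BUD).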
+ */ +static int tms380tr_reset_adapter(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned short *fw_ptr; + unsigned short count, c, count2; + const struct firmware *fw_entry = NULL; + + if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) { + printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", + dev->name, "tms380tr.bin"); + return -1; + } + + fw_ptr = (unsigned short *)fw_entry->data; + count2 = fw_entry->size / 2; + + /* Hardware adapter reset */ + SIFWRITEW(ACL_ARESET, SIFACL); + tms380tr_wait(40); + + c = SIFREADW(SIFACL); + tms380tr_wait(20); + + if(dev->dma == 0) /* For PCI adapters */ + { + c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1); /* Clear bits */ + if(tp->setnselout) + c |= (*tp->setnselout)(dev); + } + + /* In case a command is pending - forget it */ + tp->ScbInUse = 0; + + c &= ~ACL_ARESET; /* Clear adapter reset bit */ + c |= ACL_CPHALT; /* Halt adapter CPU, allow download */ + c |= ACL_BOOT; + c |= ACL_SINTEN; + c &= ~ACL_PSDMAEN; /* Clear pseudo dma bit */ + SIFWRITEW(c, SIFACL); + tms380tr_wait(40); + + count = 0; + /* Download firmware via DIO interface: */ + do { + if (count2 < 3) continue; + + /* Download first address part */ + SIFWRITEW(*fw_ptr, SIFADX); + fw_ptr++; + count2--; + /* Download second address part */ + SIFWRITEW(*fw_ptr, SIFADD); + fw_ptr++; + count2--; + + if((count = *fw_ptr) != 0) /* Load loop counter */ + { + fw_ptr++; /* Download block data */ + count2--; + if (count > count2) continue; + + for(; count > 0; count--) + { + SIFWRITEW(*fw_ptr, SIFINC); + fw_ptr++; + count2--; + } + } + else /* Stop, if last block downloaded */ + { + c = SIFREADW(SIFACL); + c &= (~ACL_CPHALT | ACL_SINTEN); + + /* Clear CPHALT and start BUD */ + SIFWRITEW(c, SIFACL); + release_firmware(fw_entry); + return 1; + } + } while(count == 0); + + release_firmware(fw_entry); + printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name); + return -1; +} + +MODULE_FIRMWARE("tms380tr.bin"); + +/* + * Starts bring up diagnostics of token ring adapter and evaluates + * diagnostic results. + */ +static int tms380tr_bringup_diags(struct net_device *dev) +{ + int loop_cnt, retry_cnt; + unsigned short Status; + + tms380tr_wait(HALF_SECOND); + tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); + tms380tr_wait(HALF_SECOND); + + retry_cnt = BUD_MAX_RETRIES; /* maximal number of retrys */ + + do { + retry_cnt--; + if(tms380tr_debug > 3) + printk(KERN_DEBUG "BUD-Status: "); + loop_cnt = BUD_MAX_LOOPCNT; /* maximum: three seconds*/ + do { /* Inspect BUD results */ + loop_cnt--; + tms380tr_wait(HALF_SECOND); + Status = SIFREADW(SIFSTS); + Status &= STS_MASK; + + if(tms380tr_debug > 3) + printk(KERN_DEBUG " %04X\n", Status); + /* BUD successfully completed */ + if(Status == STS_INITIALIZE) + return 1; + /* Unrecoverable hardware error, BUD not completed? */ + } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST)) + != (STS_ERROR | STS_TEST))); + + /* Error preventing completion of BUD */ + if(retry_cnt > 0) + { + printk(KERN_INFO "%s: Adapter Software Reset.\n", + dev->name); + tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); + tms380tr_wait(HALF_SECOND); + } + } while(retry_cnt > 0); + + Status = SIFREADW(SIFSTS); + + printk(KERN_INFO "%s: Hardware error\n", dev->name); + /* Hardware error occurred! 
*/ + Status &= 0x001f; + if (Status & 0x0010) + printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name); + else if ((Status & 0x000f) > 6) + printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name); + else + printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f); + + return -1; +} + +/* + * Copy initialisation data to adapter memory, beginning at address + * 1:0A00; Starting DMA test and evaluating result bits. + */ +static int tms380tr_init_adapter(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B}; + const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7, + 0xC5, 0xD9, 0xC3, 0xD4}; + void *ptr = (void *)&tp->ipb; + unsigned short *ipb_ptr = (unsigned short *)ptr; + unsigned char *cb_ptr = (unsigned char *) &tp->scb; + unsigned char *sb_ptr = (unsigned char *) &tp->ssb; + unsigned short Status; + int i, loop_cnt, retry_cnt; + + /* Normalize: byte order low/high, word order high/low! (only IPB!) */ + tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer); + tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer); + + if(tms380tr_debug > 3) + { + printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb); + printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer); + printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer); + printk(KERN_DEBUG "%s: buffer (tp) : %lx\n", dev->name, (long) tp); + } + /* Maximum: three initialization retries */ + retry_cnt = INIT_MAX_RETRIES; + + do { + retry_cnt--; + + /* Transfer initialization block */ + SIFWRITEW(0x0001, SIFADX); + + /* To address 0001:0A00 of adapter RAM */ + SIFWRITEW(0x0A00, SIFADD); + + /* Write 11 words to adapter RAM */ + for(i = 0; i < 11; i++) + SIFWRITEW(ipb_ptr[i], SIFINC); + + /* Execute SCB adapter command */ + tms380tr_exec_sifcmd(dev, CMD_EXECUTE); + + loop_cnt = INIT_MAX_LOOPCNT; /* Maximum: 11 seconds */ + + /* While remaining retries, no error and not completed */ + do { + Status = 0; + loop_cnt--; + tms380tr_wait(HALF_SECOND); + + /* Mask interesting status bits */ + Status = SIFREADW(SIFSTS); + Status &= STS_MASK; + } while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) && + ((Status & STS_ERROR) == 0) && (loop_cnt != 0)); + + if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0) + { + /* Initialization completed without error */ + i = 0; + do { /* Test if contents of SCB is valid */ + if(SCB_Test[i] != *(cb_ptr + i)) + { + printk(KERN_INFO "%s: DMA failed\n", dev->name); + /* DMA data error: wrong data in SCB */ + return -1; + } + i++; + } while(i < 6); + + i = 0; + do { /* Test if contents of SSB is valid */ + if(SSB_Test[i] != *(sb_ptr + i)) + /* DMA data error: wrong data in SSB */ + return -1; + i++; + } while (i < 8); + + return 1; /* Adapter successfully initialized */ + } + else + { + if((Status & STS_ERROR) != 0) + { + /* Initialization error occurred */ + Status = SIFREADW(SIFSTS); + Status &= STS_ERROR_MASK; + /* ShowInitialisationErrorCode(Status); */ + printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status); + return -1; /* Unrecoverable error */ + } + else + { + if(retry_cnt > 0) + { + /* Reset adapter and try init again */ + tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); + tms380tr_wait(HALF_SECOND); + } + } + } + } while(retry_cnt > 0); + + printk(KERN_INFO "%s: Retry exceeded\n", dev->name); + return -1; +} + 
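+#if 0	/* Illustration only -- not part of the driver. */
+/*
+ * The DIO access pattern used by tms380tr_init_adapter() above, and again
+ * by tms380tr_read_ram() and tms380tr_chk_irq() below, is always the same:
+ * select a page of adapter memory via SIFADX, set the start offset via
+ * SIFADD/SIFADR, then stream 16 bit words through the auto-incrementing
+ * SIFINC port.  A minimal sketch of a generic write helper built on the
+ * SIFWRITEW accessors defined above (hypothetical name, shown only to
+ * make the pattern explicit):
+ */
+static void tms380tr_dio_write_words(struct net_device *dev,
+				     unsigned short page, unsigned short offset,
+				     const unsigned short *buf, int words)
+{
+	SIFWRITEW(page, SIFADX);		/* adapter memory page, e.g. 0x0001 */
+	SIFWRITEW(offset, SIFADD);		/* start offset within that page */
+	while (words-- > 0)
+		SIFWRITEW(*buf++, SIFINC);	/* data port auto-increments */
+}
+#endif
+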
+/* + * Check for outstanding commands in command queue and tries to execute + * command immediately. Corresponding command flag in command queue is cleared. + */ +static void tms380tr_chk_outstanding_cmds(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned long Addr = 0; + + if(tp->CMDqueue == 0) + return; /* No command execution */ + + /* If SCB in use: no command */ + if(tp->ScbInUse == 1) + return; + + /* Check if adapter is opened, avoiding COMMAND_REJECT + * interrupt by the adapter! + */ + if(tp->AdapterOpenFlag == 0) + { + if(tp->CMDqueue & OC_OPEN) + { + /* Execute OPEN command */ + tp->CMDqueue ^= OC_OPEN; + + Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer); + tp->scb.Parm[0] = LOWORD(Addr); + tp->scb.Parm[1] = HIWORD(Addr); + tp->scb.CMD = OPEN; + } + else + /* No OPEN command queued, but adapter closed. Note: + * We'll try to re-open the adapter in DriverPoll() + */ + return; /* No adapter command issued */ + } + else + { + /* Adapter is open; evaluate command queue: try to execute + * outstanding commands (depending on priority!) CLOSE + * command queued + */ + if(tp->CMDqueue & OC_CLOSE) + { + tp->CMDqueue ^= OC_CLOSE; + tp->AdapterOpenFlag = 0; + tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */ + tp->scb.Parm[1] = 0; /* but should be set to zero! */ + tp->scb.CMD = CLOSE; + if(!tp->HaltInProgress) + tp->CMDqueue |= OC_OPEN; /* re-open adapter */ + else + tp->CMDqueue = 0; /* no more commands */ + } + else + { + if(tp->CMDqueue & OC_RECEIVE) + { + tp->CMDqueue ^= OC_RECEIVE; + Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer); + tp->scb.Parm[0] = LOWORD(Addr); + tp->scb.Parm[1] = HIWORD(Addr); + tp->scb.CMD = RECEIVE; + } + else + { + if(tp->CMDqueue & OC_TRANSMIT_HALT) + { + /* NOTE: TRANSMIT.HALT must be checked + * before TRANSMIT. + */ + tp->CMDqueue ^= OC_TRANSMIT_HALT; + tp->scb.CMD = TRANSMIT_HALT; + + /* Parm[0] and Parm[1] are ignored + * but should be set to zero! 
+ */ + tp->scb.Parm[0] = 0; + tp->scb.Parm[1] = 0; + } + else + { + if(tp->CMDqueue & OC_TRANSMIT) + { + /* NOTE: TRANSMIT must be + * checked after TRANSMIT.HALT + */ + if(tp->TransmitCommandActive) + { + if(!tp->TransmitHaltScheduled) + { + tp->TransmitHaltScheduled = 1; + tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ; + } + tp->TransmitCommandActive = 0; + return; + } + + tp->CMDqueue ^= OC_TRANSMIT; + tms380tr_cancel_tx_queue(tp); + Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer); + tp->scb.Parm[0] = LOWORD(Addr); + tp->scb.Parm[1] = HIWORD(Addr); + tp->scb.CMD = TRANSMIT; + tp->TransmitCommandActive = 1; + } + else + { + if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS) + { + tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS; + tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/ + tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION; + tp->scb.Parm[1] = 0; /* is ignored but should be zero */ + tp->scb.CMD = MODIFY_OPEN_PARMS; + } + else + { + if(tp->CMDqueue & OC_SET_FUNCT_ADDR) + { + tp->CMDqueue ^= OC_SET_FUNCT_ADDR; + tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr); + tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr); + tp->scb.CMD = SET_FUNCT_ADDR; + } + else + { + if(tp->CMDqueue & OC_SET_GROUP_ADDR) + { + tp->CMDqueue ^= OC_SET_GROUP_ADDR; + tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr); + tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr); + tp->scb.CMD = SET_GROUP_ADDR; + } + else + { + if(tp->CMDqueue & OC_READ_ERROR_LOG) + { + tp->CMDqueue ^= OC_READ_ERROR_LOG; + Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer); + tp->scb.Parm[0] = LOWORD(Addr); + tp->scb.Parm[1] = HIWORD(Addr); + tp->scb.CMD = READ_ERROR_LOG; + } + else + { + printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n"); + tp->CMDqueue = 0; + return; + } + } + } + } + } + } + } + } + } + + tp->ScbInUse = 1; /* Set semaphore: SCB in use. */ + + /* Execute SCB and generate IRQ when done. */ + tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST); +} + +/* + * IRQ conditions: signal loss on the ring, transmit or receive of beacon + * frames (disabled if bit 1 of OPEN option is set); report error MAC + * frame transmit (disabled if bit 2 of OPEN option is set); open or short + * circuit fault on the lobe is detected; remove MAC frame received; + * error counter overflow (255); opened adapter is the only station in ring. + * After some of the IRQs the adapter is closed! 
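+ * When the ADAPTER_CLOSED bit is reported, the handler below clears
+ * AdapterOpenFlag and re-opens the adapter itself.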
+ */ +static void tms380tr_ring_status_irq(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + + tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]); + + /* First: fill up statistics */ + if(tp->ssb.Parm[0] & SIGNAL_LOSS) + { + printk(KERN_INFO "%s: Signal Loss\n", dev->name); + tp->MacStat.line_errors++; + } + + /* Adapter is closed, but initialized */ + if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT) + { + printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n", + dev->name); + tp->MacStat.line_errors++; + } + + if(tp->ssb.Parm[0] & RING_RECOVERY) + printk(KERN_INFO "%s: Ring Recovery\n", dev->name); + + /* Counter overflow: read error log */ + if(tp->ssb.Parm[0] & COUNTER_OVERFLOW) + { + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); + tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG); + } + + /* Adapter is closed, but initialized */ + if(tp->ssb.Parm[0] & REMOVE_RECEIVED) + printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n", + dev->name); + + /* Adapter is closed, but initialized */ + if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR) + printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n", + dev->name); + + if(tp->ssb.Parm[0] & HARD_ERROR) + printk(KERN_INFO "%s: Hard Error\n", dev->name); + + if(tp->ssb.Parm[0] & SOFT_ERROR) + printk(KERN_INFO "%s: Soft Error\n", dev->name); + + if(tp->ssb.Parm[0] & TRANSMIT_BEACON) + printk(KERN_INFO "%s: Transmit Beacon\n", dev->name); + + if(tp->ssb.Parm[0] & SINGLE_STATION) + printk(KERN_INFO "%s: Single Station\n", dev->name); + + /* Check if adapter has been closed */ + if(tp->ssb.Parm[0] & ADAPTER_CLOSED) + { + printk(KERN_INFO "%s: Adapter closed (Reopening)," + "CurrentRingStat %x\n", + dev->name, tp->CurrentRingStatus); + tp->AdapterOpenFlag = 0; + tms380tr_open_adapter(dev); + } +} + +/* + * Issued if adapter has encountered an unrecoverable hardware + * or software error. + */ +static void tms380tr_chk_irq(struct net_device *dev) +{ + int i; + unsigned short AdapterCheckBlock[4]; + struct net_local *tp = netdev_priv(dev); + + tp->AdapterOpenFlag = 0; /* Adapter closed now */ + + /* Page number of adapter memory */ + SIFWRITEW(0x0001, SIFADX); + /* Address offset */ + SIFWRITEW(CHECKADDR, SIFADR); + + /* Reading 8 byte adapter check block. 
*/ + for(i = 0; i < 4; i++) + AdapterCheckBlock[i] = SIFREADW(SIFINC); + + if(tms380tr_debug > 3) + { + printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name); + for (i = 0; i < 4; i++) + printk("%04X", AdapterCheckBlock[i]); + printk("\n"); + } + + switch(AdapterCheckBlock[0]) + { + case DIO_PARITY: + printk(KERN_INFO "%s: DIO parity error\n", dev->name); + break; + + case DMA_READ_ABORT: + printk(KERN_INFO "%s DMA read operation aborted:\n", + dev->name); + switch (AdapterCheckBlock[1]) + { + case 0: + printk(KERN_INFO "Timeout\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + case 1: + printk(KERN_INFO "Parity error\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + case 2: + printk(KERN_INFO "Bus error\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + default: + printk(KERN_INFO "Unknown error.\n"); + break; + } + break; + + case DMA_WRITE_ABORT: + printk(KERN_INFO "%s: DMA write operation aborted:\n", + dev->name); + switch (AdapterCheckBlock[1]) + { + case 0: + printk(KERN_INFO "Timeout\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + case 1: + printk(KERN_INFO "Parity error\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + case 2: + printk(KERN_INFO "Bus error\n"); + printk(KERN_INFO "Address: %04X %04X\n", + AdapterCheckBlock[2], + AdapterCheckBlock[3]); + break; + + default: + printk(KERN_INFO "Unknown error.\n"); + break; + } + break; + + case ILLEGAL_OP_CODE: + printk(KERN_INFO "%s: Illegal operation code in firmware\n", + dev->name); + /* Parm[0-3]: adapter internal register R13-R15 */ + break; + + case PARITY_ERRORS: + printk(KERN_INFO "%s: Adapter internal bus parity error\n", + dev->name); + /* Parm[0-3]: adapter internal register R13-R15 */ + break; + + case RAM_DATA_ERROR: + printk(KERN_INFO "%s: RAM data error\n", dev->name); + /* Parm[0-1]: MSW/LSW address of RAM location. */ + break; + + case RAM_PARITY_ERROR: + printk(KERN_INFO "%s: RAM parity error\n", dev->name); + /* Parm[0-1]: MSW/LSW address of RAM location. */ + break; + + case RING_UNDERRUN: + printk(KERN_INFO "%s: Internal DMA underrun detected\n", + dev->name); + break; + + case INVALID_IRQ: + printk(KERN_INFO "%s: Unrecognized interrupt detected\n", + dev->name); + /* Parm[0-3]: adapter internal register R13-R15 */ + break; + + case INVALID_ERROR_IRQ: + printk(KERN_INFO "%s: Unrecognized error interrupt detected\n", + dev->name); + /* Parm[0-3]: adapter internal register R13-R15 */ + break; + + case INVALID_XOP: + printk(KERN_INFO "%s: Unrecognized XOP request detected\n", + dev->name); + /* Parm[0-3]: adapter internal register R13-R15 */ + break; + + default: + printk(KERN_INFO "%s: Unknown status", dev->name); + break; + } + + if(tms380tr_chipset_init(dev) == 1) + { + /* Restart of firmware successful */ + tp->AdapterOpenFlag = 1; + } +} + +/* + * Internal adapter pointer to RAM data are copied from adapter into + * host system. 
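+ * Called from tms380tr_open() to report the adapter RAM size.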
+ */ +static int tms380tr_read_ptr(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned short adapterram; + + tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr, + ADAPTER_INT_PTRS, 16); + tms380tr_read_ram(dev, (unsigned char *)&adapterram, + cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2); + return be16_to_cpu(adapterram); +} + +/* + * Reads a number of bytes from adapter to system memory. + */ +static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data, + unsigned short Address, int Length) +{ + int i; + unsigned short old_sifadx, old_sifadr, InWord; + + /* Save the current values */ + old_sifadx = SIFREADW(SIFADX); + old_sifadr = SIFREADW(SIFADR); + + /* Page number of adapter memory */ + SIFWRITEW(0x0001, SIFADX); + /* Address offset in adapter RAM */ + SIFWRITEW(Address, SIFADR); + + /* Copy len byte from adapter memory to system data area. */ + i = 0; + for(;;) + { + InWord = SIFREADW(SIFINC); + + *(Data + i) = HIBYTE(InWord); /* Write first byte */ + if(++i == Length) /* All is done break */ + break; + + *(Data + i) = LOBYTE(InWord); /* Write second byte */ + if (++i == Length) /* All is done break */ + break; + } + + /* Restore original values */ + SIFWRITEW(old_sifadx, SIFADX); + SIFWRITEW(old_sifadr, SIFADR); +} + +/* + * Cancel all queued packets in the transmission queue. + */ +static void tms380tr_cancel_tx_queue(struct net_local* tp) +{ + TPL *tpl; + + /* + * NOTE: There must not be an active TRANSMIT command pending, when + * this function is called. + */ + if(tp->TransmitCommandActive) + return; + + for(;;) + { + tpl = tp->TplBusy; + if(!tpl->BusyFlag) + break; + /* "Remove" TPL from busy list. */ + tp->TplBusy = tpl->NextTPLPtr; + tms380tr_write_tpl_status(tpl, 0); /* Clear VALID bit */ + tpl->BusyFlag = 0; /* "free" TPL */ + + printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl); + if (tpl->DMABuff) + dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(tpl->Skb); + } +} + +/* + * This function is called whenever a transmit interrupt is generated by the + * adapter. For a command complete interrupt, it is checked if we have to + * issue a new transmit command or not. + */ +static void tms380tr_tx_status_irq(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned char HighByte, HighAc, LowAc; + TPL *tpl; + + /* NOTE: At this point the SSB from TRANSMIT STATUS is no longer + * available, because the CLEAR SSB command has already been issued. + * + * Process all complete transmissions. + */ + + for(;;) + { + tpl = tp->TplBusy; + if(!tpl->BusyFlag || (tpl->Status + & (TX_VALID | TX_FRAME_COMPLETE)) + != TX_FRAME_COMPLETE) + { + break; + } + + /* "Remove" TPL from busy list. 
*/ + tp->TplBusy = tpl->NextTPLPtr ; + + /* Check the transmit status field only for directed frames*/ + if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0) + { + HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status); + HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte); + LowAc = GET_FRAME_STATUS_LOW_AC(HighByte); + + if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED)) + { + printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n", + dev->name, + *(unsigned long *)&tpl->MData[2+2]); + } + else + { + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Directed frame tx'd\n", + dev->name); + } + } + else + { + if(!DIRECTED_FRAME(tpl)) + { + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Broadcast frame tx'd\n", + dev->name); + } + } + + tp->MacStat.tx_packets++; + if (tpl->DMABuff) + dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); + dev_kfree_skb_irq(tpl->Skb); + tpl->BusyFlag = 0; /* "free" TPL */ + } + + if(!tp->TplFree->NextTPLPtr->BusyFlag) + netif_wake_queue(dev); +} + +/* + * Called if a frame receive interrupt is generated by the adapter. + * Check if the frame is valid and indicate it to system. + */ +static void tms380tr_rcv_status_irq(struct net_device *dev) +{ + struct net_local *tp = netdev_priv(dev); + unsigned char *ReceiveDataPtr; + struct sk_buff *skb; + unsigned int Length, Length2; + RPL *rpl; + RPL *SaveHead; + dma_addr_t dmabuf; + + /* NOTE: At this point the SSB from RECEIVE STATUS is no longer + * available, because the CLEAR SSB command has already been issued. + * + * Process all complete receives. + */ + + for(;;) + { + rpl = tp->RplHead; + if(rpl->Status & RX_VALID) + break; /* RPL still in use by adapter */ + + /* Forward RPLHead pointer to next list. */ + SaveHead = tp->RplHead; + tp->RplHead = rpl->NextRPLPtr; + + /* Get the frame size (Byte swap for Intel). + * Do this early (see workaround comment below) + */ + Length = be16_to_cpu(rpl->FrameSize); + + /* Check if the Frame_Start, Frame_End and + * Frame_Complete bits are set. + */ + if((rpl->Status & VALID_SINGLE_BUFFER_FRAME) + == VALID_SINGLE_BUFFER_FRAME) + { + ReceiveDataPtr = rpl->MData; + + /* Workaround for delayed write of FrameSize on ISA + * (FrameSize is false but valid-bit is reset) + * Frame size is set to zero when the RPL is freed. + * Length2 is there because there have also been + * cases where the FrameSize was partially written + */ + Length2 = be16_to_cpu(rpl->FrameSize); + + if(Length == 0 || Length != Length2) + { + tp->RplHead = SaveHead; + break; /* Return to tms380tr_interrupt */ + } + tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length); + + if(tms380tr_debug > 3) + printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n", + dev->name, Length, Length); + + /* Indicate the received frame to system the + * adapter does the Source-Routing padding for + * us. See: OpenOptions in tms380tr_init_opb() + */ + skb = rpl->Skb; + if(rpl->SkbStat == SKB_UNAVAILABLE) + { + /* Try again to allocate skb */ + skb = dev_alloc_skb(tp->MaxPacketSize); + if(skb == NULL) + { + /* Update Stats ?? 
*/ + } + else + { + skb_put(skb, tp->MaxPacketSize); + rpl->SkbStat = SKB_DATA_COPY; + ReceiveDataPtr = rpl->MData; + } + } + + if(skb && (rpl->SkbStat == SKB_DATA_COPY || + rpl->SkbStat == SKB_DMA_DIRECT)) + { + if(rpl->SkbStat == SKB_DATA_COPY) + skb_copy_to_linear_data(skb, ReceiveDataPtr, + Length); + + /* Deliver frame to system */ + rpl->Skb = NULL; + skb_trim(skb,Length); + skb->protocol = tr_type_trans(skb,dev); + netif_rx(skb); + } + } + else /* Invalid frame */ + { + if(rpl->Skb != NULL) + dev_kfree_skb_irq(rpl->Skb); + + /* Skip list. */ + if(rpl->Status & RX_START_FRAME) + /* Frame start bit is set -> overflow. */ + tp->MacStat.rx_errors++; + } + if (rpl->DMABuff) + dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE); + rpl->DMABuff = 0; + + /* Allocate new skb for rpl */ + rpl->Skb = dev_alloc_skb(tp->MaxPacketSize); + /* skb == NULL ? then use local buffer */ + if(rpl->Skb == NULL) + { + rpl->SkbStat = SKB_UNAVAILABLE; + rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); + rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; + } + else /* skb != NULL */ + { + rpl->Skb->dev = dev; + skb_put(rpl->Skb, tp->MaxPacketSize); + + /* Data unreachable for DMA ? then use local buffer */ + dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); + if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) + { + rpl->SkbStat = SKB_DATA_COPY; + rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); + rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; + } + else + { + /* DMA directly in skb->data */ + rpl->SkbStat = SKB_DMA_DIRECT; + rpl->FragList[0].DataAddr = htonl(dmabuf); + rpl->MData = rpl->Skb->data; + rpl->DMABuff = dmabuf; + } + } + + rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); + rpl->FrameSize = 0; + + /* Pass the last RPL back to the adapter */ + tp->RplTail->FrameSize = 0; + + /* Reset the CSTAT field in the list. */ + tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ); + + /* Current RPL becomes last one in list. */ + tp->RplTail = tp->RplTail->NextRPLPtr; + + /* Inform adapter about RPL valid. */ + tms380tr_exec_sifcmd(dev, CMD_RX_VALID); + } +} + +/* + * This function should be used whenever the status of any RPL must be + * modified by the driver, because the compiler may otherwise change the + * order of instructions such that writing the RPL status may be executed + * at an undesirable time. When this function is used, the status is + * always written when the function is called. + */ +static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) +{ + rpl->Status = Status; +} + +/* + * The function updates the statistic counters in mac->MacStat. + * It differtiates between directed and broadcast/multicast ( ==functional) + * frames. 
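+ * (The group bit in the first byte of the destination address decides
+ * whether the multicast counter is bumped as well.)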
+ */ +static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[], + unsigned int Length) +{ + tp->MacStat.rx_packets++; + tp->MacStat.rx_bytes += Length; + + /* Test functional bit */ + if(DataPtr[2] & GROUP_BIT) + tp->MacStat.multicast++; +} + +static int tms380tr_set_mac_address(struct net_device *dev, void *addr) +{ + struct net_local *tp = netdev_priv(dev); + struct sockaddr *saddr = addr; + + if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) { + printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name); + return -EIO; + } + memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); + return 0; +} + +#if TMS380TR_DEBUG > 0 +/* + * Dump Packet (data) + */ +static void tms380tr_dump(unsigned char *Data, int length) +{ + int i, j; + + for (i = 0, j = 0; i < length / 8; i++, j += 8) + { + printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n", + Data[j+0],Data[j+1],Data[j+2],Data[j+3], + Data[j+4],Data[j+5],Data[j+6],Data[j+7]); + } +} +#endif + +void tmsdev_term(struct net_device *dev) +{ + struct net_local *tp; + + tp = netdev_priv(dev); + dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), + DMA_BIDIRECTIONAL); +} + +const struct net_device_ops tms380tr_netdev_ops = { + .ndo_open = tms380tr_open, + .ndo_stop = tms380tr_close, + .ndo_start_xmit = tms380tr_send_packet, + .ndo_tx_timeout = tms380tr_timeout, + .ndo_get_stats = tms380tr_get_stats, + .ndo_set_multicast_list = tms380tr_set_multicast_list, + .ndo_set_mac_address = tms380tr_set_mac_address, +}; +EXPORT_SYMBOL(tms380tr_netdev_ops); + +int tmsdev_init(struct net_device *dev, struct device *pdev) +{ + struct net_local *tms_local; + + memset(netdev_priv(dev), 0, sizeof(struct net_local)); + tms_local = netdev_priv(dev); + init_waitqueue_head(&tms_local->wait_for_tok_int); + if (pdev->dma_mask) + tms_local->dmalimit = *pdev->dma_mask; + else + return -ENOMEM; + tms_local->pdev = pdev; + tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local, + sizeof(struct net_local), DMA_BIDIRECTIONAL); + if (tms_local->dmabuffer + sizeof(struct net_local) > + tms_local->dmalimit) + { + printk(KERN_INFO "%s: Memory not accessible for DMA\n", + dev->name); + tmsdev_term(dev); + return -ENOMEM; + } + + dev->netdev_ops = &tms380tr_netdev_ops; + dev->watchdog_timeo = HZ; + + return 0; +} + +EXPORT_SYMBOL(tms380tr_open); +EXPORT_SYMBOL(tms380tr_close); +EXPORT_SYMBOL(tms380tr_interrupt); +EXPORT_SYMBOL(tmsdev_init); +EXPORT_SYMBOL(tmsdev_term); +EXPORT_SYMBOL(tms380tr_wait); + +#ifdef MODULE + +static struct module *TMS380_module = NULL; + +int init_module(void) +{ + printk(KERN_DEBUG "%s", version); + + TMS380_module = &__this_module; + return 0; +} + +void cleanup_module(void) +{ + TMS380_module = NULL; +} +#endif + +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h new file mode 100644 index 00000000..e5a617c5 --- /dev/null +++ b/drivers/net/tokenring/tms380tr.h @@ -0,0 +1,1141 @@ +/* + * tms380tr.h: TI TMS380 Token Ring driver for Linux + * + * Authors: + * - Christoph Goos + * - Adam Fritzler + */ + +#ifndef __LINUX_TMS380TR_H +#define __LINUX_TMS380TR_H + +#ifdef __KERNEL__ + +#include + +/* module prototypes */ +extern const struct net_device_ops tms380tr_netdev_ops; +int tms380tr_open(struct net_device *dev); +int tms380tr_close(struct net_device *dev); +irqreturn_t tms380tr_interrupt(int irq, void *dev_id); +int tmsdev_init(struct net_device *dev, struct device *pdev); +void tmsdev_term(struct net_device *dev); 
+void tms380tr_wait(unsigned long time); + +#define TMS380TR_MAX_ADAPTERS 7 + +#define SEND_TIMEOUT 10*HZ + +#define TR_RCF_LONGEST_FRAME_MASK 0x0070 +#define TR_RCF_FRAME4K 0x0030 + +/*------------------------------------------------------------------*/ +/* Bit order for adapter communication with DMA */ +/* -------------------------------------------------------------- */ +/* Bit 8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7| */ +/* -------------------------------------------------------------- */ +/* The bytes in a word must be byte swapped. Also, if a double */ +/* word is used for storage, then the words, as well as the bytes, */ +/* must be swapped. */ +/* Bit order for adapter communication with DIO */ +/* -------------------------------------------------------------- */ +/* Bit 0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15| */ +/* -------------------------------------------------------------- */ +/*------------------------------------------------------------------*/ + +/* Swap words of a long. */ +#define SWAPW(x) (((x) << 16) | ((x) >> 16)) + +/* Get the low byte of a word. */ +#define LOBYTE(w) ((unsigned char)(w)) + +/* Get the high byte of a word. */ +#define HIBYTE(w) ((unsigned char)((unsigned short)(w) >> 8)) + +/* Get the low word of a long. */ +#define LOWORD(l) ((unsigned short)(l)) + +/* Get the high word of a long. */ +#define HIWORD(l) ((unsigned short)((unsigned long)(l) >> 16)) + + + +/* Token ring adapter I/O addresses for normal mode. */ + +/* + * The SIF registers. Common to all adapters. + */ +/* Basic SIF (SRSX = 0) */ +#define SIFDAT 0x00 /* SIF/DMA data. */ +#define SIFINC 0x02 /* IO Word data with auto increment. */ +#define SIFINH 0x03 /* IO Byte data with auto increment. */ +#define SIFADR 0x04 /* SIF/DMA Address. */ +#define SIFCMD 0x06 /* SIF Command. */ +#define SIFSTS 0x06 /* SIF Status. */ + +/* "Extended" SIF (SRSX = 1) */ +#define SIFACL 0x08 /* SIF Adapter Control Register. */ +#define SIFADD 0x0a /* SIF/DMA Address. -- 0x0a */ +#define SIFADX 0x0c /* 0x0c */ +#define DMALEN 0x0e /* SIF DMA length. -- 0x0e */ + +/* + * POS Registers. Only for ISA Adapters. + */ +#define POSREG 0x10 /* Adapter Program Option Select (POS) + * Register: base IO address + 16 byte. + */ +#define POSREG_2 24L /* only for TR4/16+ adapter + * base IO address + 24 byte. -- 0x18 + */ + +/* SIFCMD command codes (high-low) */ +#define CMD_INTERRUPT_ADAPTER 0x8000 /* Cause internal adapter interrupt */ +#define CMD_ADAPTER_RESET 0x4000 /* Hardware reset of adapter */ +#define CMD_SSB_CLEAR 0x2000 /* Acknowledge to adapter to + * system interrupts. + */ +#define CMD_EXECUTE 0x1000 /* Execute SCB command */ +#define CMD_SCB_REQUEST 0x0800 /* Request adapter to interrupt + * system when SCB is available for + * another command. + */ +#define CMD_RX_CONTINUE 0x0400 /* Continue receive after odd pointer + * stop. (odd pointer receive method) + */ +#define CMD_RX_VALID 0x0200 /* Now actual RPL is valid. */ +#define CMD_TX_VALID 0x0100 /* Now actual TPL is valid. (valid + * bit receive/transmit method) + */ +#define CMD_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system + * interrupt is reset. + */ +#define CMD_CLEAR_SYSTEM_IRQ 0x0080 /* Clear SYSTEM_INTERRUPT bit. + * (write: 1=ignore, 0=reset) + */ +#define EXEC_SOFT_RESET 0xFF00 /* adapter soft reset. (restart + * adapter after hardware reset) + */ + + +/* ACL commands (high-low) */ +#define ACL_SWHLDA 0x0800 /* Software hold acknowledge. */ +#define ACL_SWDDIR 0x0400 /* Data transfer direction. 
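These byte- and word-order helpers are easiest to read with a concrete value. The fragment below is illustration only (the value 0x00123456 is made up) and shows how a 32-bit quantity is split into the high-low order required by the DMA bit-order table above:

/* Illustration only: decomposing an arbitrary 32-bit value with the helpers above. */
u_int32_t addr = 0x00123456;

u_int16_t hi = HIWORD(addr);	/* 0x0012 */
u_int16_t lo = LOWORD(addr);	/* 0x3456 */
u_int32_t sw = SWAPW(addr);	/* 0x34560012: words exchanged for high-low storage */
u_int8_t  hb = HIBYTE(lo);	/* 0x34 */
u_int8_t  lb = LOBYTE(lo);	/* 0x56 */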
*/ +#define ACL_SWHRQ 0x0200 /* Pseudo DMA operation. */ +#define ACL_PSDMAEN 0x0100 /* Enable pseudo system DMA. */ +#define ACL_ARESET 0x0080 /* Adapter hardware reset command. + * (held in reset condition as + * long as bit is set) + */ +#define ACL_CPHALT 0x0040 /* Communication processor halt. + * (can only be set while ACL_ARESET + * bit is set; prevents adapter + * processor from executing code while + * downloading firmware) + */ +#define ACL_BOOT 0x0020 +#define ACL_SINTEN 0x0008 /* System interrupt enable/disable + * (1/0): can be written if ACL_ARESET + * is zero. + */ +#define ACL_PEN 0x0004 + +#define ACL_NSELOUT0 0x0002 +#define ACL_NSELOUT1 0x0001 /* NSELOUTx have a card-specific + * meaning for setting ring speed. + */ + +#define PS_DMA_MASK (ACL_SWHRQ | ACL_PSDMAEN) + + +/* SIFSTS register return codes (high-low) */ +#define STS_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system + * interrupt is valid. + */ +#define STS_INITIALIZE 0x0040 /* INITIALIZE status. (ready to + * initialize) + */ +#define STS_TEST 0x0020 /* TEST status. (BUD not completed) */ +#define STS_ERROR 0x0010 /* ERROR status. (unrecoverable + * HW error occurred) + */ +#define STS_MASK 0x00F0 /* Mask interesting status bits. */ +#define STS_ERROR_MASK 0x000F /* Get Error Code by masking the + * interrupt code bits. + */ +#define ADAPTER_INT_PTRS 0x0A00 /* Address offset of adapter internal + * pointers 01:0a00 (high-low) have to + * be read after init and before open. + */ + + +/* Interrupt Codes (only MAC IRQs) */ +#define STS_IRQ_ADAPTER_CHECK 0x0000 /* unrecoverable hardware or + * software error. + */ +#define STS_IRQ_RING_STATUS 0x0004 /* SSB is updated with ring status. */ +#define STS_IRQ_LLC_STATUS 0x0005 /* Not used in MAC-only microcode */ +#define STS_IRQ_SCB_CLEAR 0x0006 /* SCB clear, following an + * SCB_REQUEST IRQ. + */ +#define STS_IRQ_TIMER 0x0007 /* Not normally used in MAC ucode */ +#define STS_IRQ_COMMAND_STATUS 0x0008 /* SSB is updated with command + * status. + */ +#define STS_IRQ_RECEIVE_STATUS 0x000A /* SSB is updated with receive + * status. + */ +#define STS_IRQ_TRANSMIT_STATUS 0x000C /* SSB is updated with transmit + * status + */ +#define STS_IRQ_RECEIVE_PENDING 0x000E /* Not used in MAC-only microcode */ +#define STS_IRQ_MASK 0x000F /* = STS_ERROR_MASK. */ + + +/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */ +#define COMMAND_COMPLETE 0x0080 /* TRANSMIT command completed + * (avoid this!) issue another transmit + * to send additional frames. + */ +#define FRAME_COMPLETE 0x0040 /* Frame has been transmitted; + * INTERRUPT_FRAME bit was set in the + * CSTAT request; indication of possibly + * more than one frame transmissions! + * SSB.Parm[0-1]: 32 bit pointer to + * TPL of last frame. + */ +#define LIST_ERROR 0x0020 /* Error in one of the TPLs that + * compose the frame; TRANSMIT + * terminated; Parm[1-2]: 32bit pointer + * to TPL which starts the error + * frame; error details in bits 8-13. + * (14?) + */ +#define FRAME_SIZE_ERROR 0x8000 /* FRAME_SIZE does not equal the sum of + * the valid DATA_COUNT fields; + * FRAME_SIZE less than header plus + * information field. (15 bytes + + * routing field) Or if FRAME_SIZE + * was specified as zero in one list. + */ +#define TX_THRESHOLD 0x4000 /* FRAME_SIZE greater than (BUFFER_SIZE + * - 9) * TX_BUF_MAX. + */ +#define ODD_ADDRESS 0x2000 /* Odd forward pointer value is + * read on a list without END_FRAME + * indication. + */ +#define FRAME_ERROR 0x1000 /* START_FRAME bit (not) anticipated, + * but (not) set. 
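Taken together, the SIFSTS bits and the interrupt codes above describe how an interrupt is recognised and dispatched. The sketch below is only a rough outline, not the body of the real tms380tr_interrupt() (which is not part of this excerpt); the function name is invented:

/* Rough outline of an ISR dispatch based on the SIFSTS codes above. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *tp = netdev_priv(dev);
	unsigned short sifsts = tp->sifreadw(dev, SIFSTS);

	if (!(sifsts & STS_SYSTEM_IRQ))
		return IRQ_NONE;	/* interrupt was not raised by this adapter */

	switch (sifsts & STS_IRQ_MASK) {
	case STS_IRQ_COMMAND_STATUS:	/* SSB carries command completion status */
	case STS_IRQ_TRANSMIT_STATUS:	/* SSB points at the completed TPL */
	case STS_IRQ_RECEIVE_STATUS:	/* SSB points at the completed RPL */
	case STS_IRQ_RING_STATUS:	/* ring status word in SSB.Parm[0] */
		/* handle, then acknowledge the SSB as described further down */
		break;
	case STS_IRQ_ADAPTER_CHECK:
		/* unrecoverable error; adapter needs a full reset */
		break;
	default:
		break;
	}
	return IRQ_HANDLED;
}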
+ */ +#define ACCESS_PRIORITY_ERROR 0x0800 /* Access priority requested has not + * been allowed. + */ +#define UNENABLED_MAC_FRAME 0x0400 /* MAC frame has source class of zero + * or MAC frame PCF ATTN field is + * greater than one. + */ +#define ILLEGAL_FRAME_FORMAT 0x0200 /* Bit 0 or FC field was set to one. */ + + +/* + * Since we need to support some functions even if the adapter is in a + * CLOSED state, we have a (pseudo-) command queue which holds commands + * that are outstandig to be executed. + * + * Each time a command completes, an interrupt occurs and the next + * command is executed. The command queue is actually a simple word with + * a bit for each outstandig command. Therefore the commands will not be + * executed in the order they have been queued. + * + * The following defines the command code bits and the command queue: + */ +#define OC_OPEN 0x0001 /* OPEN command */ +#define OC_TRANSMIT 0x0002 /* TRANSMIT command */ +#define OC_TRANSMIT_HALT 0x0004 /* TRANSMIT_HALT command */ +#define OC_RECEIVE 0x0008 /* RECEIVE command */ +#define OC_CLOSE 0x0010 /* CLOSE command */ +#define OC_SET_GROUP_ADDR 0x0020 /* SET_GROUP_ADDR command */ +#define OC_SET_FUNCT_ADDR 0x0040 /* SET_FUNCT_ADDR command */ +#define OC_READ_ERROR_LOG 0x0080 /* READ_ERROR_LOG command */ +#define OC_READ_ADAPTER 0x0100 /* READ_ADAPTER command */ +#define OC_MODIFY_OPEN_PARMS 0x0400 /* MODIFY_OPEN_PARMS command */ +#define OC_RESTORE_OPEN_PARMS 0x0800 /* RESTORE_OPEN_PARMS command */ +#define OC_SET_FIRST_16_GROUP 0x1000 /* SET_FIRST_16_GROUP command */ +#define OC_SET_BRIDGE_PARMS 0x2000 /* SET_BRIDGE_PARMS command */ +#define OC_CONFIG_BRIDGE_PARMS 0x4000 /* CONFIG_BRIDGE_PARMS command */ + +#define OPEN 0x0300 /* C: open command. S: completion. */ +#define TRANSMIT 0x0400 /* C: transmit command. S: completion + * status. (reject: COMMAND_REJECT if + * adapter not opened, TRANSMIT already + * issued or address passed in the SCB + * not word aligned) + */ +#define TRANSMIT_HALT 0x0500 /* C: interrupt TX TPL chain; if no + * TRANSMIT command issued, the command + * is ignored (completion with TRANSMIT + * status (0x0400)!) + */ +#define RECEIVE 0x0600 /* C: receive command. S: completion + * status. (reject: COMMAND_REJECT if + * adapter not opened, RECEIVE already + * issued or address passed in the SCB + * not word aligned) + */ +#define CLOSE 0x0700 /* C: close adapter. S: completion. + * (COMMAND_REJECT if adapter not open) + */ +#define SET_GROUP_ADDR 0x0800 /* C: alter adapter group address after + * OPEN. S: completion. (COMMAND_REJECT + * if adapter not open) + */ +#define SET_FUNCT_ADDR 0x0900 /* C: alter adapter functional address + * after OPEN. S: completion. + * (COMMAND_REJECT if adapter not open) + */ +#define READ_ERROR_LOG 0x0A00 /* C: read adapter error counters. + * S: completion. (command ignored + * if adapter not open!) + */ +#define READ_ADAPTER 0x0B00 /* C: read data from adapter memory. + * (important: after init and before + * open!) S: completion. (ADAPTER_CHECK + * interrupt if undefined storage area + * read) + */ +#define MODIFY_OPEN_PARMS 0x0D00 /* C: modify some adapter operational + * parameters. (bit correspondend to + * WRAP_INTERFACE is ignored) + * S: completion. (reject: + * COMMAND_REJECT) + */ +#define RESTORE_OPEN_PARMS 0x0E00 /* C: modify some adapter operational + * parameters. (bit correspondend + * to WRAP_INTERFACE is ignored) + * S: completion. 
(reject: + * COMMAND_REJECT) + */ +#define SET_FIRST_16_GROUP 0x0F00 /* C: alter the first two bytes in + * adapter group address. + * S: completion. (reject: + * COMMAND_REJECT) + */ +#define SET_BRIDGE_PARMS 0x1000 /* C: values and conditions for the + * adapter hardware to use when frames + * are copied for forwarding. + * S: completion. (reject: + * COMMAND_REJECT) + */ +#define CONFIG_BRIDGE_PARMS 0x1100 /* C: .. + * S: completion. (reject: + * COMMAND_REJECT) + */ + +#define SPEED_4 4 +#define SPEED_16 16 /* Default transmission speed */ + + +/* Initialization Parameter Block (IPB); word alignment necessary! */ +#define BURST_SIZE 0x0018 /* Default burst size */ +#define BURST_MODE 0x9F00 /* Burst mode enable */ +#define DMA_RETRIES 0x0505 /* Magic DMA retry number... */ + +#define CYCLE_TIME 3 /* Default AT-bus cycle time: 500 ns + * (later adapter version: fix cycle time!) + */ +#define LINE_SPEED_BIT 0x80 + +/* Macro definition for the wait function. */ +#define ONE_SECOND_TICKS 1000000 +#define HALF_SECOND (ONE_SECOND_TICKS / 2) +#define ONE_SECOND (ONE_SECOND_TICKS) +#define TWO_SECONDS (ONE_SECOND_TICKS * 2) +#define THREE_SECONDS (ONE_SECOND_TICKS * 3) +#define FOUR_SECONDS (ONE_SECOND_TICKS * 4) +#define FIVE_SECONDS (ONE_SECOND_TICKS * 5) + +#define BUFFER_SIZE 2048 /* Buffers on Adapter */ + +#pragma pack(1) +typedef struct { + unsigned short Init_Options; /* Initialize with burst mode; + * LLC disabled. (MAC only) + */ + + /* Interrupt vectors the adapter places on attached system bus. */ + u_int8_t CMD_Status_IV; /* Interrupt vector: command status. */ + u_int8_t TX_IV; /* Interrupt vector: transmit. */ + u_int8_t RX_IV; /* Interrupt vector: receive. */ + u_int8_t Ring_Status_IV; /* Interrupt vector: ring status. */ + u_int8_t SCB_Clear_IV; /* Interrupt vector: SCB clear. */ + u_int8_t Adapter_CHK_IV; /* Interrupt vector: adapter check. */ + + u_int16_t RX_Burst_Size; /* Max. number of transfer cycles. */ + u_int16_t TX_Burst_Size; /* During DMA burst; even value! */ + u_int16_t DMA_Abort_Thrhld; /* Number of DMA retries. */ + + u_int32_t SCB_Addr; /* SCB address: even, word aligned, high-low */ + u_int32_t SSB_Addr; /* SSB address: even, word aligned, high-low */ +} IPB, *IPB_Ptr; +#pragma pack() + +/* + * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to + * be reopened) + */ +#define BUFFER_SIZE 2048 /* Buffers on Adapter. */ +#define TPL_SIZE 8+6*TX_FRAG_NUM /* Depending on fragments per TPL. */ +#define RPL_SIZE 14 /* (with TI firmware v2.26 handling + * up to nine fragments possible) + */ +#define TX_BUF_MIN 20 /* ??? (Stephan: calculation with */ +#define TX_BUF_MAX 40 /* BUFFER_SIZE and MAX_FRAME_SIZE) ??? + */ +#define DISABLE_EARLY_TOKEN_RELEASE 0x1000 + +/* OPEN Options (high-low) */ +#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test + * purposes; transmit data appears + * as receive data. (useful for + * testing; change: CLOSE necessary) + */ +#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON + * no RING.STATUS interrupt. + */ +#define DISABLE_SOFT_ERROR 0x0020 /* On SOFT_ERROR, no RING.STATUS + * interrupt. + */ +#define PASS_ADAPTER_MAC_FRAMES 0x0010 /* Passing unsupported MAC frames + * to system. + */ +#define PASS_ATTENTION_FRAMES 0x0008 /* All changed attention MAC frames are + * passed to the system. + */ +#define PAD_ROUTING_FIELD 0x0004 /* Routing field is padded to 18 + * bytes. 
+ */ +#define FRAME_HOLD 0x0002 /*Adapter waits for entire frame before + * initiating DMA transfer; otherwise: + * DMA transfer initiation if internal + * buffer filled. + */ +#define CONTENDER 0x0001 /* Adapter participates in the monitor + * contention process. + */ +#define PASS_BEACON_MAC_FRAMES 0x8000 /* Adapter passes beacon MAC frames + * to the system. + */ +#define EARLY_TOKEN_RELEASE 0x1000 /* Only valid in 16 Mbps operation; + * 0 = ETR. (no effect in 4 Mbps + * operation) + */ +#define COPY_ALL_MAC_FRAMES 0x0400 /* All MAC frames are copied to + * the system. (after OPEN: duplicate + * address test (DAT) MAC frame is + * first received frame copied to the + * system) + */ +#define COPY_ALL_NON_MAC_FRAMES 0x0200 /* All non MAC frames are copied to + * the system. + */ +#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer + * of each received frame; FrameSize + * of RPLs must contain internal + * BUFFER_SIZE bits for promiscuous mode. + */ +#define ENABLE_FULL_DUPLEX_SELECTION 0x2000 + /* Enable the use of full-duplex + * settings with bits in byte 22 in + * ocpl. (new feature in firmware + * version 3.09) + */ + +/* Full-duplex settings */ +#define OPEN_FULL_DUPLEX_OFF 0x0000 +#define OPEN_FULL_DUPLEX_ON 0x00c0 +#define OPEN_FULL_DUPLEX_AUTO 0x0080 + +#define PROD_ID_SIZE 18 /* Length of product ID. */ + +#define TX_FRAG_NUM 3 /* Number of fragments used in one TPL. */ +#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more + * fragments following. + */ + +/* XXX is there some better way to do this? */ +#define ISA_MAX_ADDRESS 0x00ffffff +#define PCI_MAX_ADDRESS 0xffffffff + +#pragma pack(1) +typedef struct { + u_int16_t OPENOptions; + u_int8_t NodeAddr[6]; /* Adapter node address; use ROM + * address + */ + u_int32_t GroupAddr; /* Multicast: high order + * bytes = 0xC000 + */ + u_int32_t FunctAddr; /* High order bytes = 0xC000 */ + __be16 RxListSize; /* RPL size: 0 (=26), 14, 20 or + * 26 bytes read by the adapter. + * (Depending on the number of + * fragments/list) + */ + __be16 TxListSize; /* TPL size */ + __be16 BufSize; /* Is automatically rounded up to the + * nearest nK boundary. + */ + u_int16_t FullDuplex; + u_int16_t Reserved; + u_int8_t TXBufMin; /* Number of adapter buffers reserved + * for transmission a minimum of 2 + * buffers must be allocated. + */ + u_int8_t TXBufMax; /* Maximum number of adapter buffers + * for transmit; a minimum of 2 buffers + * must be available for receive. + * Default: 6 + */ + u_int16_t ProdIDAddr[2];/* Pointer to product ID. */ +} OPB, *OPB_Ptr; +#pragma pack() + +/* + * SCB: adapter commands enabled by the host system started by writing + * CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (|SCB_REQUEST) to the SIFCMD IO + * register. (special case: | CMD_SYSTEM_IRQ for initialization) + */ +#pragma pack(1) +typedef struct { + u_int16_t CMD; /* Command code */ + u_int16_t Parm[2]; /* Pointer to Command Parameter Block */ +} SCB; /* System Command Block (32 bit physical address; big endian)*/ +#pragma pack() + +/* + * SSB: adapter command return status can be evaluated after COMMAND_STATUS + * adapter to system interrupt after reading SSB, the availability of the SSB + * has to be told the adapter by writing CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR + * in the SIFCMD IO register. + */ +#pragma pack(1) +typedef struct { + u_int16_t STS; /* Status code */ + u_int16_t Parm[3]; /* Parameter or pointer to Status Parameter + * Block. 
+ */ +} SSB; /* System Status Block (big endian - physical address) */ +#pragma pack() + +typedef struct { + unsigned short BurnedInAddrPtr; /* Pointer to adapter burned in + * address. (BIA) + */ + unsigned short SoftwareLevelPtr;/* Pointer to software level data. */ + unsigned short AdapterAddrPtr; /* Pointer to adapter addresses. */ + unsigned short AdapterParmsPtr; /* Pointer to adapter parameters. */ + unsigned short MACBufferPtr; /* Pointer to MAC buffer. (internal) */ + unsigned short LLCCountersPtr; /* Pointer to LLC counters. */ + unsigned short SpeedFlagPtr; /* Pointer to data rate flag. + * (4/16 Mbps) + */ + unsigned short AdapterRAMPtr; /* Pointer to adapter RAM found. (KB) */ +} INTPTRS; /* Adapter internal pointers */ + +#pragma pack(1) +typedef struct { + u_int8_t Line_Error; /* Line error: code violation in + * frame or in a token, or FCS error. + */ + u_int8_t Internal_Error; /* IBM specific. (Reserved_1) */ + u_int8_t Burst_Error; + u_int8_t ARI_FCI_Error; /* ARI/FCI bit zero in AMP or + * SMP MAC frame. + */ + u_int8_t AbortDelimeters; /* IBM specific. (Reserved_2) */ + u_int8_t Reserved_3; + u_int8_t Lost_Frame_Error; /* Receive of end of transmitted + * frame failed. + */ + u_int8_t Rx_Congest_Error; /* Adapter in repeat mode has not + * enough buffer space to copy incoming + * frame. + */ + u_int8_t Frame_Copied_Error; /* ARI bit not zero in frame + * addressed to adapter. + */ + u_int8_t Frequency_Error; /* IBM specific. (Reserved_4) */ + u_int8_t Token_Error; /* (active only in monitor station) */ + u_int8_t Reserved_5; + u_int8_t DMA_Bus_Error; /* DMA bus errors not exceeding the + * abort thresholds. + */ + u_int8_t DMA_Parity_Error; /* DMA parity errors not exceeding + * the abort thresholds. + */ +} ERRORTAB; /* Adapter error counters */ +#pragma pack() + + +/*--------------------- Send and Receive definitions -------------------*/ +#pragma pack(1) +typedef struct { + __be16 DataCount; /* Value 0, even and odd values are + * permitted; value is unaltered most + * significant bit set: following + * fragments last fragment: most + * significant bit is not evaluated. + * (???) + */ + __be32 DataAddr; /* Pointer to frame data fragment; + * even or odd. + */ +} Fragment; +#pragma pack() + +#define MAX_FRAG_NUMBERS 9 /* Maximal number of fragments possible to use + * in one RPL/TPL. (depending on TI firmware + * version) + */ + +/* + * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504 + * The packet size can be one of the follows: 548, 1502, 2084, 4504, 8176, + * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide + * Page 2-27. + */ +#define HEADER_SIZE (1 + 1 + 6 + 6) +#define SRC_SIZE 18 +#define MIN_DATA_SIZE 516 +#define DEFAULT_DATA_SIZE 4472 +#define MAX_DATA_SIZE 17800 + +#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE) +#define MIN_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE) +#define MAX_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE) + +/* + * Macros to deal with the frame status field. 
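Before the frame status macros below, it is worth seeing how the SCB/SSB blocks just defined fit together with the SIFCMD codes and the command queue from earlier in this header. This is a hedged sketch: the helper names are invented, any byte swapping of the SCB/SSB fields is glossed over, and only the two SIFCMD writes are taken directly from the comments above.

/* Invented helpers; tp is the net_local defined at the end of this header. */
static void example_issue_scb_command(struct net_device *dev, u_int16_t cmd_code)
{
	struct net_local *tp = netdev_priv(dev);

	tp->scb.CMD = cmd_code;		/* e.g. OPEN, TRANSMIT, CLOSE, ... */
	/* tp->scb.Parm[] would carry the big-endian, high-low pointer to the
	 * command's parameter block; the swapping is omitted in this sketch. */
	tp->sifwritew(dev, CMD_INTERRUPT_ADAPTER | CMD_EXECUTE, SIFCMD);
}

static void example_ack_ssb(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	/* After evaluating tp->ssb in a status interrupt path, the SSB must
	 * be handed back before the adapter will post another status, as the
	 * SSB comment above says. */
	tp->sifwritew(dev, CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR, SIFCMD);
}

The one-bit-per-command CMDqueue described earlier exists precisely because only one SCB command can be outstanding at a time; once a completion status has been read and acknowledged, the next queued OC_* bit is translated into its SCB command code.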
+ */ +#define AC_NOT_RECOGNIZED 0x00 +#define GROUP_BIT 0x80 +#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8)) +#define GET_FRAME_STATUS_HIGH_AC(Fs) ((unsigned char)(((Fs) & 0xC0) >> 6)) +#define GET_FRAME_STATUS_LOW_AC(Fs) ((unsigned char)(((Fs) & 0x0C) >> 2)) +#define DIRECTED_FRAME(Context) (!((Context)->MData[2] & GROUP_BIT)) + + +/*--------------------- Send Functions ---------------------------------*/ +/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */ + +#define TX_VALID 0x0080 /* R: set via TRANSMIT.VALID interrupt. + * C: always reset to zero! + */ +#define TX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero. + * C: set to one. + */ +#define TX_START_FRAME 0x0020 /* R: start of a frame: 1 + * C: unchanged. + */ +#define TX_END_FRAME 0x0010 /* R: end of a frame: 1 + * C: unchanged. + */ +#define TX_FRAME_IRQ 0x0008 /* R: request interrupt generation + * after transmission. + * C: unchanged. + */ +#define TX_ERROR 0x0004 /* R: reserved. + * C: set to one if Error occurred. + */ +#define TX_INTERFRAME_WAIT 0x0004 +#define TX_PASS_CRC 0x0002 /* R: set if CRC value is already + * calculated. (valid only in + * FRAME_START TPL) + * C: unchanged. + */ +#define TX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame + * source address and does not overwrite + * with the adapter node address. + * (valid only in FRAME_START TPL) + * + * C: unchanged. + */ +#define TX_STRIP_FS 0xFF00 /* R: reserved. + * C: if no Transmission Error, + * field contains copy of FS byte after + * stripping of frame. + */ + +/* + * Structure of Transmit Parameter Lists (TPLs) (only one frame every TPL, + * but possibly multiple TPLs for one frame) the length of the TPLs has to be + * initialized in the OPL. (OPEN parameter list) + */ +#define TPL_NUM 3 /* Number of Transmit Parameter Lists. + * !! MUST BE >= 3 !! + */ + +#pragma pack(1) +typedef struct s_TPL TPL; + +struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ + __be32 NextTPLAddr; /* Pointer to next TPL in chain; if + * pointer is odd: this is the last + * TPL. Pointing to itself can cause + * problems! + */ + volatile u_int16_t Status; /* Initialized by the adapter: + * CSTAT_REQUEST important: update least + * significant bit first! Set by the + * adapter: CSTAT_COMPLETE status. + */ + __be16 FrameSize; /* Number of bytes to be transmitted + * as a frame including AC/FC, + * Destination, Source, Routing field + * not including CRC, FS, End Delimiter + * (valid only if START_FRAME bit in + * CSTAT nonzero) must not be zero in + * any list; maximum value: (BUFFER_SIZE + * - 8) * TX_BUF_MAX sum of DataCount + * values in FragmentList must equal + * Frame_Size value in START_FRAME TPL! + * frame data fragment list. + */ + + /* TPL/RPL size in OPEN parameter list depending on maximal + * numbers of fragments used in one parameter list. + */ + Fragment FragList[TX_FRAG_NUM]; /* Maximum: nine frame fragments in one + * TPL actual version of firmware: 9 + * fragments possible. + */ +#pragma pack() + + /* Special proprietary data and precalculations */ + + TPL *NextTPLPtr; /* Pointer to next TPL in chain. */ + unsigned char *MData; + struct sk_buff *Skb; + unsigned char TPLIndex; + volatile unsigned char BusyFlag;/* Flag: TPL busy? */ + dma_addr_t DMABuff; /* DMA IO bus address from dma_map */ +}; + +/* ---------------------Receive Functions-------------------------------* + * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values. 
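To make the TX CSTAT bits and the TPL layout above concrete: for a frame that fits in a single fragment, a transmit list might be filled in roughly as follows. This is a hedged sketch, not the actual tms380tr_send_packet() path (which also manages the TplFree/TplBusy bookkeeping in struct net_local); tpl, skb and dma_handle are assumed to exist, the last coming from dma_map_single():

/* Hedged sketch: one TPL, one fragment. */
tpl->FrameSize             = cpu_to_be16(skb->len);
tpl->FragList[0].DataCount = cpu_to_be16(skb->len);	/* last fragment: no TX_MORE_FRAGMENTS */
tpl->FragList[0].DataAddr  = cpu_to_be32(dma_handle);
tpl->MData = skb->data;
tpl->Skb   = skb;

/* Hand ownership to the adapter last (see the Status field comment above);
 * the adapter is then notified, e.g. via the CMD_TX_VALID SIFCMD code. */
tpl->Status = TX_VALID | TX_START_FRAME | TX_END_FRAME | TX_FRAME_IRQ;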
+ * (high-low) + */ +#define RX_VALID 0x0080 /* R: set; tell adapter with + * RECEIVE.VALID interrupt. + * C: reset to zero. + */ +#define RX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero, + * C: set to one. + */ +#define RX_START_FRAME 0x0020 /* R: must be reset to zero. + * C: set to one on the list. + */ +#define RX_END_FRAME 0x0010 /* R: must be reset to zero. + * C: set to one on the list + * that ends the frame. + */ +#define RX_FRAME_IRQ 0x0008 /* R: request interrupt generation + * after receive. + * C: unchanged. + */ +#define RX_INTERFRAME_WAIT 0x0004 /* R: after receiving a frame: + * interrupt and wait for a + * RECEIVE.CONTINUE. + * C: unchanged. + */ +#define RX_PASS_CRC 0x0002 /* R: if set, the adapter includes + * the CRC in data passed. (last four + * bytes; valid only if FRAME_START is + * set) + * C: set, if CRC is included in + * received data. + */ +#define RX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame + * source address and does not + * overwrite with the adapter node + * address. (valid only if FRAME_START + * is set) + * C: unchanged. + */ +#define RX_RECEIVE_FS 0xFC00 /* R: reserved; must be reset to zero. + * C: on lists with START_FRAME, field + * contains frame status field from + * received frame; otherwise cleared. + */ +#define RX_ADDR_MATCH 0x0300 /* R: reserved; must be reset to zero. + * C: address match code mask. + */ +#define RX_STATUS_MASK 0x00FF /* Mask for receive status bits. */ + +#define RX_INTERN_ADDR_MATCH 0x0100 /* C: internally address match. */ +#define RX_EXTERN_ADDR_MATCH 0x0200 /* C: externally matched via + * XMATCH/XFAIL interface. + */ +#define RX_INTEXT_ADDR_MATCH 0x0300 /* C: internally and externally + * matched. + */ +#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */ + +/* Constants for Command Status Interrupt. + * COMMAND_REJECT status field bit functions (SSB.Parm[0]) + */ +#define ILLEGAL_COMMAND 0x0080 /* Set if an unknown command + * is issued to the adapter + */ +#define ADDRESS_ERROR 0x0040 /* Set if any address field in + * the SCB is odd. (not word aligned) + */ +#define ADAPTER_OPEN 0x0020 /* Command issued illegal with + * open adapter. + */ +#define ADAPTER_CLOSE 0x0010 /* Command issued illegal with + * closed adapter. + */ +#define SAME_COMMAND 0x0008 /* Command issued with same command + * already executing. + */ + +/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */ +#define NODE_ADDR_ERROR 0x0040 /* Wrong address or BIA read + * zero address. + */ +#define LIST_SIZE_ERROR 0x0020 /* If List_Size value not in 0, + * 14, 20, 26. + */ +#define BUF_SIZE_ERROR 0x0010 /* Not enough available memory for + * two buffers. + */ +#define TX_BUF_COUNT_ERROR 0x0004 /* Remaining receive buffers less than + * two. + */ +#define OPEN_ERROR 0x0002 /* Error during ring insertion; more + * information in bits 8-15. + */ + +/* Standard return codes */ +#define GOOD_COMPLETION 0x0080 /* =OPEN_SUCCESSFULL */ +#define INVALID_OPEN_OPTION 0x0001 /* OPEN options are not supported by + * the adapter. + */ + +/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB) */ +#define OPEN_PHASES_MASK 0xF000 /* Check only the bits 8-11. */ +#define LOBE_MEDIA_TEST 0x1000 +#define PHYSICAL_INSERTION 0x2000 +#define ADDRESS_VERIFICATION 0x3000 +#define PARTICIPATION_IN_RING_POLL 0x4000 +#define REQUEST_INITIALISATION 0x5000 +#define FULLDUPLEX_CHECK 0x6000 + +/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */ +#define OPEN_ERROR_CODES_MASK 0x0F00 /* Check only the bits 12-15. 
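On the receive side, the request bits above combine into RX_READY. Re-arming a receive list (the RPL structure is defined further down) after its buffer has been handed to the network stack might look roughly like this; a hedged sketch where rpl and tp are assumed, the buffer is assumed to have been remapped with dma_map_single(), and the exact SIFCMD bit combination used by the driver proper may differ:

/* Hedged sketch: recycling one RPL for the next frame. */
rpl->FragList[0].DataCount = cpu_to_be16(tp->MaxPacketSize);
rpl->FragList[0].DataAddr  = cpu_to_be32(rpl->DMABuff);	/* freshly mapped skb data */
rpl->Status = RX_READY;			/* RX_VALID | RX_FRAME_IRQ */

/* Tell the adapter the list is valid again (valid-bit receive method). */
tp->sifwritew(dev, CMD_INTERRUPT_ADAPTER | CMD_RX_VALID, SIFCMD);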
*/ +#define OPEN_FUNCTION_FAILURE 0x0100 /* Unable to transmit to itself or + * frames received before insertion. + */ +#define OPEN_SIGNAL_LOSS 0x0200 /* Signal loss condition detected at + * receiver. + */ +#define OPEN_TIMEOUT 0x0500 /* Insertion timer expired before + * logical insertion. + */ +#define OPEN_RING_FAILURE 0x0600 /* Unable to receive own ring purge + * MAC frames. + */ +#define OPEN_RING_BEACONING 0x0700 /* Beacon MAC frame received after + * ring insertion. + */ +#define OPEN_DUPLICATE_NODEADDR 0x0800 /* Other station in ring found + * with the same address. + */ +#define OPEN_REQUEST_INIT 0x0900 /* RPS present but does not respond. */ +#define OPEN_REMOVE_RECEIVED 0x0A00 /* Adapter received a remove adapter + * MAC frame. + */ +#define OPEN_FULLDUPLEX_SET 0x0D00 /* Got this with full duplex on when + * trying to connect to a normal ring. + */ + +/* SET_BRIDGE_PARMS return codes: */ +#define BRIDGE_INVALID_MAX_LEN 0x4000 /* MAX_ROUTING_FIELD_LENGTH odd, + * less than 6 or > 30. + */ +#define BRIDGE_INVALID_SRC_RING 0x2000 /* SOURCE_RING number zero, too large + * or = TARGET_RING. + */ +#define BRIDGE_INVALID_TRG_RING 0x1000 /* TARGET_RING number zero, too large + * or = SOURCE_RING. + */ +#define BRIDGE_INVALID_BRDGE_NO 0x0800 /* BRIDGE_NUMBER too large. */ +#define BRIDGE_INVALID_OPTIONS 0x0400 /* Invalid bridge options. */ +#define BRIDGE_DIAGS_FAILED 0x0200 /* Diagnostics of TMS380SRA failed. */ +#define BRIDGE_NO_SRA 0x0100 /* The TMS380SRA does not exist in HW + * configuration. + */ + +/* + * Bring Up Diagnostics error codes. + */ +#define BUD_INITIAL_ERROR 0x0 +#define BUD_CHECKSUM_ERROR 0x1 +#define BUD_ADAPTER_RAM_ERROR 0x2 +#define BUD_INSTRUCTION_ERROR 0x3 +#define BUD_CONTEXT_ERROR 0x4 +#define BUD_PROTOCOL_ERROR 0x5 +#define BUD_INTERFACE_ERROR 0x6 + +/* BUD constants */ +#define BUD_MAX_RETRIES 3 +#define BUD_MAX_LOOPCNT 6 +#define BUD_TIMEOUT 3000 + +/* Initialization constants */ +#define INIT_MAX_RETRIES 3 /* Maximum three retries. */ +#define INIT_MAX_LOOPCNT 22 /* Maximum loop counts. */ + +/* RING STATUS field values (high/low) */ +#define SIGNAL_LOSS 0x0080 /* Loss of signal on the ring + * detected. + */ +#define HARD_ERROR 0x0040 /* Transmitting or receiving beacon + * frames. + */ +#define SOFT_ERROR 0x0020 /* Report error MAC frame + * transmitted. + */ +#define TRANSMIT_BEACON 0x0010 /* Transmitting beacon frames on the + * ring. + */ +#define LOBE_WIRE_FAULT 0x0008 /* Open or short circuit in the + * cable to concentrator; adapter + * closed. + */ +#define AUTO_REMOVAL_ERROR 0x0004 /* Lobe wrap test failed, deinserted; + * adapter closed. + */ +#define REMOVE_RECEIVED 0x0001 /* Received a remove ring station MAC + * MAC frame request; adapter closed. + */ +#define COUNTER_OVERFLOW 0x8000 /* Overflow of one of the adapters + * error counters; READ.ERROR.LOG. + */ +#define SINGLE_STATION 0x4000 /* Adapter is the only station on the + * ring. + */ +#define RING_RECOVERY 0x2000 /* Claim token MAC frames on the ring; + * reset after ring purge frame. + */ + +#define ADAPTER_CLOSED (LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR |\ + REMOVE_RECEIVED) + +/* Adapter_check_block.Status field bit assignments: */ +#define DIO_PARITY 0x8000 /* Adapter detects bad parity + * through direct I/O access. + */ +#define DMA_READ_ABORT 0x4000 /* Aborting DMA read operation + * from system Parm[0]: 0=timeout, + * 1=parity error, 2=bus error; + * Parm[1]: 32 bit pointer to host + * system address at failure. 
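The ring status word that arrives with a STS_IRQ_RING_STATUS interrupt is normally tested against the masks above. A hedged sketch follows; byte-order fix-ups and the driver's actual logging are omitted, and the variables are assumed to exist in the interrupt path:

/* Hedged sketch: reacting to a ring status word taken from SSB.Parm[0]. */
unsigned short ring_status = tp->ssb.Parm[0];	/* byte-order handling omitted */

if (ring_status & ADAPTER_CLOSED) {
	/* LOBE_WIRE_FAULT, AUTO_REMOVAL_ERROR or REMOVE_RECEIVED:
	 * the adapter has left the ring and must be reopened. */
	netif_stop_queue(dev);
}
if (ring_status & COUNTER_OVERFLOW)
	tp->CMDqueue |= OC_READ_ERROR_LOG;	/* collect the error counters */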
+ */ +#define DMA_WRITE_ABORT 0x2000 /* Aborting DMA write operation + * to system. (parameters analogous to + * DMA_READ_ABORT) + */ +#define ILLEGAL_OP_CODE 0x1000 /* Illegal operation code in the + * the adapters firmware Parm[0]-2: + * communications processor registers + * R13-R15. + */ +#define PARITY_ERRORS 0x0800 /* Adapter detects internal bus + * parity error. + */ +#define RAM_DATA_ERROR 0x0080 /* Valid only during RAM testing; + * RAM data error Parm[0-1]: 32 bit + * pointer to RAM location. + */ +#define RAM_PARITY_ERROR 0x0040 /* Valid only during RAM testing; + * RAM parity error Parm[0-1]: 32 bit + * pointer to RAM location. + */ +#define RING_UNDERRUN 0x0020 /* Internal DMA underrun when + * transmitting onto ring. + */ +#define INVALID_IRQ 0x0008 /* Unrecognized interrupt generated + * internal to adapter Parm[0-2]: + * adapter register R13-R15. + */ +#define INVALID_ERROR_IRQ 0x0004 /* Unrecognized error interrupt + * generated Parm[0-2]: adapter register + * R13-R15. + */ +#define INVALID_XOP 0x0002 /* Unrecognized XOP request in + * communication processor Parm[0-2]: + * adapter register R13-R15. + */ +#define CHECKADDR 0x05E0 /* Adapter check status information + * address offset. + */ +#define ROM_PAGE_0 0x0000 /* Adapter ROM page 0. */ + +/* + * RECEIVE.STATUS interrupt result SSB values: (high-low) + * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0]) + */ +#define RX_COMPLETE 0x0080 /* SSB.Parm[0]; SSB.Parm[1]: 32 + * bit pointer to last RPL. + */ +#define RX_SUSPENDED 0x0040 /* SSB.Parm[0]; SSB.Parm[1]: 32 + * bit pointer to RPL with odd + * forward pointer. + */ + +/* Valid receive CSTAT: */ +#define RX_FRAME_CONTROL_BITS (RX_VALID | RX_START_FRAME | RX_END_FRAME | \ + RX_FRAME_COMPLETE) +#define VALID_SINGLE_BUFFER_FRAME (RX_START_FRAME | RX_END_FRAME | \ + RX_FRAME_COMPLETE) + +typedef enum SKB_STAT SKB_STAT; +enum SKB_STAT { + SKB_UNAVAILABLE, + SKB_DMA_DIRECT, + SKB_DATA_COPY +}; + +/* Receive Parameter List (RPL) The length of the RPLs has to be initialized + * in the OPL. (OPEN parameter list) + */ +#define RPL_NUM 3 + +#define RX_FRAG_NUM 1 /* Maximal number of used fragments in one RPL. + * (up to firmware v2.24: 3, now: up to 9) + */ + +#pragma pack(1) +typedef struct s_RPL RPL; +struct s_RPL { /* Receive Parameter List */ + __be32 NextRPLAddr; /* Pointer to next RPL in chain + * (normalized = physical 32 bit + * address) if pointer is odd: this + * is last RPL. Pointing to itself can + * cause problems! + */ + volatile u_int16_t Status; /* Set by creation of Receive Parameter + * List RECEIVE_CSTAT_COMPLETE set by + * adapter in lists that start or end + * a frame. + */ + volatile __be16 FrameSize; /* Number of bytes received as a + * frame including AC/FC, Destination, + * Source, Routing field not including + * CRC, FS (Frame Status), End Delimiter + * (valid only if START_FRAME bit in + * CSTAT nonzero) must not be zero in + * any list; maximum value: (BUFFER_SIZE + * - 8) * TX_BUF_MAX sum of DataCount + * values in FragmentList must equal + * Frame_Size value in START_FRAME TPL! + * frame data fragment list + */ + + /* TPL/RPL size in OPEN parameter list depending on maximal numbers + * of fragments used in one parameter list. + */ + Fragment FragList[RX_FRAG_NUM]; /* Maximum: nine frame fragments in + * one TPL. Actual version of firmware: + * 9 fragments possible. + */ +#pragma pack() + + /* Special proprietary data and precalculations. */ + RPL *NextRPLPtr; /* Logical pointer to next RPL in chain. 
*/ + unsigned char *MData; + struct sk_buff *Skb; + SKB_STAT SkbStat; + int RPLIndex; + dma_addr_t DMABuff; /* DMA IO bus address from dma_map */ +}; + +/* Information that need to be kept for each board. */ +typedef struct net_local { +#pragma pack(1) + IPB ipb; /* Initialization Parameter Block. */ + SCB scb; /* System Command Block: system to adapter + * communication. + */ + SSB ssb; /* System Status Block: adapter to system + * communication. + */ + OPB ocpl; /* Open Options Parameter Block. */ + + ERRORTAB errorlogtable; /* Adapter statistic error counters. + * (read from adapter memory) + */ + unsigned char ProductID[PROD_ID_SIZE + 1]; /* Product ID */ +#pragma pack() + + TPL Tpl[TPL_NUM]; + TPL *TplFree; + TPL *TplBusy; + unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE]; + + RPL Rpl[RPL_NUM]; + RPL *RplHead; + RPL *RplTail; + unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE]; + + struct device *pdev; + int DataRate; + unsigned char ScbInUse; + unsigned short CMDqueue; + + unsigned long AdapterOpenFlag:1; + unsigned long AdapterVirtOpenFlag:1; + unsigned long OpenCommandIssued:1; + unsigned long TransmitCommandActive:1; + unsigned long TransmitHaltScheduled:1; + unsigned long HaltInProgress:1; + unsigned long LobeWireFaultLogged:1; + unsigned long ReOpenInProgress:1; + unsigned long Sleeping:1; + + unsigned long LastOpenStatus; + unsigned short CurrentRingStatus; + unsigned long MaxPacketSize; + + unsigned long StartTime; + unsigned long LastSendTime; + + struct tr_statistics MacStat; /* MAC statistics structure */ + + unsigned long dmalimit; /* the max DMA address (ie, ISA) */ + dma_addr_t dmabuffer; /* the DMA bus address corresponding to + priv. Might be different from virt_to_bus() + for architectures with IO MMU (Alpha) */ + + struct timer_list timer; + + wait_queue_head_t wait_for_tok_int; + + INTPTRS intptrs; /* Internal adapter pointer. Must be read + * before OPEN command. + */ + unsigned short (*setnselout)(struct net_device *); + unsigned short (*sifreadb)(struct net_device *, unsigned short); + void (*sifwriteb)(struct net_device *, unsigned short, unsigned short); + unsigned short (*sifreadw)(struct net_device *, unsigned short); + void (*sifwritew)(struct net_device *, unsigned short, unsigned short); + + spinlock_t lock; /* SMP protection */ + void *tmspriv; +} NET_LOCAL; + +#endif /* __KERNEL__ */ +#endif /* __LINUX_TMS380TR_H */ diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c new file mode 100644 index 00000000..d3e788a9 --- /dev/null +++ b/drivers/net/tokenring/tmspci.c @@ -0,0 +1,249 @@ +/* + * tmspci.c: A generic network driver for TMS380-based PCI token ring cards. + * + * Written 1999 by Adam Fritzler + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver module supports the following cards: + * - SysKonnect TR4/16(+) PCI (SK-4590) + * - SysKonnect TR4/16 PCI (SK-4591) + * - Compaq TR 4/16 PCI + * - Thomas-Conrad TC4048 4/16 PCI + * - 3Com 3C339 Token Link Velocity + * + * Maintainer(s): + * AF Adam Fritzler + * + * Modification History: + * 30-Dec-99 AF Split off from the tms380tr driver. + * 22-Jan-00 AF Updated to use indirect read/writes + * 23-Nov-00 JG New PCI API, cleanups + * + * TODO: + * 1. 
See if we can use MMIO instead of port accesses + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tms380tr.h" + +static char version[] __devinitdata = +"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n"; + +#define TMS_PCI_IO_EXTENT 32 + +struct card_info { + unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */ + char *name; +}; + +static struct card_info card_info_table[] = { + { {0x03, 0x01}, "Compaq 4/16 TR PCI"}, + { {0x03, 0x01}, "SK NET TR 4/16 PCI"}, + { {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"}, + { {0x03, 0x01}, "3Com Token Link Velocity"}, +}; + +static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = { + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, + { } /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl); + +MODULE_LICENSE("GPL"); + +static void tms_pci_read_eeprom(struct net_device *dev); +static unsigned short tms_pci_setnselout_pins(struct net_device *dev); + +static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg) +{ + return inb(dev->base_addr + reg); +} + +static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg) +{ + return inw(dev->base_addr + reg); +} + +static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outb(val, dev->base_addr + reg); +} + +static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) +{ + outw(val, dev->base_addr + reg); +} + +static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int versionprinted; + struct net_device *dev; + struct net_local *tp; + int ret; + unsigned int pci_irq_line; + unsigned long pci_ioaddr; + struct card_info *cardinfo = &card_info_table[ent->driver_data]; + + if (versionprinted++ == 0) + printk("%s", version); + + if (pci_enable_device(pdev)) + return -EIO; + + /* Remove I/O space marker in bit 0. */ + pci_irq_line = pdev->irq; + pci_ioaddr = pci_resource_start (pdev, 0); + + /* At this point we have found a valid card. 
*/ + dev = alloc_trdev(sizeof(struct net_local)); + if (!dev) + return -ENOMEM; + + if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) { + ret = -EBUSY; + goto err_out_trdev; + } + + dev->base_addr = pci_ioaddr; + dev->irq = pci_irq_line; + dev->dma = 0; + + dev_info(&pdev->dev, "%s\n", cardinfo->name); + dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq); + + tms_pci_read_eeprom(dev); + + dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr); + + ret = tmsdev_init(dev, &pdev->dev); + if (ret) { + dev_info(&pdev->dev, "unable to get memory for dev->priv.\n"); + goto err_out_region; + } + + tp = netdev_priv(dev); + tp->setnselout = tms_pci_setnselout_pins; + + tp->sifreadb = tms_pci_sifreadb; + tp->sifreadw = tms_pci_sifreadw; + tp->sifwriteb = tms_pci_sifwriteb; + tp->sifwritew = tms_pci_sifwritew; + + memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1); + + tp->tmspriv = cardinfo; + + dev->netdev_ops = &tms380tr_netdev_ops; + + ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + goto err_out_tmsdev; + + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_netdev(dev); + if (ret) + goto err_out_irq; + + return 0; + +err_out_irq: + free_irq(pdev->irq, dev); +err_out_tmsdev: + pci_set_drvdata(pdev, NULL); + tmsdev_term(dev); +err_out_region: + release_region(pci_ioaddr, TMS_PCI_IO_EXTENT); +err_out_trdev: + free_netdev(dev); + return ret; +} + +/* + * Reads MAC address from adapter RAM, which should've read it from + * the onboard ROM. + * + * Calling this on a board that does not support it can be a very + * dangerous thing. The Madge board, for instance, will lock your + * machine hard when this is called. Luckily, its supported in a + * separate driver. --ASF + */ +static void tms_pci_read_eeprom(struct net_device *dev) +{ + int i; + + /* Address: 0000:0000 */ + tms_pci_sifwritew(dev, 0, SIFADX); + tms_pci_sifwritew(dev, 0, SIFADR); + + /* Read six byte MAC address data */ + dev->addr_len = 6; + for(i = 0; i < 6; i++) + dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8; +} + +static unsigned short tms_pci_setnselout_pins(struct net_device *dev) +{ + unsigned short val = 0; + struct net_local *tp = netdev_priv(dev); + struct card_info *cardinfo = tp->tmspriv; + + if(tp->DataRate == SPEED_4) + val |= cardinfo->nselout[0]; /* Set 4Mbps */ + else + val |= cardinfo->nselout[1]; /* Set 16Mbps */ + return val; +} + +static void __devexit tms_pci_detach (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + BUG_ON(!dev); + unregister_netdev(dev); + release_region(dev->base_addr, TMS_PCI_IO_EXTENT); + free_irq(dev->irq, dev); + tmsdev_term(dev); + free_netdev(dev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver tms_pci_driver = { + .name = "tmspci", + .id_table = tmspci_pci_tbl, + .probe = tms_pci_attach, + .remove = __devexit_p(tms_pci_detach), +}; + +static int __init tms_pci_init (void) +{ + return pci_register_driver(&tms_pci_driver); +} + +static void __exit tms_pci_rmmod (void) +{ + pci_unregister_driver (&tms_pci_driver); +} + +module_init(tms_pci_init); +module_exit(tms_pci_rmmod); + -- cgit v1.2.3