From a4dd1adf4a718f4eec35a561be068e2e8f885ae2 Mon Sep 17 00:00:00 2001 From: Imre Kaloz Date: Sun, 27 Apr 2008 17:03:01 +0000 Subject: add preliminary support for Storm SL3512 based devices, not ready yet SVN-Revision: 10956 --- target/linux/storm/patches/1002-gmac.patch | 18584 +++++++++++++++++++++++++++ 1 file changed, 18584 insertions(+) create mode 100644 target/linux/storm/patches/1002-gmac.patch (limited to 'target/linux/storm/patches/1002-gmac.patch') diff --git a/target/linux/storm/patches/1002-gmac.patch b/target/linux/storm/patches/1002-gmac.patch new file mode 100644 index 0000000000..82c72fbc35 --- /dev/null +++ b/target/linux/storm/patches/1002-gmac.patch @@ -0,0 +1,18584 @@ +Index: linux-2.6.23.16/drivers/net/sl2312_emac.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.23.16/drivers/net/sl2312_emac.c 2008-03-15 16:59:16.361058585 +0200 +@@ -0,0 +1,4604 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BIG_ENDIAN 0 ++ ++#define GMAC_DEBUG 0 ++ ++#define GMAC_PHY_IF 2 ++ ++/* define PHY address */ ++#define HPHY_ADDR 0x01 ++#define GPHY_ADDR 0x02 ++ ++#define CONFIG_ADM_6999 1 ++/* define chip information */ ++#define DRV_NAME "SL2312" ++#define DRV_VERSION "0.1.1" ++#define SL2312_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION ++ ++/* define TX/RX descriptor parameter */ ++#define MAX_ETH_FRAME_SIZE 1920 ++#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE ++#define TX_DESC_NUM 128 ++#define TX_BUF_TOT_LEN (TX_BUF_SIZE * TX_DESC_NUM) ++#define RX_BUF_SIZE MAX_ETH_FRAME_SIZE ++#define RX_DESC_NUM 256 ++#define RX_BUF_TOT_LEN (RX_BUF_SIZE * RX_DESC_NUM) ++#define MAX_ISR_WORK 20 ++ ++unsigned int int_status = 0; ++ ++/* define GMAC base address */ ++#define GMAC_PHYSICAL_BASE_ADDR (SL2312_GMAC_BASE) ++#define GMAC_BASE_ADDR (IO_ADDRESS(GMAC_PHYSICAL_BASE_ADDR)) ++#define GMAC_GLOBAL_BASE_ADDR (IO_ADDRESS(SL2312_GLOBAL_BASE)) ++ ++#define GMAC0_BASE (IO_ADDRESS(SL2312_GMAC0_BASE)) ++#define GMAC1_BASE (IO_ADDRESS(SL2312_GMAC1_BASE)) ++ ++/* memory management utility */ ++#define DMA_MALLOC(size,handle) pci_alloc_consistent(NULL,size,handle) ++#define DMA_MFREE(mem,size,handle) pci_free_consistent(NULL,size,mem,handle) ++ ++//#define gmac_read_reg(offset) (readl(GMAC_BASE_ADDR + offset)) ++//#define gmac_write_reg(offset,data,mask) writel( (gmac_read_reg(offset)&~mask) |(data&mask),(GMAC_BASE_ADDR+offset)) ++ ++/* define owner bit */ ++#define CPU 0 ++#define DMA 1 ++ ++#define ACTIVE 1 ++#define NONACTIVE 0 ++ ++#define CONFIG_SL_NAPI ++ ++#ifndef CONFIG_SL2312_MPAGE ++#define CONFIG_SL2312_MPAGE ++#endif ++ ++#ifdef CONFIG_SL2312_MPAGE ++#include ++#include ++#include ++#endif ++ ++#ifndef CONFIG_TXINT_DISABLE ++//#define CONFIG_TXINT_DISABLE ++#endif ++ ++enum phy_state ++{ ++ LINK_DOWN = 0, ++ LINK_UP = 1 ++}; ++ ++ ++/* transmit timeout value */ ++#define TX_TIMEOUT (6*HZ) ++ ++/***************************************/ ++/* the offset address of GMAC register */ ++/***************************************/ ++enum GMAC_REGISTER { ++ GMAC_STA_ADD0 = 0x0000, ++ GMAC_STA_ADD1 = 0x0004, ++ GMAC_STA_ADD2 = 0x0008, ++ GMAC_RX_FLTR = 0x000c, ++ GMAC_MCAST_FIL0 = 0x0010, ++ GMAC_MCAST_FIL1 = 0x0014, ++ GMAC_CONFIG0 = 0x0018, ++ GMAC_CONFIG1 = 0x001c, ++ GMAC_CONFIG2 = 0x0020, ++ GMAC_BNCR = 
0x0024, ++ GMAC_RBNR = 0x0028, ++ GMAC_STATUS = 0x002c, ++ GMAC_IN_DISCARDS= 0x0030, ++ GMAC_IN_ERRORS = 0x0034, ++ GMAC_IN_MCAST = 0x0038, ++ GMAC_IN_BCAST = 0x003c, ++ GMAC_IN_MAC1 = 0x0040, ++ GMAC_IN_MAC2 = 0x0044 ++}; ++ ++/*******************************************/ ++/* the offset address of GMAC DMA register */ ++/*******************************************/ ++enum GMAC_DMA_REGISTER { ++ GMAC_DMA_DEVICE_ID = 0xff00, ++ GMAC_DMA_STATUS = 0xff04, ++ GMAC_TXDMA_CTRL = 0xff08, ++ GMAC_TXDMA_FIRST_DESC = 0xff0c, ++ GMAC_TXDMA_CURR_DESC = 0xff10, ++ GMAC_RXDMA_CTRL = 0xff14, ++ GMAC_RXDMA_FIRST_DESC = 0xff18, ++ GMAC_RXDMA_CURR_DESC = 0xff1c, ++}; ++ ++/*******************************************/ ++/* the register structure of GMAC */ ++/*******************************************/ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_0004 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int sta_add2_l16 : 16; /* station MAC address2 bits 15 to 0 */ ++ unsigned int sta_add1_h16 : 16; /* station MAC address1 bits 47 to 32 */ ++#else ++ unsigned int sta_add1_h16 : 16; /* station MAC address1 bits 47 to 32 */ ++ unsigned int sta_add2_l16 : 16; /* station MAC address2 bits 15 to 0 */ ++#endif ++ } bits; ++} GMAC_STA_ADD1_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_000c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 27; ++ unsigned int error : 1; /* enable receive of all error frames */ ++ unsigned int promiscuous : 1; /* enable receive of all frames */ ++ unsigned int broadcast : 1; /* enable receive of broadcast frames */ ++ unsigned int multicast : 1; /* enable receive of multicast frames that pass multicast filter */ ++ unsigned int unicast : 1; /* enable receive of unicast frames that are sent to STA address */ ++#else ++ unsigned int unicast : 1; /* enable receive of unicast frames that are sent to STA address */ ++ unsigned int multicast : 1; /* enable receive of multicast frames that pass multicast filter */ ++ unsigned int broadcast : 1; /* enable receive of broadcast frames */ ++ unsigned int promiscuous : 1; /* enable receive of all frames */ ++ unsigned int error : 1; /* enable receive of all error frames */ ++ unsigned int : 27; ++#endif ++ } bits; ++} GMAC_RX_FLTR_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_0018 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 10; ++ unsigned int inv_rx_clk : 1; /* Inverse RX Clock */ ++ unsigned int rising_latch : 1; ++ unsigned int rx_tag_remove : 1; /* Remove Rx VLAN tag */ ++ unsigned int ipv6_tss_rx_en : 1; /* IPv6 TSS RX enable */ ++ unsigned int ipv4_tss_rx_en : 1; /* IPv4 TSS RX enable */ ++ unsigned int rgmii_en : 1; /* RGMII in-band status enable */ ++ unsigned int tx_fc_en : 1; /* TX flow control enable */ ++ unsigned int rx_fc_en : 1; /* RX flow control enable */ ++ unsigned int sim_test : 1; /* speed up timers in simulation */ ++ unsigned int dis_col : 1; /* disable 16 collisions abort function */ ++ unsigned int dis_bkoff : 1; /* disable back-off function */ ++ unsigned int max_len : 3; /* maximum receive frame length allowed */ ++ unsigned int adj_ifg : 4; /* adjust IFG from 96+/-56 */ ++ unsigned int : 1; /* reserved */ ++ unsigned int loop_back : 1; /* transmit data loopback enable */ ++ unsigned int dis_rx : 1; /* disable receive */ ++ unsigned int dis_tx : 1; /* disable transmit */ ++#else ++ unsigned int dis_tx : 1; /* disable transmit */ ++ unsigned int dis_rx : 1; /* disable receive */ ++ unsigned int loop_back : 1; /* transmit data loopback enable */ ++ unsigned int : 1; /* reserved */ ++ 
unsigned int adj_ifg : 4; /* adjust IFG from 96+/-56 */ ++ unsigned int max_len : 3; /* maximum receive frame length allowed */ ++ unsigned int dis_bkoff : 1; /* disable back-off function */ ++ unsigned int dis_col : 1; /* disable 16 collisions abort function */ ++ unsigned int sim_test : 1; /* speed up timers in simulation */ ++ unsigned int rx_fc_en : 1; /* RX flow control enable */ ++ unsigned int tx_fc_en : 1; /* TX flow control enable */ ++ unsigned int rgmii_en : 1; /* RGMII in-band status enable */ ++ unsigned int ipv4_tss_rx_en : 1; /* IPv4 TSS RX enable */ ++ unsigned int ipv6_tss_rx_en : 1; /* IPv6 TSS RX enable */ ++ unsigned int rx_tag_remove : 1; /* Remove Rx VLAN tag */ ++ unsigned int rising_latch : 1; ++ unsigned int inv_rx_clk : 1; /* Inverse RX Clock */ ++ unsigned int : 10; ++#endif ++ } bits; ++} GMAC_CONFIG0_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_001c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 28; ++ unsigned int buf_size : 4; /* per packet buffer size */ ++#else ++ unsigned int buf_size : 4; /* per packet buffer size */ ++ unsigned int : 28; ++#endif ++ } bits; ++} GMAC_CONFIG1_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_0020 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int rel_threshold : 16; /* flow control release threshold */ ++ unsigned int set_threshold : 16; /* flow control set threshold */ ++#else ++ unsigned int set_threshold : 16; /* flow control set threshold */ ++ unsigned int rel_threshold : 16; /* flow control release threshold */ ++#endif ++ } bits; ++} GMAC_CONFIG2_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_0024 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 16; ++ unsigned int buf_num : 16; /* return buffer number from software */ ++#else ++ unsigned int buf_num : 16; /* return buffer number from software */ ++ unsigned int : 16; ++#endif ++ } bits; ++} GMAC_BNCR_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_0028 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 16; ++ unsigned int buf_remain : 16; /* remaining buffer number */ ++#else ++ unsigned int buf_remain : 16; /* remaining buffer number */ ++ unsigned int : 16; ++#endif ++ } bits; ++} GMAC_RBNR_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_002c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 25; ++ unsigned int mii_rmii : 2; /* PHY interface type */ ++ unsigned int phy_mode : 1; /* PHY interface mode in 10M-bps */ ++ unsigned int duplex : 1; /* duplex mode */ ++ unsigned int speed : 2; /* link speed(00->2.5M 01->25M 10->125M) */ ++ unsigned int link : 1; /* link status */ ++#else ++ unsigned int link : 1; /* link status */ ++ unsigned int speed : 2; /* link speed(00->2.5M 01->25M 10->125M) */ ++ unsigned int duplex : 1; /* duplex mode */ ++ unsigned int phy_mode : 1; /* PHY interface mode in 10M-bps */ ++ unsigned int mii_rmii : 2; /* PHY interface type */ ++ unsigned int : 25; ++#endif ++ } bits; ++} GMAC_STATUS_T; ++ ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit1_009 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 10; ++ unsigned int tx_fail : 1; /* Tx fail interrupt */ ++ unsigned int cnt_full : 1; /* MIB counters half full interrupt */ ++ unsigned int rx_pause_on : 1; /* received pause on frame interrupt */ ++ unsigned int tx_pause_on : 1; /* transmit pause on frame interrupt */ ++ unsigned int rx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int tx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int rx_overrun : 1; /* GMAC Rx FIFO overrun 
interrupt */ ++ unsigned int tx_underrun : 1; /* GMAC Tx FIFO underrun interrupt */ ++ unsigned int : 6; ++ unsigned int m_tx_fail : 1; /* Tx fail interrupt mask */ ++ unsigned int m_cnt_full : 1; /* MIB counters half full interrupt mask */ ++ unsigned int m_rx_pause_on : 1; /* received pause on frame interrupt mask */ ++ unsigned int m_tx_pause_on : 1; /* transmit pause on frame interrupt mask */ ++ unsigned int m_rx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_tx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt mask */ ++ unsigned int m_tx_underrun : 1; /* GMAC Tx FIFO underrun interrupt mask */ ++#else ++ unsigned int m_tx_underrun : 1; /* GMAC Tx FIFO underrun interrupt mask */ ++ unsigned int m_rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt mask */ ++ unsigned int m_tx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_rx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_tx_pause_on : 1; /* transmit pause on frame interrupt mask */ ++ unsigned int m_rx_pause_on : 1; /* received pause on frame interrupt mask */ ++ unsigned int m_cnt_full : 1; /* MIB counters half full interrupt mask */ ++ unsigned int m_tx_fail : 1; /* Tx fail interrupt mask */ ++ unsigned int : 6; ++ unsigned int tx_underrun : 1; /* GMAC Tx FIFO underrun interrupt */ ++ unsigned int rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt */ ++ unsigned int tx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int rx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int tx_pause_on : 1; /* transmit pause on frame interrupt */ ++ unsigned int rx_pause_on : 1; /* received pause on frame interrupt */ ++ unsigned int cnt_full : 1; /* MIB counters half full interrupt */ ++ unsigned int tx_fail : 1; /* Tx fail interrupt */ ++ unsigned int : 10; ++#endif ++ } bits; ++} GMAC_INT_MASK_T; ++ ++ ++/*******************************************/ ++/* the register structure of GMAC DMA */ ++/*******************************************/ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff00 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int : 7; /* reserved */ ++ unsigned int s_ahb_err : 1; /* Slave AHB bus error */ ++ unsigned int tx_err_code : 4; /* TxDMA error code */ ++ unsigned int rx_err_code : 4; /* RxDMA error code */ ++ unsigned int device_id : 12; ++ unsigned int revision_id : 4; ++#else ++ unsigned int revision_id : 4; ++ unsigned int device_id : 12; ++ unsigned int rx_err_code : 4; /* RxDMA error code */ ++ unsigned int tx_err_code : 4; /* TxDMA error code */ ++ unsigned int s_ahb_err : 1; /* Slave AHB bus error */ ++ unsigned int : 7; /* reserved */ ++#endif ++ } bits; ++} GMAC_DMA_DEVICE_ID_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff04 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int ts_finish : 1; /* finished tx interrupt */ ++ unsigned int ts_derr : 1; /* AHB Bus Error while tx */ ++ unsigned int ts_perr : 1; /* Tx Descriptor protocol error */ ++ unsigned int ts_eodi : 1; /* TxDMA end of descriptor interrupt */ ++ unsigned int ts_eofi : 1; /* TxDMA end of frame interrupt */ ++ unsigned int rs_finish : 1; /* finished rx interrupt */ ++ unsigned int rs_derr : 1; /* AHB Bus Error while rx */ ++ unsigned int rs_perr : 1; /* Rx Descriptor protocol error */ ++ unsigned int rs_eodi : 1; /* RxDMA end of descriptor interrupt */ ++ unsigned int rs_eofi : 1; /* RxDMA end of frame interrupt */ ++ unsigned int : 
1; /* Tx fail interrupt */ ++ unsigned int cnt_full : 1; /* MIB counters half full interrupt */ ++ unsigned int rx_pause_on : 1; /* received pause on frame interrupt */ ++ unsigned int tx_pause_on : 1; /* transmit pause on frame interrupt */ ++ unsigned int rx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int tx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt */ ++ unsigned int link_change : 1; /* GMAC link changed Interrupt for RGMII mode */ ++ unsigned int : 1; ++ unsigned int : 1; ++ unsigned int : 3; ++ unsigned int loop_back : 1; /* loopback TxDMA to RxDMA */ ++ unsigned int : 1; /* Tx fail interrupt mask */ ++ unsigned int m_cnt_full : 1; /* MIB counters half full interrupt mask */ ++ unsigned int m_rx_pause_on : 1; /* received pause on frame interrupt mask */ ++ unsigned int m_tx_pause_on : 1; /* transmit pause on frame interrupt mask */ ++ unsigned int m_rx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_tx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt mask */ ++ unsigned int m_link_change : 1; /* GMAC link changed Interrupt mask for RGMII mode */ ++#else ++ unsigned int m_link_change : 1; /* GMAC link changed Interrupt mask for RGMII mode */ ++ unsigned int m_rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt mask */ ++ unsigned int m_tx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_rx_pause_off : 1; /* received pause off frame interrupt mask */ ++ unsigned int m_tx_pause_on : 1; /* transmit pause on frame interrupt mask */ ++ unsigned int m_rx_pause_on : 1; /* received pause on frame interrupt mask */ ++ unsigned int m_cnt_full : 1; /* MIB counters half full interrupt mask */ ++ unsigned int : 1; /* Tx fail interrupt mask */ ++ unsigned int loop_back : 1; /* loopback TxDMA to RxDMA */ ++ unsigned int : 3; ++ unsigned int : 1; ++ unsigned int : 1; ++ unsigned int link_change : 1; /* GMAC link changed Interrupt for RGMII mode */ ++ unsigned int rx_overrun : 1; /* GMAC Rx FIFO overrun interrupt */ ++ unsigned int tx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int rx_pause_off : 1; /* received pause off frame interrupt */ ++ unsigned int tx_pause_on : 1; /* transmit pause on frame interrupt */ ++ unsigned int rx_pause_on : 1; /* received pause on frame interrupt */ ++ unsigned int cnt_full : 1; /* MIB counters half full interrupt */ ++ unsigned int : 1; /* Tx fail interrupt */ ++ unsigned int rs_eofi : 1; /* RxDMA end of frame interrupt */ ++ unsigned int rs_eodi : 1; /* RxDMA end of descriptor interrupt */ ++ unsigned int rs_perr : 1; /* Rx Descriptor protocol error */ ++ unsigned int rs_derr : 1; /* AHB Bus Error while rx */ ++ unsigned int rs_finish : 1; /* finished rx interrupt */ ++ unsigned int ts_eofi : 1; /* TxDMA end of frame interrupt */ ++ unsigned int ts_eodi : 1; /* TxDMA end of descriptor interrupt */ ++ unsigned int ts_perr : 1; /* Tx Descriptor protocol error */ ++ unsigned int ts_derr : 1; /* AHB Bus Error while tx */ ++ unsigned int ts_finish : 1; /* finished tx interrupt */ ++#endif ++ } bits; ++} GMAC_DMA_STATUS_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff08 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int td_start : 1; /* Start DMA transfer */ ++ unsigned int td_continue : 1; /* Continue DMA operation */ ++ unsigned int td_chain_mode : 1; /* Descriptor Chain Mode;1-Descriptor Chain 
mode, 0-Direct DMA mode*/ ++ unsigned int : 1; ++ unsigned int td_prot : 4; /* TxDMA protection control */ ++ unsigned int td_burst_size : 2; /* TxDMA max burst size for every AHB request */ ++ unsigned int td_bus : 2; /* peripheral bus width;0x->8 bits,10->16 bits,11->32 bits */ ++ unsigned int td_endian : 1; /* AHB Endian. 0-little endian; 1-big endian */ ++ unsigned int td_finish_en : 1; /* DMA Finish Event Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_fail_en : 1; /* DMA Fail Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_perr_en : 1; /* Protocol Failure Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_eod_en : 1; /* End of Descriptor interrupt Enable;1-enable;0-mask */ ++ unsigned int td_eof_en : 1; /* End of frame interrupt Enable;1-enable;0-mask */ ++ unsigned int : 14; ++#else ++ unsigned int : 14; ++ unsigned int td_eof_en : 1; /* End of frame interrupt Enable;1-enable;0-mask */ ++ unsigned int td_eod_en : 1; /* End of Descriptor interrupt Enable;1-enable;0-mask */ ++ unsigned int td_perr_en : 1; /* Protocol Failure Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_fail_en : 1; /* DMA Fail Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_finish_en : 1; /* DMA Finish Event Interrupt Enable;1-enable;0-mask */ ++ unsigned int td_endian : 1; /* AHB Endian. 0-little endian; 1-big endian */ ++ unsigned int td_bus : 2; /* peripheral bus width;0x->8 bits,10->16 bits,11->32 bits */ ++ unsigned int td_burst_size : 2; /* TxDMA max burst size for every AHB request */ ++ unsigned int td_prot : 4; /* TxDMA protection control */ ++ unsigned int : 1; ++ unsigned int td_chain_mode : 1; /* Descriptor Chain Mode;1-Descriptor Chain mode, 0-Direct DMA mode*/ ++ unsigned int td_continue : 1; /* Continue DMA operation */ ++ unsigned int td_start : 1; /* Start DMA transfer */ ++#endif ++ } bits; ++} GMAC_TXDMA_CTRL_T; ++ ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff0c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int td_first_des_ptr : 28;/* first descriptor address */ ++ unsigned int td_busy : 1;/* 1-TxDMA busy; 0-TxDMA idle */ ++ unsigned int : 3; ++#else ++ unsigned int : 3; ++ unsigned int td_busy : 1;/* 1-TxDMA busy; 0-TxDMA idle */ ++ unsigned int td_first_des_ptr : 28;/* first descriptor address */ ++#endif ++ } bits; ++} GMAC_TXDMA_FIRST_DESC_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff10 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int ndar : 28; /* next descriptor address */ ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ unsigned int : 1; ++ unsigned int sof_eof : 2; ++#else ++ unsigned int sof_eof : 2; ++ unsigned int : 1; ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ unsigned int ndar : 28; /* next descriptor address */ ++#endif ++ } bits; ++} GMAC_TXDMA_CURR_DESC_T; ++ ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff14 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int rd_start : 1; /* Start DMA transfer */ ++ unsigned int rd_continue : 1; /* Continue DMA operation */ ++ unsigned int rd_chain_mode : 1; /* Descriptor Chain Mode;1-Descriptor Chain mode, 0-Direct DMA mode*/ ++ unsigned int : 1; ++ unsigned int rd_prot : 4; /* DMA protection control */ ++ unsigned int rd_burst_size : 2; /* DMA max burst size for every AHB request */ ++ unsigned int rd_bus : 2; /* peripheral bus width;0x->8 bits,10->16 bits,11->32 bits */ ++ unsigned int rd_endian : 1; /* AHB Endian. 
0-little endian; 1-big endian */ ++ unsigned int rd_finish_en : 1; /* DMA Finish Event Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_fail_en : 1; /* DMA Fail Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_perr_en : 1; /* Protocol Failure Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_eod_en : 1; /* End of Descriptor interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_eof_en : 1; /* End of frame interrupt Enable;1-enable;0-mask */ ++ unsigned int : 14; ++#else ++ unsigned int : 14; ++ unsigned int rd_eof_en : 1; /* End of frame interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_eod_en : 1; /* End of Descriptor interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_perr_en : 1; /* Protocol Failure Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_fail_en : 1; /* DMA Fail Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_finish_en : 1; /* DMA Finish Event Interrupt Enable;1-enable;0-mask */ ++ unsigned int rd_endian : 1; /* AHB Endian. 0-little endian; 1-big endian */ ++ unsigned int rd_bus : 2; /* peripheral bus width;0x->8 bits,10->16 bits,11->32 bits */ ++ unsigned int rd_burst_size : 2; /* DMA max burst size for every AHB request */ ++ unsigned int rd_prot : 4; /* DMA protection control */ ++ unsigned int : 1; ++ unsigned int rd_chain_mode : 1; /* Descriptor Chain Mode;1-Descriptor Chain mode, 0-Direct DMA mode*/ ++ unsigned int rd_continue : 1; /* Continue DMA operation */ ++ unsigned int rd_start : 1; /* Start DMA transfer */ ++#endif ++ } bits; ++} GMAC_RXDMA_CTRL_T; ++ ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff18 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int rd_first_des_ptr : 28;/* first descriptor address */ ++ unsigned int rd_busy : 1;/* 1-RxDMA busy; 0-RxDMA idle */ ++ unsigned int : 3; ++#else ++ unsigned int : 3; ++ unsigned int rd_busy : 1;/* 1-RxDMA busy; 0-RxDMA idle */ ++ unsigned int rd_first_des_ptr : 28;/* first descriptor address */ ++#endif ++ } bits; ++} GMAC_RXDMA_FIRST_DESC_T; ++ ++typedef union ++{ ++ unsigned int bits32; ++ struct bit2_ff1c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int ndar : 28; /* next descriptor address */ ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ unsigned int : 1; ++ unsigned int sof_eof : 2; ++#else ++ unsigned int sof_eof : 2; ++ unsigned int : 1; ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ unsigned int ndar : 28; /* next descriptor address */ ++#endif ++ } bits; ++} GMAC_RXDMA_CURR_DESC_T; ++ ++ ++/********************************************/ ++/* Descriptor Format */ ++/********************************************/ ++ ++typedef struct descriptor_t ++{ ++ union frame_control_t ++ { ++ unsigned int bits32; ++ struct bits_0000 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int own : 1; /* owner bit. 
0-CPU, 1-DMA */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int csum_state : 3; /* checksum error status */ ++ unsigned int vlan_tag : 1; /* 802.1q vlan tag packet */ ++ unsigned int frame_state: 3; /* reference Rx Status1 */ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++#else ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int frame_state: 3; /* reference Rx Status1 */ ++ unsigned int vlan_tag : 1; /* 802.1q vlan tag packet */ ++ unsigned int csum_state : 3; /* checksum error status */ ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int own : 1; /* owner bit. 0-CPU, 1-DMA */ ++#endif ++ } bits_rx; ++ ++ struct bits_0001 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int own : 1; /* owner bit. 0-CPU, 1-DMA */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int : 6; ++ unsigned int success_tx : 1; /* successful transmitted */ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++#else ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int success_tx : 1; /* successful transmitted */ ++ unsigned int : 6; ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int own : 1; /* owner bit. 0-CPU, 1-DMA */ ++#endif ++ } bits_tx_in; ++ ++ struct bits_0002 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int own : 1; /* owner bit. 
0-CPU, 1-DMA */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int : 2; ++ unsigned int udp_csum_en: 1; /* TSS UDP checksum enable */ ++ unsigned int tcp_csum_en: 1; /* TSS TCP checksum enable */ ++ unsigned int ipv6_tx_en : 1; /* TSS IPv6 TX enable */ ++ unsigned int ip_csum_en : 1; /* TSS IPv4 IP Header checksum enable */ ++ unsigned int vlan_enable: 1; /* VLAN TIC insertion enable */ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++#else ++ unsigned int buffer_size:16; /* transfer buffer size associated with current description*/ ++ unsigned int desc_count : 6; /* number of descriptors used for the current frame */ ++ unsigned int vlan_enable: 1; /* VLAN TIC insertion enable */ ++ unsigned int ip_csum_en : 1; /* TSS IPv4 IP Header checksum enable */ ++ unsigned int ipv6_tx_en : 1; /* TSS IPv6 TX enable */ ++ unsigned int tcp_csum_en: 1; /* TSS TCP checksum enable */ ++ unsigned int udp_csum_en: 1; /* TSS UDP checksum enable */ ++ unsigned int : 2; ++ unsigned int perr : 1; /* protocol error during processing this descriptor */ ++ unsigned int derr : 1; /* data error during processing this descriptor */ ++ unsigned int own : 1; /* owner bit. 0-CPU, 1-DMA */ ++#endif ++ } bits_tx_out; ++ ++ } frame_ctrl; ++ ++ union flag_status_t ++ { ++ unsigned int bits32; ++ struct bits_0004 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int priority : 3; /* user priority extracted from receiving frame*/ ++ unsigned int cfi : 1; /* cfi extracted from receiving frame*/ ++ unsigned int vlan_id :12; /* VLAN ID extracted from receiving frame */ ++ unsigned int frame_count:16; /* received frame byte count,include CRC,not include VLAN TIC */ ++#else ++ unsigned int frame_count:16; /* received frame byte count,include CRC,not include VLAN TIC */ ++ unsigned int vlan_id :12; /* VLAN ID extracted from receiving frame */ ++ unsigned int cfi : 1; /* cfi extracted from receiving frame*/ ++ unsigned int priority : 3; /* user priority extracted from receiving frame*/ ++#endif ++ } bits_rx_status; ++ ++ struct bits_0005 ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int priority : 3; /* user priority to transmit*/ ++ unsigned int cfi : 1; /* cfi to transmit*/ ++ unsigned int vlan_id :12; /* VLAN ID to transmit */ ++ unsigned int frame_count:16; /* total tx frame byte count */ ++#else ++ unsigned int frame_count:16; /* total tx frame byte count */ ++ unsigned int vlan_id :12; /* VLAN ID to transmit */ ++ unsigned int cfi : 1; /* cfi to transmit*/ ++ unsigned int priority : 3; /* user priority to transmit*/ ++#endif ++ } bits_tx_flag; ++ } flag_status; ++ ++ unsigned int buf_adr; /* data buffer address */ ++ ++ union next_desc_t ++ { ++ unsigned int next_descriptor; ++ struct bits_000c ++ { ++#if (BIG_ENDIAN==1) ++ unsigned int ndar :28; /* next descriptor address */ ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ unsigned int : 1; ++ unsigned int sof_eof : 2; /* 00-the linking descriptor 01-the last descriptor of a frame*/ ++ /* 10-the first descriptor of a frame 11-only one descriptor for a frame*/ ++#else ++ unsigned int sof_eof : 2; /* 00-the linking descriptor 01-the last descriptor of a frame*/ ++ /* 10-the first descriptor of a frame 11-only one descriptor for a frame*/ ++ unsigned int : 1; ++ unsigned int eofie : 1; /* end of frame interrupt enable */ ++ 
unsigned int ndar :28; /* next descriptor address */ ++#endif ++ } bits; ++ } next_desc; ++} GMAC_DESCRIPTOR_T; ++ ++typedef struct gmac_conf { ++ struct net_device *dev; ++ int portmap; ++ int vid; ++ int flag; /* 1: active 0: non-active */ ++} sys_gmac_conf; ++ ++struct gmac_private { ++ unsigned char *tx_bufs; /* Tx bounce buffer region. */ ++ unsigned char *rx_bufs; ++ GMAC_DESCRIPTOR_T *tx_desc; /* point to virtual TX descriptor address*/ ++ GMAC_DESCRIPTOR_T *rx_desc; /* point to virtual RX descriptor address*/ ++ GMAC_DESCRIPTOR_T *tx_cur_desc; /* point to current TX descriptor */ ++ GMAC_DESCRIPTOR_T *rx_cur_desc; /* point to current RX descriptor */ ++ GMAC_DESCRIPTOR_T *tx_finished_desc; ++ GMAC_DESCRIPTOR_T *rx_finished_desc; ++ unsigned long cur_tx; ++ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ ++ unsigned int tx_flag; ++ unsigned long dirty_tx; ++ unsigned char *tx_buf[TX_DESC_NUM]; /* Tx bounce buffers */ ++ dma_addr_t tx_desc_dma; /* physical TX descriptor address */ ++ dma_addr_t rx_desc_dma; /* physical RX descriptor address */ ++ dma_addr_t tx_bufs_dma; /* physical TX descriptor address */ ++ dma_addr_t rx_bufs_dma; /* physical RX descriptor address */ ++ struct net_device_stats stats; ++ pid_t thr_pid; ++ wait_queue_head_t thr_wait; ++ struct completion thr_exited; ++ spinlock_t lock; ++ int time_to_die; ++ unsigned int tx_desc_hdr[GMAC_PHY_IF]; /* the descriptor which sw can fill */ ++ unsigned int tx_desc_tail[GMAC_PHY_IF]; /* the descriptor which is not cleaned yet */ ++}; ++ ++ ++struct reg_ioctl_data { ++ unsigned int reg_addr; /* the register address */ ++ unsigned int val_in; /* data write to the register */ ++ unsigned int val_out; /* data read from the register */ ++}; ++ ++#ifdef CONFIG_SL2312_MPAGE ++typedef struct tx_data_t { ++ int freeable; // 1 when it's skb. it can be freed in tx interrupt handler ++ struct sk_buff* skb; // skb ++ int desc_in_use; // 1 when the desc is in use. 0 when desc is available. ++ long end_seq; // to find out packets are in seq. ++ // so this value is the seq of next packet. ++} tx_data; ++#endif ++ ++/************************************************************* ++ * Global Variable ++ *************************************************************/ ++struct semaphore sem_gmac; /* semaphore for share pins issue */ ++ ++/************************************************************* ++ * Static Global Variable ++ *************************************************************/ ++// static unsigned int MAC_BASE_ADDR = GMAC0_BASE; ++static unsigned int gmac_base_addr[GMAC_PHY_IF] = {GMAC0_BASE,GMAC1_BASE}; ++static unsigned int gmac_irq[GMAC_PHY_IF] = {IRQ_GMAC0,IRQ_GMAC1}; ++static struct net_device *gmac_dev[GMAC_PHY_IF]; ++ ++static unsigned int FLAG_SWITCH=0; /* if 1-->switch chip presented. 
if 0-->switch chip unpresented */ ++static unsigned int flow_control_enable[GMAC_PHY_IF] = {1,1}; ++static unsigned int pre_phy_status[GMAC_PHY_IF] = {LINK_DOWN,LINK_DOWN}; ++static unsigned int tx_desc_virtual_base[GMAC_PHY_IF]; ++static unsigned int rx_desc_virtual_base[GMAC_PHY_IF]; ++static unsigned int full_duplex = 1; ++static unsigned int speed = 1; ++#ifdef CONFIG_SL2312_MPAGE ++static tx_data tx_skb[GMAC_PHY_IF][TX_DESC_NUM]; ++#else ++static struct sk_buff *tx_skb[GMAC_PHY_IF][TX_DESC_NUM]; ++#endif ++static struct sk_buff *rx_skb[GMAC_PHY_IF][RX_DESC_NUM]; ++static unsigned int tx_desc_start_adr[GMAC_PHY_IF]; ++static unsigned int rx_desc_start_adr[GMAC_PHY_IF]; ++static unsigned char eth0_mac[6]= {0x00,0x50,0xc2,0x2b,0xd3,0x25}; ++static unsigned char eth1_mac[6]= {0x00,0x50,0xc2,0x2b,0xdf,0xfe}; ++static unsigned int next_tick = 3 * HZ; ++ ++static unsigned int phy_addr[GMAC_PHY_IF] = {0x01,0x02}; /* define PHY address */ ++ ++DECLARE_WAIT_QUEUE_HEAD(gmac_queue); ++//static wait_queue_t wait; ++ ++struct gmac_conf VLAN_conf[] = { ++#ifdef CONFIG_ADM_6999 ++ { (struct net_device *)0,0x7F,1 }, ++ { (struct net_device *)0,0x80,2 } ++#endif ++#ifdef CONFIG_ADM_6996 ++ { (struct net_device *)0,0x0F,1 }, ++ { (struct net_device *)0,0x10,2 } ++#endif ++}; ++ ++#define NUM_VLAN_IF (sizeof(VLAN_conf)/sizeof(struct gmac_conf)) ++ ++ ++/************************************************/ ++/* GMAC function declare */ ++/************************************************/ ++ ++unsigned int mii_read(unsigned char phyad,unsigned char regad); ++void mii_write(unsigned char phyad,unsigned char regad,unsigned int value); ++static void gmac_set_phy_status(struct net_device *dev); ++static void gmac_get_phy_status(struct net_device *dev); ++static int gmac_phy_thread (void *data); ++static int gmac_set_mac_address(struct net_device *dev, void *addr); ++static void gmac_tx_timeout(struct net_device *dev); ++static void gmac_tx_packet_complete(struct net_device *dev); ++static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev); ++static void gmac_set_rx_mode(struct net_device *dev); ++static void gmac_rx_packet(struct net_device *dev); ++static int gmac_open (struct net_device *dev); ++static int gmac_netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); ++ ++static unsigned int gmac_get_dev_index(struct net_device *dev); ++static unsigned int gmac_select_interface(struct net_device *dev); ++ ++#ifdef CONFIG_SL2312_MPAGE ++int printk_all(int dev_index, struct gmac_private* tp); ++#endif ++ ++/****************************************/ ++/* SPI Function Declare */ ++/****************************************/ ++void SPI_write(unsigned char addr,unsigned int value); ++unsigned int SPI_read(unsigned char table,unsigned char addr); ++void SPI_write_bit(char bit_EEDO); ++unsigned int SPI_read_bit(void); ++void SPI_default(void); ++void SPI_reset(unsigned char rstype,unsigned char port_cnt); ++void SPI_pre_st(void); ++void SPI_CS_enable(unsigned char enable); ++void SPI_Set_VLAN(unsigned char LAN,unsigned int port_mask); ++void SPI_Set_tag(unsigned int port,unsigned tag); ++void SPI_Set_PVID(unsigned int PVID,unsigned int port_mask); ++unsigned int SPI_Get_PVID(unsigned int port); ++void SPI_mac_lock(unsigned int port, unsigned char lock); ++void SPI_get_port_state(unsigned int port); ++void SPI_port_enable(unsigned int port,unsigned char enable); ++unsigned int SPI_get_identifier(void); ++void SPI_get_status(unsigned int port); ++ ++/****************************************/ ++/* 
VLAN Function Declare */ ++/****************************************/ ++int getVLANfromdev (struct net_device *dev ); ++struct net_device * getdevfromVLAN( int VID); ++ ++ ++ ++/************************************************/ ++/* function body */ ++/************************************************/ ++#if 0 ++void hw_memcpy(void *to,const void *from,unsigned long n) ++{ ++ writel(from,SL2312_DRAM_CTRL_BASE+0x20); /* set source address */ ++ writel(to,SL2312_DRAM_CTRL_BASE+0x24); /* set destination address */ ++ writel(n,SL2312_DRAM_CTRL_BASE+0x28); /* set byte count */ ++ writel(0x00000001,SL2312_DRAM_CTRL_BASE+0x2c); ++ while (readl(SL2312_DRAM_CTRL_BASE+0x2c)); ++} ++#endif ++ ++static unsigned int gmac_read_reg(unsigned int addr) ++{ ++ unsigned int reg_val; ++// unsigned int flags; ++// spinlock_t lock; ++ ++// spin_lock_irqsave(&lock, flags); ++ reg_val = readl(addr); // Gary Chen ++// spin_unlock_irqrestore(&lock, flags); ++ return (reg_val); ++} ++ ++static void gmac_write_reg(unsigned int addr,unsigned int data,unsigned int bit_mask) ++{ ++ unsigned int reg_val; ++ //unsigned int *addr; ++// unsigned int flags; ++// spinlock_t lock; ++ ++// spin_lock_irqsave(&lock, flags); ++ reg_val = ( gmac_read_reg(addr) & (~bit_mask) ) | (data & bit_mask); ++ writel(reg_val,addr); ++// spin_unlock_irqrestore(&lock, flags); ++ return; ++} ++ ++ ++static void gmac_sw_reset(struct net_device *dev) ++{ ++ unsigned int index; ++ unsigned int reg_val; ++ ++ index = gmac_get_dev_index(dev); ++ if (index==0) ++ reg_val = readl(GMAC_GLOBAL_BASE_ADDR+0x0c) | 0x00000020; /* GMAC0 S/W reset */ ++ else ++ reg_val = readl(GMAC_GLOBAL_BASE_ADDR+0x0c) | 0x00000040; /* GMAC1 S/W reset */ ++ ++ writel(reg_val,GMAC_GLOBAL_BASE_ADDR+0x0c); ++ return; ++} ++ ++static void gmac_get_mac_address(void) ++{ ++#ifdef CONFIG_MTD ++ extern int get_vlaninfo(vlaninfo* vlan); ++ static vlaninfo vlan[2]; ++ ++ if (get_vlaninfo(&vlan[0])) ++ { ++ memcpy(eth0_mac,vlan[0].mac,6); ++ VLAN_conf[0].vid = vlan[0].vlanid; ++ VLAN_conf[0].portmap = vlan[0].vlanmap; ++ memcpy(eth1_mac,vlan[1].mac,6); ++ VLAN_conf[1].vid = vlan[1].vlanid; ++ VLAN_conf[1].portmap = vlan[1].vlanmap; ++ } ++#else ++ unsigned int reg_val; ++ ++ reg_val = readl(IO_ADDRESS(SL2312_SECURITY_BASE)+0xac); ++ eth0_mac[4] = (reg_val & 0xff00) >> 8; ++ eth0_mac[5] = reg_val & 0x00ff; ++ reg_val = readl(IO_ADDRESS(SL2312_SECURITY_BASE)+0xac); ++ eth1_mac[4] = (reg_val & 0xff00) >> 8; ++ eth1_mac[5] = reg_val & 0x00ff; ++#endif ++ return; ++} ++ ++static unsigned int gmac_get_dev_index(struct net_device *dev) ++{ ++ unsigned int i; ++ ++ /* get device index number */ ++ for (i=0;iMII 1->GMII 2->RGMII(10/100) 3->RGMII(1000) */ ++ else ++ phy_mode = 2; /* 0->MII 1->GMII 2->RGMII(10/100) 3->RGMII(1000) */ ++ ++ /* set PHY operation mode */ ++ status = (phy_mode<<5) | 0x11 | (full_duplex<<3) | (speed<<1); ++ gmac_write_reg(gmac_base_addr[index] + GMAC_STATUS,status ,0x0000007f); ++ ++ /* set station MAC address1 and address2 */ ++ if (index==0) ++ memcpy(&sock.sa_data[0],ð0_mac[0],6); ++ else ++ memcpy(&sock.sa_data[0],ð1_mac[0],6); ++ gmac_set_mac_address(dev,(void *)&sock); ++ ++ /* set RX_FLTR register to receive all multicast packet */ ++ gmac_write_reg(gmac_base_addr[index] + GMAC_RX_FLTR,0x0000001F,0x0000001f); ++ //gmac_write_reg(gmac_base_addr[index] + GMAC_RX_FLTR,0x00000007,0x0000001f); ++ ++ /* set per packet buffer size */ ++ config1.bits32 = 0; ++ config1.bits.buf_size = 11; /* buffer size = 2048-byte */ ++ gmac_write_reg(gmac_base_addr[index] + 
GMAC_CONFIG1,config1.bits32,0x0000000f); ++ ++ /* set flow control threshold */ ++ config2_val.bits32 = 0; ++ config2_val.bits.set_threshold = RX_DESC_NUM/4; ++ config2_val.bits.rel_threshold = RX_DESC_NUM*3/4; ++ gmac_write_reg(gmac_base_addr[index] + GMAC_CONFIG2,config2_val.bits32,0xffffffff); ++ ++ /* init remaining buffer number register */ ++ rbnr_val.bits32 = 0; ++ rbnr_val.bits.buf_remain = RX_DESC_NUM; ++ rbnr_mask.bits32 = 0; ++ rbnr_mask.bits.buf_remain = 0xffff; ++ gmac_write_reg(gmac_base_addr[index] + GMAC_RBNR,rbnr_val.bits32,rbnr_mask.bits32); ++ ++ /* disable TX/RX and disable internal loop back */ ++ config0.bits32 = 0; ++ config0_mask.bits32 = 0; ++ config0.bits.max_len = 2; ++ if (flow_control_enable[index]==1) ++ { ++ config0.bits.tx_fc_en = 1; /* enable tx flow control */ ++ config0.bits.rx_fc_en = 1; /* enable rx flow control */ ++ printk("Enable MAC Flow Control...\n"); ++ } ++ else ++ { ++ config0.bits.tx_fc_en = 0; /* disable tx flow control */ ++ config0.bits.rx_fc_en = 0; /* disable rx flow control */ ++ printk("Disable MAC Flow Control...\n"); ++ } ++ config0.bits.dis_rx = 1; /* disable rx */ ++ config0.bits.dis_tx = 1; /* disable tx */ ++ config0.bits.loop_back = 0; /* enable/disable GMAC loopback */ ++ config0.bits.inv_rx_clk = 0; ++ config0.bits.rising_latch = 1; ++ config0.bits.ipv4_tss_rx_en = 1; /* enable H/W to check ip checksum */ ++ config0.bits.ipv6_tss_rx_en = 1; /* enable H/W to check ip checksum */ ++ ++ config0_mask.bits.max_len = 7; ++ config0_mask.bits.tx_fc_en = 1; ++ config0_mask.bits.rx_fc_en = 1; ++ config0_mask.bits.dis_rx = 1; ++ config0_mask.bits.dis_tx = 1; ++ config0_mask.bits.loop_back = 1; ++ config0_mask.bits.inv_rx_clk = 1; ++ config0_mask.bits.rising_latch = 1; ++ config0_mask.bits.ipv4_tss_rx_en = 1; ++ config0_mask.bits.ipv6_tss_rx_en = 1; ++ gmac_write_reg(gmac_base_addr[index] + GMAC_CONFIG0,config0.bits32,config0_mask.bits32); ++ ++ return (0); ++} ++ ++static void gmac_enable_tx_rx(struct net_device *dev) ++{ ++ GMAC_CONFIG0_T config0,config0_mask; ++ int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* enable TX/RX */ ++ config0.bits32 = 0; ++ config0_mask.bits32 = 0; ++ config0.bits.dis_rx = 0; /* enable rx */ ++ config0.bits.dis_tx = 0; /* enable tx */ ++ config0_mask.bits.dis_rx = 1; ++ config0_mask.bits.dis_tx = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_CONFIG0,config0.bits32,config0_mask.bits32); ++} ++ ++static void gmac_disable_tx_rx(struct net_device *dev) ++{ ++ GMAC_CONFIG0_T config0,config0_mask; ++ int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* enable TX/RX */ ++ config0.bits32 = 0; ++ config0_mask.bits32 = 0; ++ config0.bits.dis_rx = 1; /* disable rx */ ++ config0.bits.dis_tx = 1; /* disable tx */ ++ config0_mask.bits.dis_rx = 1; ++ config0_mask.bits.dis_tx = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_CONFIG0,config0.bits32,config0_mask.bits32); ++} ++ ++#ifdef CONFIG_SL_NAPI ++static int gmac_rx_poll_ga(struct net_device *dev, int *budget) ++{ ++ struct gmac_private *tp = dev->priv; ++ struct sk_buff *skb; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ GMAC_RXDMA_FIRST_DESC_T rxdma_busy; ++ GMAC_DESCRIPTOR_T *rx_desc; ++ unsigned int pkt_size; ++ unsigned int desc_count; ++ unsigned int vid; ++// unsigned int priority; ++ unsigned int own; ++ unsigned int good_frame = 0; ++ unsigned int index; ++ unsigned int dev_index; ++ int work = 0; ++ int work_done = 0; ++ int quota = min(dev->quota, *budget); ++ ++ dev_index = gmac_select_interface(dev); 
++ ++ for (;;) ++ { ++ own = tp->rx_cur_desc->frame_ctrl.bits32 >> 31; ++ if (own == CPU) /* check owner bit */ ++ { ++ rx_desc = tp->rx_cur_desc; ++#if (GMAC_DEBUG==1) ++ /* check error interrupt */ ++ if ( (rx_desc->frame_ctrl.bits_rx.derr==1)||(rx_desc->frame_ctrl.bits_rx.perr==1) ) ++ { ++ printk("%s::Rx Descriptor Processing Error !!!\n",__func__); ++ } ++#endif ++ /* get frame information from the first descriptor of the frame */ ++ pkt_size = rx_desc->flag_status.bits_rx_status.frame_count - 4; /*total byte count in a frame*/ ++#if (GMAC_DEBUG==1) ++ priority = rx_desc->flag_status.bits_rx_status.priority; /* 802.1p priority */ ++#endif ++ vid = rx_desc->flag_status.bits_rx_status.vlan_id; /* 802.1q vlan id */ ++ if (vid == 0) ++ { ++ vid = 1; /* default vlan */ ++ } ++ desc_count = rx_desc->frame_ctrl.bits_rx.desc_count; /* get descriptor count per frame */ ++ ++ if (rx_desc->frame_ctrl.bits_rx.frame_state == 0x000) /* good frame */ ++ { ++ tp->stats.rx_bytes += pkt_size; ++ tp->stats.rx_packets++; ++ good_frame = 1; ++ } ++ else ++ { ++ tp->stats.rx_errors++; ++ good_frame = 0; ++ printk("RX status: 0x%x\n",rx_desc->frame_ctrl.bits_rx.frame_state); ++ } ++ } ++ else ++ { ++ work_done = 1; ++ break; /* Rx process is completed */ ++ } ++ ++ if (good_frame == 1) ++ { ++ /* get rx skb buffer index */ ++ index = ((unsigned int)tp->rx_cur_desc - rx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ if (rx_skb[dev_index][index]) ++ { ++ skb_reserve (rx_skb[dev_index][index], 2); /* 16 byte align the IP fields. */ ++ rx_skb[dev_index][index]->dev = dev; ++ rx_skb[dev_index][index]->ip_summed = CHECKSUM_UNNECESSARY; ++ skb_put(rx_skb[dev_index][index],pkt_size); ++ rx_skb[dev_index][index]->protocol = eth_type_trans(rx_skb[dev_index][index],dev); /* set skb protocol */ ++ netif_rx(rx_skb[dev_index][index]); /* socket rx */ ++ dev->last_rx = jiffies; ++ ++ /* allocate rx skb buffer */ ++ if ( (skb = dev_alloc_skb(RX_BUF_SIZE))==NULL) /* allocate socket buffer */ ++ { ++ printk("%s::skb buffer allocation fail !\n",__func__); ++ } ++ rx_skb[dev_index][index] = skb; ++ tp->rx_cur_desc->buf_adr = (unsigned int)__pa(skb->data) | 0x02; /* insert two bytes in the beginning of rx data */ ++ } ++ else ++ { ++ printk("%s::rx skb index error !\n",__func__); ++ } ++ } ++ ++ tp->rx_cur_desc->frame_ctrl.bits_rx.own = DMA; /* release rx descriptor to DMA */ ++ /* point to next rx descriptor */ ++ tp->rx_cur_desc = (GMAC_DESCRIPTOR_T *)((tp->rx_cur_desc->next_desc.next_descriptor & 0xfffffff0)+rx_desc_virtual_base[dev_index]); ++ ++ /* release buffer to Remaining Buffer Number Register */ ++ if (flow_control_enable[dev_index] ==1) ++ { ++// gmac_write_reg(gmac_base_addr[dev_index] + GMAC_BNCR,desc_count,0x0000ffff); ++ writel(desc_count,(unsigned int *)(gmac_base_addr[dev_index] + GMAC_BNCR)); ++ } ++ ++ if (work++ >= quota ) ++ { ++ break; ++ } ++ } ++ ++ /* if RX DMA process is stoped , restart it */ ++ rxdma_busy.bits.rd_first_des_ptr = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_FIRST_DESC); ++ if (rxdma_busy.bits.rd_busy == 0) ++ { ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 1; /* start RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 1; /* continue RX DMA operation */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++ } ++ ++ dev->quota -= work; ++ *budget -= work; ++ if (work_done==1) ++ { ++ /* Receive 
descriptor is empty now */ ++ netif_rx_complete(dev); ++ /* enable receive interrupt */ ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,0x0007c000,0x0007c000); /* enable rx interrupt */ ++ return 0; ++ } ++ else ++ { ++ return -1; ++ } ++} ++ ++static int gmac_rx_poll_gb(struct net_device *dev, int *budget) ++{ ++ struct gmac_private *tp = dev->priv; ++ struct sk_buff *skb; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ GMAC_RXDMA_FIRST_DESC_T rxdma_busy; ++ GMAC_DESCRIPTOR_T *rx_desc; ++ unsigned int pkt_size; ++ unsigned int desc_count; ++ unsigned int vid; ++// unsigned int priority; ++ unsigned int own; ++ unsigned int good_frame = 0; ++ unsigned int index; ++ unsigned int dev_index; ++ int work = 0; ++ int work_done = 0; ++ int quota = min(dev->quota, *budget); ++ ++ dev_index = gmac_select_interface(dev); ++ ++ for (;;) ++ { ++ own = tp->rx_cur_desc->frame_ctrl.bits32 >> 31; ++ if (own == CPU) /* check owner bit */ ++ { ++ rx_desc = tp->rx_cur_desc; ++#if (GMAC_DEBUG==1) ++ /* check error interrupt */ ++ if ( (rx_desc->frame_ctrl.bits_rx.derr==1)||(rx_desc->frame_ctrl.bits_rx.perr==1) ) ++ { ++ printk("%s::Rx Descriptor Processing Error !!!\n",__func__); ++ } ++#endif ++ /* get frame information from the first descriptor of the frame */ ++ pkt_size = rx_desc->flag_status.bits_rx_status.frame_count - 4; /*total byte count in a frame*/ ++#if (GMAC_DEBUG==1) ++ priority = rx_desc->flag_status.bits_rx_status.priority; /* 802.1p priority */ ++#endif ++ vid = rx_desc->flag_status.bits_rx_status.vlan_id; /* 802.1q vlan id */ ++ if (vid == 0) ++ { ++ vid = 1; /* default vlan */ ++ } ++ desc_count = rx_desc->frame_ctrl.bits_rx.desc_count; /* get descriptor count per frame */ ++ ++ if (rx_desc->frame_ctrl.bits_rx.frame_state == 0x000) /* good frame */ ++ { ++ tp->stats.rx_bytes += pkt_size; ++ tp->stats.rx_packets++; ++ good_frame = 1; ++ } ++ else ++ { ++ tp->stats.rx_errors++; ++ good_frame = 0; ++ printk("RX status: 0x%x\n",rx_desc->frame_ctrl.bits_rx.frame_state); ++ } ++ } ++ else ++ { ++ work_done = 1; ++ break; /* Rx process is completed */ ++ } ++ ++ if (good_frame == 1) ++ { ++ /* get rx skb buffer index */ ++ index = ((unsigned int)tp->rx_cur_desc - rx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ if (rx_skb[dev_index][index]) ++ { ++ skb_reserve (rx_skb[dev_index][index], 2); /* 16 byte align the IP fields. 
*/ ++ rx_skb[dev_index][index]->dev = dev; ++ rx_skb[dev_index][index]->ip_summed = CHECKSUM_UNNECESSARY; ++ skb_put(rx_skb[dev_index][index],pkt_size); ++ rx_skb[dev_index][index]->protocol = eth_type_trans(rx_skb[dev_index][index],dev); /* set skb protocol */ ++ netif_rx(rx_skb[dev_index][index]); /* socket rx */ ++ dev->last_rx = jiffies; ++ ++ /* allocate rx skb buffer */ ++ if ( (skb = dev_alloc_skb(RX_BUF_SIZE))==NULL) /* allocate socket buffer */ ++ { ++ printk("%s::skb buffer allocation fail !\n",__func__); ++ } ++ rx_skb[dev_index][index] = skb; ++ tp->rx_cur_desc->buf_adr = (unsigned int)__pa(skb->data) | 0x02; /* insert two bytes in the beginning of rx data */ ++ } ++ else ++ { ++ printk("%s::rx skb index error !\n",__func__); ++ } ++ } ++ ++ tp->rx_cur_desc->frame_ctrl.bits_rx.own = DMA; /* release rx descriptor to DMA */ ++ /* point to next rx descriptor */ ++ tp->rx_cur_desc = (GMAC_DESCRIPTOR_T *)((tp->rx_cur_desc->next_desc.next_descriptor & 0xfffffff0)+rx_desc_virtual_base[dev_index]); ++ ++ /* release buffer to Remaining Buffer Number Register */ ++ if (flow_control_enable[dev_index] ==1) ++ { ++// gmac_write_reg(gmac_base_addr[dev_index] + GMAC_BNCR,desc_count,0x0000ffff); ++ writel(desc_count,(unsigned int *)(gmac_base_addr[dev_index] + GMAC_BNCR)); ++ } ++ ++ if (work++ >= quota ) ++ { ++ break; ++ } ++ } ++ ++ /* if RX DMA process is stoped , restart it */ ++ rxdma_busy.bits.rd_first_des_ptr = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_FIRST_DESC); ++ if (rxdma_busy.bits.rd_busy == 0) ++ { ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 1; /* start RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 1; /* continue RX DMA operation */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++ } ++ ++ dev->quota -= work; ++ *budget -= work; ++ if (work_done==1) ++ { ++ /* Receive descriptor is empty now */ ++ netif_rx_complete(dev); ++ /* enable receive interrupt */ ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,0x0007c000,0x0007c000); /* enable rx interrupt */ ++ return 0; ++ } ++ else ++ { ++ return -1; ++ } ++} ++ ++#endif ++ ++static void gmac_rx_packet(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ struct sk_buff *skb; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ GMAC_RXDMA_FIRST_DESC_T rxdma_busy; ++ GMAC_DESCRIPTOR_T *rx_desc; ++ unsigned int pkt_size; ++ unsigned int desc_count; ++ unsigned int vid; ++// unsigned int priority; ++ unsigned int own; ++ unsigned int good_frame = 0; ++ unsigned int i,index; ++ unsigned int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ for (i=0;i<256;i++) ++ { ++ own = tp->rx_cur_desc->frame_ctrl.bits32 >> 31; ++ if (own == CPU) /* check owner bit */ ++ { ++ rx_desc = tp->rx_cur_desc; ++#if (GMAC_DEBUG==1) ++ /* check error interrupt */ ++ if ( (rx_desc->frame_ctrl.bits_rx.derr==1)||(rx_desc->frame_ctrl.bits_rx.perr==1) ) ++ { ++ printk("%s::Rx Descriptor Processing Error !!!\n",__func__); ++ } ++#endif ++ /* get frame information from the first descriptor of the frame */ ++ pkt_size = rx_desc->flag_status.bits_rx_status.frame_count - 4; /*total byte count in a frame*/ ++#if (GMAC_DEBUG==1) ++ priority = rx_desc->flag_status.bits_rx_status.priority; /* 802.1p priority */ ++#endif ++ vid = rx_desc->flag_status.bits_rx_status.vlan_id; /* 802.1q vlan id */ ++ if (vid == 0) ++ { ++ vid = 1; /* default 
vlan */ ++ } ++ desc_count = rx_desc->frame_ctrl.bits_rx.desc_count; /* get descriptor count per frame */ ++ ++ if (rx_desc->frame_ctrl.bits_rx.frame_state == 0x000) /* good frame */ ++ { ++ tp->stats.rx_bytes += pkt_size; ++ tp->stats.rx_packets++; ++ good_frame = 1; ++ } ++ else ++ { ++ tp->stats.rx_errors++; ++ good_frame = 0; ++ printk("RX status: 0x%x\n",rx_desc->frame_ctrl.bits_rx.frame_state); ++ } ++ } ++ else ++ { ++ break; /* Rx process is completed */ ++ } ++ ++ if (good_frame == 1) ++ { ++ /* get rx skb buffer index */ ++ index = ((unsigned int)tp->rx_cur_desc - rx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ if (rx_skb[dev_index][index]) ++ { ++ skb_reserve (rx_skb[dev_index][index], 2); /* 16 byte align the IP fields. */ ++ rx_skb[dev_index][index]->dev = dev; ++ rx_skb[dev_index][index]->ip_summed = CHECKSUM_UNNECESSARY; ++ skb_put(rx_skb[dev_index][index],pkt_size); ++ rx_skb[dev_index][index]->protocol = eth_type_trans(rx_skb[dev_index][index],dev); /* set skb protocol */ ++ netif_rx(rx_skb[dev_index][index]); /* socket rx */ ++ dev->last_rx = jiffies; ++ ++ /* allocate rx skb buffer */ ++ if ( (skb = dev_alloc_skb(RX_BUF_SIZE))==NULL) /* allocate socket buffer */ ++ { ++ printk("%s::skb buffer allocation fail !\n",__func__); ++ } ++ rx_skb[dev_index][index] = skb; ++ tp->rx_cur_desc->buf_adr = (unsigned int)__pa(skb->data) | 0x02; /* insert two bytes in the beginning of rx data */ ++ } ++ else ++ { ++ printk("%s::rx skb index error !\n",__func__); ++ } ++ } ++ ++ tp->rx_cur_desc->frame_ctrl.bits_rx.own = DMA; /* release rx descriptor to DMA */ ++ /* point to next rx descriptor */ ++ tp->rx_cur_desc = (GMAC_DESCRIPTOR_T *)((tp->rx_cur_desc->next_desc.next_descriptor & 0xfffffff0)+rx_desc_virtual_base[dev_index]); ++ ++ /* release buffer to Remaining Buffer Number Register */ ++ if (flow_control_enable[dev_index] ==1) ++ { ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_BNCR,desc_count,0x0000ffff); ++ } ++ } ++ ++ /* if RX DMA process is stoped , restart it */ ++ rxdma_busy.bits.rd_first_des_ptr = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_FIRST_DESC); ++ if (rxdma_busy.bits.rd_busy == 0) ++ { ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 1; /* start RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 1; /* continue RX DMA operation */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++ } ++} ++ ++#ifdef CONFIG_SL2312_MPAGE ++static inline void free_tx_buf(int dev_index, int desc_index) ++{ ++ if (tx_skb[dev_index][desc_index].freeable && ++ tx_skb[dev_index][desc_index].skb) { ++ struct sk_buff* skb = tx_skb[dev_index][desc_index].skb; ++ //printk("free_skb %x, len %d\n", skb, skb->len); ++#ifdef CONFIG_TXINT_DISABLE ++ dev_kfree_skb(skb); ++#else ++ dev_kfree_skb_irq(skb); ++#endif ++ tx_skb[dev_index][desc_index].skb = 0; ++ } ++} ++ ++#ifdef CONFIG_TXINT_DISABLE ++static void gmac_tx_packet_complete(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ GMAC_DESCRIPTOR_T *tx_hw_complete_desc, *next_desc; ++ unsigned int desc_cnt=0; ++ unsigned int i,index,dev_index; ++ unsigned int tx_current_descriptor = 0; ++ // int own_dma = 0; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ index = ((unsigned int)tp->tx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ if (tx_skb[dev_index][index].desc_in_use && 
tp->tx_finished_desc->frame_ctrl.bits_tx_in.own == CPU) { ++ free_tx_buf(dev_index, index); ++ tx_skb[dev_index][index].desc_in_use = 0; ++ } ++ next_desc = (GMAC_DESCRIPTOR_T*)((tp->tx_finished_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ ++ for (;;) { ++ tx_hw_complete_desc = (GMAC_DESCRIPTOR_T *)((gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC) & 0xfffffff0)+ tx_desc_virtual_base[dev_index]); ++ if (next_desc == tx_hw_complete_desc) ++ break; ++ if (next_desc->frame_ctrl.bits_tx_in.own == CPU) { ++ if (next_desc->frame_ctrl.bits_tx_in.success_tx == 1) { ++ tp->stats.tx_bytes += next_desc->flag_status.bits_tx_flag.frame_count; ++ tp->stats.tx_packets ++; ++ } else { ++ tp->stats.tx_errors++; ++ } ++ desc_cnt = next_desc->frame_ctrl.bits_tx_in.desc_count; ++ for (i=1; iframe_ctrl.bits_tx_in.own = CPU; ++ free_tx_buf(dev_index, index); ++ tx_skb[dev_index][index].desc_in_use = 0; ++ tp->tx_desc_tail[dev_index] = (tp->tx_desc_tail[dev_index] +1) & (TX_DESC_NUM-1); ++ /* release Tx descriptor to CPU */ ++ next_desc = (GMAC_DESCRIPTOR_T *)((next_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ } ++ /* get tx skb buffer index */ ++ index = ((unsigned int)next_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ /* free skb buffer */ ++ next_desc->frame_ctrl.bits_tx_in.own = CPU; ++ free_tx_buf(dev_index, index); ++ tx_skb[dev_index][index].desc_in_use = 0; ++ tp->tx_desc_tail[dev_index] = (tp->tx_desc_tail[dev_index] +1) & (TX_DESC_NUM-1); ++ tp->tx_finished_desc = next_desc; ++// printk("finish tx_desc index %d\n", index); ++ next_desc = (GMAC_DESCRIPTOR_T *)((next_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ } ++ else ++ break; ++ } ++ if (netif_queue_stopped(dev)) ++ { ++ netif_wake_queue(dev); ++ } ++ ++} ++#else ++static void gmac_tx_packet_complete(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ GMAC_DESCRIPTOR_T *tx_hw_complete_desc; ++ unsigned int desc_cnt=0; ++ unsigned int i,index,dev_index; ++ unsigned int tx_current_descriptor = 0; ++ // int own_dma = 0; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ index = ((unsigned int)tp->tx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ ++ /* check tx status and accumulate tx statistics */ ++ for (;;) ++ { ++ ++ for (i=0;i<1000;i++) ++ { ++ tx_current_descriptor = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC); ++ if ( ((tx_current_descriptor & 0x00000003)==0x00000003) || /* only one descriptor */ ++ ((tx_current_descriptor & 0x00000003)==0x00000001) ) /* the last descriptor */ ++ { ++ break; ++ } ++ udelay(1); ++ } ++ if (i==1000) ++ { ++// gmac_dump_register(dev); ++// printk("%s: tx current descriptor = %x \n",__func__,tx_current_descriptor); ++// printk_all(dev_index, tp); ++ continue; ++ } ++ ++ /* get tx H/W completed descriptor virtual address */ ++ tx_hw_complete_desc = (GMAC_DESCRIPTOR_T *)((tx_current_descriptor & 0xfffffff0)+ tx_desc_virtual_base[dev_index]); ++// tx_hw_complete_desc = (GMAC_DESCRIPTOR_T *)((gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC) & 0xfffffff0)+ tx_desc_virtual_base[dev_index]); ++ if (tp->tx_finished_desc == tx_hw_complete_desc ) // || ++ //tx_skb[dev_index][index].desc_in_use ) /* complete tx processing */ ++ { ++ break; ++ } ++ ++ for (;;) ++ { ++ if (tp->tx_finished_desc->frame_ctrl.bits_tx_in.own == CPU) ++ { ++ #if (GMAC_DEBUG==1) ++ if ( 
(tp->tx_finished_desc->frame_ctrl.bits_tx_in.derr) || ++ (tp->tx_finished_desc->frame_ctrl.bits_tx_in.perr) ) ++ { ++ printk("%s::Descriptor Processing Error !!!\n",__func__); ++ } ++ #endif ++ if (tp->tx_finished_desc->frame_ctrl.bits_tx_in.success_tx == 1) ++ { ++ tp->stats.tx_bytes += tp->tx_finished_desc->flag_status.bits_tx_flag.frame_count; ++ tp->stats.tx_packets ++; ++ } ++ else ++ { ++ tp->stats.tx_errors++; ++ } ++ desc_cnt = tp->tx_finished_desc->frame_ctrl.bits_tx_in.desc_count; ++ for (i=1; itx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ tp->tx_finished_desc->frame_ctrl.bits_tx_in.own = CPU; ++ free_tx_buf(dev_index, index); ++ tx_skb[dev_index][index].desc_in_use = 0; ++ /* release Tx descriptor to CPU */ ++ tp->tx_finished_desc = (GMAC_DESCRIPTOR_T *)((tp->tx_finished_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ } ++ /* get tx skb buffer index */ ++ index = ((unsigned int)tp->tx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ /* free skb buffer */ ++ tp->tx_finished_desc->frame_ctrl.bits_tx_in.own = CPU; ++ free_tx_buf(dev_index, index); ++ tx_skb[dev_index][index].desc_in_use = 0; ++ tp->tx_finished_desc = (GMAC_DESCRIPTOR_T *)((tp->tx_finished_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ ++ if (tp->tx_finished_desc == tx_hw_complete_desc ) ++ { ++ break; ++ } ++ } ++ else ++ { ++ break; ++ } ++ } ++ } ++ ++ if (netif_queue_stopped(dev)) ++ { ++ netif_wake_queue(dev); ++ } ++ ++} ++#endif ++#else ++ ++static void gmac_tx_packet_complete(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ GMAC_DESCRIPTOR_T *tx_hw_complete_desc; ++ unsigned int desc_cnt=0; ++ unsigned int i,index,dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* get tx H/W completed descriptor virtual address */ ++ tx_hw_complete_desc = (GMAC_DESCRIPTOR_T *)((gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC) & 0xfffffff0)+ tx_desc_virtual_base[dev_index]); ++ /* check tx status and accumulate tx statistics */ ++ for (;;) ++ { ++ if (tp->tx_finished_desc == tx_hw_complete_desc) /* complete tx processing */ ++ { ++ break; ++ } ++ if (tp->tx_finished_desc->frame_ctrl.bits_tx_in.own == CPU) ++ { ++#if (GMAC_DEBUG==1) ++ if ( (tp->tx_finished_desc->frame_ctrl.bits_tx_in.derr) || ++ (tp->tx_finished_desc->frame_ctrl.bits_tx_in.perr) ) ++ { ++ printk("%s::Descriptor Processing Error !!!\n",__func__); ++ } ++#endif ++ if (tp->tx_finished_desc->frame_ctrl.bits_tx_in.success_tx == 1) ++ { ++ tp->stats.tx_bytes += tp->tx_finished_desc->flag_status.bits_tx_flag.frame_count; ++ tp->stats.tx_packets ++; ++ } ++ else ++ { ++ tp->stats.tx_errors++; ++ } ++ desc_cnt = tp->tx_finished_desc->frame_ctrl.bits_tx_in.desc_count; ++ for (i=1; itx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ /* free skb buffer */ ++ if (tx_skb[dev_index][index]) ++ { ++ dev_kfree_skb_irq(tx_skb[dev_index][index]); ++ } ++ /* release Tx descriptor to CPU */ ++ tp->tx_finished_desc = (GMAC_DESCRIPTOR_T *)((tp->tx_finished_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ tp->tx_finished_desc->frame_ctrl.bits_tx_in.own = CPU; ++ } ++ /* get tx skb buffer index */ ++ index = ((unsigned int)tp->tx_finished_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ /* free skb buffer */ ++ if (tx_skb[dev_index][index]) ++ { ++ dev_kfree_skb_irq(tx_skb[dev_index][index]); ++ } ++ 
tp->tx_finished_desc = (GMAC_DESCRIPTOR_T *)((tp->tx_finished_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ } ++ } ++ ++ if (netif_queue_stopped(dev)) ++ { ++ netif_wake_queue(dev); ++ } ++ ++} ++ ++ ++#endif ++ ++#if 0 ++static void gmac_weird_interrupt(struct net_device *dev) ++{ ++ gmac_dump_register(dev); ++} ++#endif ++ ++/* The interrupt handler does all of the Rx thread work and cleans up ++ after the Tx thread. */ ++static irqreturn_t gmac_interrupt (int irq, void *dev_instance, struct pt_regs *regs) ++{ ++ struct net_device *dev = (struct net_device *)dev_instance; ++ GMAC_RXDMA_FIRST_DESC_T rxdma_busy; ++// GMAC_TXDMA_FIRST_DESC_T txdma_busy; ++// GMAC_TXDMA_CTRL_T txdma_ctrl,txdma_ctrl_mask; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ GMAC_DMA_STATUS_T status; ++ unsigned int i,dev_index; ++ int handled = 0; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ handled = 1; ++ ++#ifdef CONFIG_SL_NAPI ++ disable_irq(gmac_irq[dev_index]); /* disable GMAC interrupt */ ++ ++ status.bits32 = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_DMA_STATUS); /* read DMA status */ ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_DMA_STATUS,status.bits32,status.bits32); /* clear DMA status */ ++ ++ if (status.bits.rx_overrun == 1) ++ { ++ printk("%s::RX Overrun !!!%d\n",__func__,gmac_read_reg(gmac_base_addr[dev_index] + GMAC_RBNR)); ++ gmac_dump_register(dev); ++ /* if RX DMA process is stoped , restart it */ ++ rxdma_busy.bits32 = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_FIRST_DESC) ; ++ if (rxdma_busy.bits.rd_busy == 0) ++ { ++ /* restart Rx DMA process */ ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 1; /* start RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 1; /* continue RX DMA operation */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++ } ++ } ++ ++ /* process rx packet */ ++ if (netif_running(dev) && ((status.bits.rs_eofi==1)||(status.bits.rs_finish==1))) ++ { ++ if (likely(netif_rx_schedule_prep(dev))) ++ { ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,0,0x0007c000); /* disable rx interrupt */ ++ __netif_rx_schedule(dev); ++ } ++ } ++#ifndef CONFIG_TXINT_DISABLE ++ /* process tx packet */ ++ if (netif_running(dev) && ((status.bits.ts_eofi==1)||(status.bits.ts_finish==1))) ++ { ++ gmac_tx_packet_complete(dev); ++ } ++#endif ++ ++ enable_irq(gmac_irq[dev_index]); /* enable GMAC interrupt */ ++ return IRQ_RETVAL(handled); ++#endif ++ ++ /* disable GMAC interrupt */ ++ disable_irq(gmac_irq[dev_index]); ++ for (i=0;ipriv; ++ GMAC_TXDMA_CURR_DESC_T tx_desc; ++ GMAC_RXDMA_CURR_DESC_T rx_desc; ++ GMAC_TXDMA_CTRL_T txdma_ctrl,txdma_ctrl_mask; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ GMAC_DMA_STATUS_T dma_status,dma_status_mask; ++ int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* program TxDMA Current Descriptor Address register for first descriptor */ ++ tx_desc.bits32 = (unsigned int)(tp->tx_desc_dma); ++ tx_desc.bits.eofie = 1; ++ tx_desc.bits.sof_eof = 0x03; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC,tx_desc.bits32,0xffffffff); ++ gmac_write_reg(gmac_base_addr[dev_index] + 0xff2c,tx_desc.bits32,0xffffffff); /* tx next descriptor address */ ++ ++ /* program RxDMA Current Descriptor Address register for first descriptor */ ++ rx_desc.bits32 = (unsigned int)(tp->rx_desc_dma); ++ 
rx_desc.bits.eofie = 1; ++ rx_desc.bits.sof_eof = 0x03; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CURR_DESC,rx_desc.bits32,0xffffffff); ++ gmac_write_reg(gmac_base_addr[dev_index] + 0xff3c,rx_desc.bits32,0xffffffff); /* rx next descriptor address */ ++ ++ /* enable GMAC interrupt & disable loopback */ ++ dma_status.bits32 = 0; ++ dma_status.bits.loop_back = 0; /* disable DMA loop-back mode */ ++// dma_status.bits.m_tx_fail = 1; ++ dma_status.bits.m_cnt_full = 1; ++ dma_status.bits.m_rx_pause_on = 1; ++ dma_status.bits.m_tx_pause_on = 1; ++ dma_status.bits.m_rx_pause_off = 1; ++ dma_status.bits.m_tx_pause_off = 1; ++ dma_status.bits.m_rx_overrun = 1; ++ dma_status.bits.m_link_change = 1; ++ dma_status_mask.bits32 = 0; ++ dma_status_mask.bits.loop_back = 1; ++// dma_status_mask.bits.m_tx_fail = 1; ++ dma_status_mask.bits.m_cnt_full = 1; ++ dma_status_mask.bits.m_rx_pause_on = 1; ++ dma_status_mask.bits.m_tx_pause_on = 1; ++ dma_status_mask.bits.m_rx_pause_off = 1; ++ dma_status_mask.bits.m_tx_pause_off = 1; ++ dma_status_mask.bits.m_rx_overrun = 1; ++ dma_status_mask.bits.m_link_change = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_DMA_STATUS,dma_status.bits32,dma_status_mask.bits32); ++ ++ /* program tx dma control register */ ++ txdma_ctrl.bits32 = 0; ++ txdma_ctrl.bits.td_start = 0; /* start TX DMA transfer */ ++ txdma_ctrl.bits.td_continue = 0; /* continue Tx DMA operation */ ++ txdma_ctrl.bits.td_chain_mode = 1; /* chain mode */ ++ txdma_ctrl.bits.td_prot = 0; ++ txdma_ctrl.bits.td_burst_size = 2; /* DMA burst size for every AHB request */ ++ txdma_ctrl.bits.td_bus = 2; /* peripheral bus width */ ++ txdma_ctrl.bits.td_endian = 0; /* little endian */ ++#ifdef CONFIG_TXINT_DISABLE ++ txdma_ctrl.bits.td_finish_en = 0; /* DMA finish event interrupt disable */ ++#else ++ txdma_ctrl.bits.td_finish_en = 1; /* DMA finish event interrupt enable */ ++#endif ++ txdma_ctrl.bits.td_fail_en = 1; /* DMA fail interrupt enable */ ++ txdma_ctrl.bits.td_perr_en = 1; /* protocol failure interrupt enable */ ++ txdma_ctrl.bits.td_eod_en = 0; /* disable Tx End of Descriptor Interrupt */ ++ //txdma_ctrl.bits.td_eod_en = 0; /* disable Tx End of Descriptor Interrupt */ ++#ifdef CONFIG_TXINT_DISABLE ++ txdma_ctrl.bits.td_eof_en = 0; /* end of frame interrupt disable */ ++#else ++ txdma_ctrl.bits.td_eof_en = 1; /* end of frame interrupt enable */ ++#endif ++ txdma_ctrl_mask.bits32 = 0; ++ txdma_ctrl_mask.bits.td_start = 1; ++ txdma_ctrl_mask.bits.td_continue = 1; ++ txdma_ctrl_mask.bits.td_chain_mode = 1; ++ txdma_ctrl_mask.bits.td_prot = 15; ++ txdma_ctrl_mask.bits.td_burst_size = 3; ++ txdma_ctrl_mask.bits.td_bus = 3; ++ txdma_ctrl_mask.bits.td_endian = 1; ++ txdma_ctrl_mask.bits.td_finish_en = 1; ++ txdma_ctrl_mask.bits.td_fail_en = 1; ++ txdma_ctrl_mask.bits.td_perr_en = 1; ++ txdma_ctrl_mask.bits.td_eod_en = 1; ++ //txdma_ctrl_mask.bits.td_eod_en = 1; ++ txdma_ctrl_mask.bits.td_eof_en = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CTRL,txdma_ctrl.bits32,txdma_ctrl_mask.bits32); ++ ++ /* program rx dma control register */ ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 1; /* start RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 1; /* continue RX DMA operation */ ++ rxdma_ctrl.bits.rd_chain_mode = 1; /* chain mode */ ++ rxdma_ctrl.bits.rd_prot = 0; ++ rxdma_ctrl.bits.rd_burst_size = 2; /* DMA burst size for every AHB request */ ++ rxdma_ctrl.bits.rd_bus = 2; /* peripheral bus width */ ++ rxdma_ctrl.bits.rd_endian = 0; /* little endian */ ++ 
rxdma_ctrl.bits.rd_finish_en = 1; /* DMA finish event interrupt enable */ ++ rxdma_ctrl.bits.rd_fail_en = 1; /* DMA fail interrupt enable */ ++ rxdma_ctrl.bits.rd_perr_en = 1; /* protocol failure interrupt enable */ ++ rxdma_ctrl.bits.rd_eod_en = 0; /* disable Rx End of Descriptor Interrupt */ ++ rxdma_ctrl.bits.rd_eof_en = 1; /* end of frame interrupt enable */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ rxdma_ctrl_mask.bits.rd_chain_mode = 1; ++ rxdma_ctrl_mask.bits.rd_prot = 15; ++ rxdma_ctrl_mask.bits.rd_burst_size = 3; ++ rxdma_ctrl_mask.bits.rd_bus = 3; ++ rxdma_ctrl_mask.bits.rd_endian = 1; ++ rxdma_ctrl_mask.bits.rd_finish_en = 1; ++ rxdma_ctrl_mask.bits.rd_fail_en = 1; ++ rxdma_ctrl_mask.bits.rd_perr_en = 1; ++ rxdma_ctrl_mask.bits.rd_eod_en = 1; ++ rxdma_ctrl_mask.bits.rd_eof_en = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++ return; ++} ++ ++static void gmac_hw_stop(struct net_device *dev) ++{ ++ GMAC_TXDMA_CTRL_T txdma_ctrl,txdma_ctrl_mask; ++ GMAC_RXDMA_CTRL_T rxdma_ctrl,rxdma_ctrl_mask; ++ int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* program tx dma control register */ ++ txdma_ctrl.bits32 = 0; ++ txdma_ctrl.bits.td_start = 0; ++ txdma_ctrl.bits.td_continue = 0; ++ txdma_ctrl_mask.bits32 = 0; ++ txdma_ctrl_mask.bits.td_start = 1; ++ txdma_ctrl_mask.bits.td_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CTRL,txdma_ctrl.bits32,txdma_ctrl_mask.bits32); ++ /* program rx dma control register */ ++ rxdma_ctrl.bits32 = 0; ++ rxdma_ctrl.bits.rd_start = 0; /* stop RX DMA transfer */ ++ rxdma_ctrl.bits.rd_continue = 0; /* stop continue RX DMA operation */ ++ rxdma_ctrl_mask.bits32 = 0; ++ rxdma_ctrl_mask.bits.rd_start = 1; ++ rxdma_ctrl_mask.bits.rd_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_RXDMA_CTRL,rxdma_ctrl.bits32,rxdma_ctrl_mask.bits32); ++} ++ ++static int gmac_init_desc_buf(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ struct sk_buff *skb; ++ dma_addr_t tx_first_desc_dma=0; ++ dma_addr_t rx_first_desc_dma=0; ++ dma_addr_t rx_first_buf_dma=0; ++ unsigned int i,index; ++ ++ printk("Descriptor buffer init......\n"); ++ ++ /* get device index number */ ++ index = gmac_get_dev_index(dev); ++#ifdef CONFIG_SL2312_MPAGE ++ for (i=0; itx_desc = DMA_MALLOC(TX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),(dma_addr_t *)&tp->tx_desc_dma); ++ tx_desc_virtual_base[index] = (unsigned int)tp->tx_desc - (unsigned int)tp->tx_desc_dma; ++ memset(tp->tx_desc,0x00,TX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T)); ++ tp->rx_desc = DMA_MALLOC(RX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),(dma_addr_t *)&tp->rx_desc_dma); ++ rx_desc_virtual_base[index] = (unsigned int)tp->rx_desc - (unsigned int)tp->rx_desc_dma; ++ memset(tp->rx_desc,0x00,RX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T)); ++ tx_desc_start_adr[index] = (unsigned int)tp->tx_desc; /* for tx skb index calculation */ ++ rx_desc_start_adr[index] = (unsigned int)tp->rx_desc; /* for rx skb index calculation */ ++ printk("tx_desc = %08x\n",(unsigned int)tp->tx_desc); ++ printk("rx_desc = %08x\n",(unsigned int)tp->rx_desc); ++ printk("tx_desc_dma = %08x\n",tp->tx_desc_dma); ++ printk("rx_desc_dma = %08x\n",tp->rx_desc_dma); ++ ++ if (tp->tx_desc==0x00 || tp->rx_desc==0x00) ++ { ++ free_irq(dev->irq, dev); ++ ++ if (tp->tx_desc) ++ DMA_MFREE(tp->tx_desc, TX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),tp->tx_desc_dma); ++ if (tp->rx_desc) ++ DMA_MFREE(tp->rx_desc, 
RX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),tp->rx_desc_dma); ++ return -ENOMEM; ++ } ++ ++ /* TX descriptors initial */ ++ tp->tx_cur_desc = tp->tx_desc; /* virtual address */ ++ tp->tx_finished_desc = tp->tx_desc; /* virtual address */ ++ tx_first_desc_dma = tp->tx_desc_dma; /* physical address */ ++ for (i = 1; i < TX_DESC_NUM; i++) ++ { ++ tp->tx_desc->frame_ctrl.bits_tx_out.own = CPU; /* set owner to CPU */ ++ tp->tx_desc->frame_ctrl.bits_tx_out.buffer_size = TX_BUF_SIZE; /* set tx buffer size for descriptor */ ++ tp->tx_desc_dma = tp->tx_desc_dma + sizeof(GMAC_DESCRIPTOR_T); /* next tx descriptor DMA address */ ++ tp->tx_desc->next_desc.next_descriptor = tp->tx_desc_dma | 0x0000000b; ++ tp->tx_desc = &tp->tx_desc[1] ; /* next tx descriptor virtual address */ ++ } ++ /* the last descriptor will point back to first descriptor */ ++ tp->tx_desc->frame_ctrl.bits_tx_out.own = CPU; ++ tp->tx_desc->frame_ctrl.bits_tx_out.buffer_size = TX_BUF_SIZE; ++ tp->tx_desc->next_desc.next_descriptor = tx_first_desc_dma | 0x0000000b; ++ tp->tx_desc = tp->tx_cur_desc; ++ tp->tx_desc_dma = tx_first_desc_dma; ++ ++ /* RX descriptors initial */ ++ tp->rx_cur_desc = tp->rx_desc; /* virtual address */ ++ rx_first_desc_dma = tp->rx_desc_dma; /* physical address */ ++ for (i = 1; i < RX_DESC_NUM; i++) ++ { ++ if ( (skb = dev_alloc_skb(RX_BUF_SIZE))==NULL) /* allocate socket buffer */ ++ { ++ printk("%s::skb buffer allocation fail !\n",__func__); ++ } ++ rx_skb[index][i-1] = skb; ++ tp->rx_desc->buf_adr = (unsigned int)__pa(skb->data) | 0x02; /* insert two bytes in the beginning of rx data */ ++ tp->rx_desc->frame_ctrl.bits_rx.own = DMA; /* set owner bit to DMA */ ++ tp->rx_desc->frame_ctrl.bits_rx.buffer_size = RX_BUF_SIZE; /* set rx buffer size for descriptor */ ++ tp->rx_bufs_dma = tp->rx_bufs_dma + RX_BUF_SIZE; /* point to next buffer address */ ++ tp->rx_desc_dma = tp->rx_desc_dma + sizeof(GMAC_DESCRIPTOR_T); /* next rx descriptor DMA address */ ++ tp->rx_desc->next_desc.next_descriptor = tp->rx_desc_dma | 0x0000000b; ++ tp->rx_desc = &tp->rx_desc[1]; /* next rx descriptor virtual address */ ++ } ++ /* the last descriptor will point back to first descriptor */ ++ if ( (skb = dev_alloc_skb(RX_BUF_SIZE))==NULL) /* allocate socket buffer */ ++ { ++ printk("%s::skb buffer allocation fail !\n",__func__); ++ } ++ rx_skb[index][i-1] = skb; ++ tp->rx_desc->buf_adr = (unsigned int)__pa(skb->data) | 0x02; /* insert two bytes in the beginning of rx data */ ++ tp->rx_desc->frame_ctrl.bits_rx.own = DMA; ++ tp->rx_desc->frame_ctrl.bits_rx.buffer_size = RX_BUF_SIZE; ++ tp->rx_desc->next_desc.next_descriptor = rx_first_desc_dma | 0x0000000b; ++ tp->rx_desc = tp->rx_cur_desc; ++ tp->rx_desc_dma = rx_first_desc_dma; ++ tp->rx_bufs_dma = rx_first_buf_dma; ++ ++ for (i=0; itx_desc_hdr[i] = 0; ++ tp->tx_desc_tail[i] = 0; ++ } ++ return (0); ++} ++ ++static int gmac_clear_counter (struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ unsigned int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++// tp = gmac_dev[index]->priv; ++ /* clear counter */ ++ gmac_read_reg(gmac_base_addr[dev_index] + GMAC_IN_DISCARDS); ++ gmac_read_reg(gmac_base_addr[dev_index] + GMAC_IN_ERRORS); ++ tp->stats.tx_bytes = 0; ++ tp->stats.tx_packets = 0; ++ tp->stats.tx_errors = 0; ++ tp->stats.rx_bytes = 0; ++ tp->stats.rx_packets = 0; ++ tp->stats.rx_errors = 0; ++ tp->stats.rx_dropped = 0; ++ return (0); ++} ++ ++static int gmac_open (struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ int retval; ++ ++ 
gmac_select_interface(dev); ++ ++ /* chip reset */ ++ gmac_sw_reset(dev); ++ ++ /* allocates tx/rx descriptor and buffer memory */ ++ gmac_init_desc_buf(dev); ++ ++ /* get mac address from FLASH */ ++ gmac_get_mac_address(); ++ ++ /* set PHY register to start autonegition process */ ++ gmac_set_phy_status(dev); ++ ++ /* GMAC initialization */ ++ if (gmac_init_chip(dev)) ++ { ++ printk (KERN_ERR "GMAC init fail\n"); ++ } ++ ++ /* start DMA process */ ++ gmac_hw_start(dev); ++ ++ /* enable tx/rx register */ ++ gmac_enable_tx_rx(dev); ++ ++ /* clear statistic counter */ ++ gmac_clear_counter(dev); ++ ++ netif_start_queue (dev); ++ ++ /* hook ISR */ ++ retval = request_irq (dev->irq, gmac_interrupt, SA_INTERRUPT, dev->name, dev); ++ if (retval) ++ return retval; ++ ++ if(!FLAG_SWITCH) ++ { ++ init_waitqueue_head (&tp->thr_wait); ++ init_completion(&tp->thr_exited); ++ ++ tp->time_to_die = 0; ++ tp->thr_pid = kernel_thread (gmac_phy_thread, dev, CLONE_FS | CLONE_FILES); ++ if (tp->thr_pid < 0) ++ { ++ printk (KERN_WARNING "%s: unable to start kernel thread\n",dev->name); ++ } ++ } ++ return (0); ++} ++ ++static int gmac_close(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ unsigned int i,dev_index; ++ unsigned int ret; ++ ++ dev_index = gmac_get_dev_index(dev); ++ ++ /* stop tx/rx packet */ ++ gmac_disable_tx_rx(dev); ++ ++ /* stop the chip's Tx and Rx DMA processes */ ++ gmac_hw_stop(dev); ++ ++ netif_stop_queue(dev); ++ ++ /* disable interrupts by clearing the interrupt mask */ ++ synchronize_irq(); ++ free_irq(dev->irq,dev); ++ ++ DMA_MFREE(tp->tx_desc, TX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),(unsigned int)tp->tx_desc_dma); ++ DMA_MFREE(tp->rx_desc, RX_DESC_NUM*sizeof(GMAC_DESCRIPTOR_T),(unsigned int)tp->rx_desc_dma); ++ ++#ifdef CONFIG_SL2312_MPAGE ++// kfree(tx_skb); ++#endif ++ ++ for (i=0;ithr_pid >= 0) ++ { ++ tp->time_to_die = 1; ++ wmb(); ++ ret = kill_proc (tp->thr_pid, SIGTERM, 1); ++ if (ret) ++ { ++ printk (KERN_ERR "%s: unable to signal thread\n", dev->name); ++ return ret; ++ } ++// wait_for_completion (&tp->thr_exited); ++ } ++ } ++ ++ return (0); ++} ++ ++#ifdef CONFIG_SL2312_MPAGE ++int printk_all(int dev_index, struct gmac_private* tp) ++{ ++ int i=0; ++ unsigned int tx_current_descriptor = 0; ++ int hw_index; ++ int fi; ++ GMAC_DESCRIPTOR_T* tmp_desc; ++ ++ GMAC_DESCRIPTOR_T* cur_desc=tp->tx_cur_desc; ++ fi = ((unsigned int)cur_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ printk("tmp_desc %x, id %d\n", (int)cur_desc, fi); ++ ++ tmp_desc = (GMAC_DESCRIPTOR_T*)((gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC) & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ hw_index = ((unsigned int)tmp_desc - tx_desc_start_adr[dev_index])/ sizeof(GMAC_DESCRIPTOR_T); ++ printk("hd_desc %x, ind %d, fin desc %x\n",(int)tmp_desc, hw_index, (int)tp->tx_finished_desc); ++ ++ for (i=0; i ", fi, hw_index); ++ printk("fc %8x ", tmp_desc->frame_ctrl.bits32); ++ printk("fs %8x ", tmp_desc->flag_status.bits32); ++ printk("fb %8x ", tmp_desc->buf_adr); ++ printk("fd %8x\n", tmp_desc->next_desc.next_descriptor); ++ tmp_desc = (GMAC_DESCRIPTOR_T*)((tmp_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ fi = ((unsigned int)tmp_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ } ++ tx_current_descriptor = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CURR_DESC); ++ printk("%s: tx current descriptor = %x \n",__func__,tx_current_descriptor); ++ printk("%s: interrupt status = %x 
\n",__func__,int_status); ++ return 0; ++} ++ ++int cleanup_desc(int dev_index, struct gmac_private* tp) ++{ ++ int i=0; ++ int index = ((unsigned int)tp->tx_cur_desc - tx_desc_start_adr[dev_index])/sizeof(GMAC_DESCRIPTOR_T); ++ GMAC_DESCRIPTOR_T* fill_desc = tp->tx_cur_desc; ++ ++ for (i=0; i< TX_DESC_NUM; i++) ++ { ++ fill_desc->frame_ctrl.bits_tx_out.own = CPU; ++ fill_desc->frame_ctrl.bits_tx_out.buffer_size = TX_BUF_SIZE; ++ tx_skb[dev_index][index].desc_in_use = 0; ++ free_tx_buf(dev_index, index); ++ printk("cleanup di %d\n", index); ++ fill_desc = (GMAC_DESCRIPTOR_T*)((fill_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ index++; ++ if (index > TX_DESC_NUM) ++ index = 0; ++ } ++ return 1; ++} ++ ++size_t get_available_tx_desc(struct net_device* dev, int dev_index) ++{ ++ struct gmac_private *tp = dev->priv; ++ unsigned int desc_hdr = tp->tx_desc_hdr[dev_index]; ++ unsigned int desc_tail = tp->tx_desc_tail[dev_index]; ++ int available_desc_num = (TX_DESC_NUM - desc_hdr + desc_tail) & (TX_DESC_NUM-1); ++ if (!available_desc_num) { ++ if (tx_skb[dev_index][desc_hdr].desc_in_use) ++ return 0; ++ else ++ return TX_DESC_NUM; ++ } ++ return available_desc_num; ++} ++ ++int check_free_tx_desc(int dev_index, int n, GMAC_DESCRIPTOR_T* desc) ++{ ++ int i,index; ++ GMAC_DESCRIPTOR_T* tmp_desc = desc; ++ ++ if (n > TX_DESC_NUM) ++ return 0; ++ ++ index = ((unsigned int)tmp_desc - tx_desc_start_adr[dev_index])/sizeof(GMAC_DESCRIPTOR_T); ++ for (i=0; i> 12) & 0x000F) ++ ++inline int fill_in_desc(int dev_index, GMAC_DESCRIPTOR_T *desc, char* data, int len, int total_len, int sof, int freeable, int ownership, struct sk_buff* skb) ++{ ++ int index = ((unsigned int)desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ ++ if (desc->frame_ctrl.bits_tx_in.own == CPU) ++ { ++ tx_skb[dev_index][index].freeable = freeable; ++ if ((sof & 0x01) && skb) { ++ tx_skb[dev_index][index].skb = skb; ++ } ++ else ++ tx_skb[dev_index][index].skb = 0; ++ ++ if (sof != 2) ++ tx_skb[dev_index][index].desc_in_use = 1; ++ else ++ tx_skb[dev_index][index].desc_in_use = 0; ++ ++ consistent_sync(data, len, PCI_DMA_TODEVICE); ++ desc->buf_adr = (unsigned int)__pa(data); ++ desc->frame_ctrl.bits_tx_out.buffer_size = len; ++ desc->flag_status.bits_tx_flag.frame_count = total_len; ++ desc->next_desc.bits.eofie = 1; ++ desc->next_desc.bits.sof_eof = sof; ++ desc->frame_ctrl.bits_tx_out.vlan_enable = 0; ++ desc->frame_ctrl.bits_tx_out.ip_csum_en = 1; /* TSS IPv4 IP header checksum enable */ ++ desc->frame_ctrl.bits_tx_out.ipv6_tx_en = 1; /* TSS IPv6 tx enable */ ++ desc->frame_ctrl.bits_tx_out.tcp_csum_en = 1; /* TSS TCP checksum enable */ ++ desc->frame_ctrl.bits_tx_out.udp_csum_en = 1; /* TSS UDP checksum enable */ ++ wmb(); ++ desc->frame_ctrl.bits_tx_out.own = ownership; ++// consistent_sync(desc, sizeof(GMAC_DESCRIPTOR_T), PCI_DMA_TODEVICE); ++ } ++ return 0; ++} ++#endif ++ ++static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ GMAC_TXDMA_CTRL_T tx_ctrl,tx_ctrl_mask; ++ GMAC_TXDMA_FIRST_DESC_T txdma_busy; ++ unsigned int len = skb->len; ++ unsigned int dev_index; ++ static unsigned int pcount = 0; ++#ifdef CONFIG_SL2312_MPAGE ++ GMAC_DESCRIPTOR_T *fill_desc; ++ int snd_pages = skb_shinfo(skb)->nr_frags; /* get number of descriptor */ ++ int desc_needed = 1; // for jumbo packet, one descriptor is enough. 
++ int header_len = skb->len; ++ struct iphdr *ip_hdr; ++ struct tcphdr *tcp_hdr; ++ int tcp_hdr_len; ++ int data_len; ++ int prv_index; ++ long seq_num; ++ int first_desc_index; ++ int ownership, freeable; ++ int eof; ++ int i=0; ++#endif ++#ifdef CONFIG_TXINT_DISABLE ++ int available_desc_cnt = 0; ++#endif ++ ++ dev_index = gmac_select_interface(dev); ++ ++#ifdef CONFIG_TXINT_DISABLE ++ available_desc_cnt = get_available_tx_desc(dev, dev_index); ++ ++ if (available_desc_cnt < (TX_DESC_NUM >> 2)) { ++ gmac_tx_packet_complete(dev); ++ } ++#endif ++ ++#ifdef CONFIG_SL2312_MPAGE ++ ++ fill_desc = tp->tx_cur_desc; ++ if(!fill_desc) { ++ printk("cur_desc is NULL!\n"); ++ return -1; ++ } ++ ++ if (storlink_ctl.recvfile==2) ++ { ++ printk("snd_pages=%d skb->len=%d\n",snd_pages,skb->len); ++ } ++ ++ if (snd_pages) ++ desc_needed += snd_pages; /* decriptors needed for this large packet */ ++ ++ if (!check_free_tx_desc(dev_index, desc_needed, fill_desc)) { ++ printk("no available desc!\n"); ++ gmac_dump_register(dev); ++ printk_all(dev_index, tp); ++ tp->stats.tx_dropped++; ++ if (pcount++ > 10) ++ { ++ for (;;); ++ } ++ return -1; ++ } ++ ++ first_desc_index = ((unsigned int)fill_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ ++ /* check if the tcp packet is in order*/ ++ ip_hdr = (struct iphdr*) &(skb->data[14]); ++ tcp_hdr = (struct tcphdr*) &(skb->data[14+ip_hdr->ihl * 4]); ++ tcp_hdr_len = TCPHDRLEN(tcp_hdr) * 4; ++ data_len = skb->len - 14 - ip_hdr->ihl *4 - tcp_hdr_len; ++ ++ prv_index = first_desc_index-1; ++ if (prv_index <0) ++ prv_index += TX_DESC_NUM; ++ seq_num = ntohl(tcp_hdr->seq); ++ ++ if (snd_pages) ++ { ++ // calculate header length ++ // check fragment total length and header len = skb len - frag len ++ // or parse the header. 
++ for (i=0; ifrags[i]; ++ header_len -= frag->size; ++ } ++ ownership = CPU; ++ freeable = 0; ++ /* fill header into first descriptor */ ++ fill_in_desc(dev_index, fill_desc, skb->data, header_len, len, 2, freeable, ownership, 0); ++ fill_desc = (GMAC_DESCRIPTOR_T*)((fill_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ tx_skb[dev_index][first_desc_index].end_seq = seq_num + data_len; ++ ++ eof = 0; ++ ownership = DMA; ++ for (i=0; ifrags[i]; ++ int start_pos = frag->page_offset; ++ char* data_buf = page_address(frag->page); ++ int data_size = frag->size; ++ int cur_index; ++ ++ if (i == snd_pages-1) ++ { ++ eof=1; ++ freeable = 1; ++ } ++ fill_in_desc(dev_index, fill_desc, data_buf+(start_pos), data_size, ++ len, eof, freeable, ownership, skb); ++ cur_index = ((unsigned int)fill_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ ++ fill_desc = (GMAC_DESCRIPTOR_T*)((fill_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ } ++ /* pass the ownership of the first descriptor to hardware */ ++// disable_irq(gmac_irq[dev_index]); ++ tx_skb[dev_index][first_desc_index].desc_in_use = 1; ++ wmb(); ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.own = DMA; ++// consistent_sync(tp->tx_cur_desc, sizeof(GMAC_DESCRIPTOR_T), PCI_DMA_TODEVICE); ++ tp->tx_cur_desc = fill_desc; ++ dev->trans_start = jiffies; ++// enable_irq(gmac_irq[dev_index]); ++ } ++ else if ( tp->tx_cur_desc->frame_ctrl.bits_tx_out.own == CPU ) ++ { ++// tx_skb[dev_index][first_desc_index].end_seq = seq_num + data_len; ++// disable_irq(gmac_irq[dev_index]); ++ fill_in_desc(dev_index, tp->tx_cur_desc, skb->data, skb->len, skb->len, 3, 1, DMA, skb); ++// enable_irq(gmac_irq[dev_index]); ++ //consistent_sync(tp->tx_cur_desc, sizeof(GMAC_DESCRIPTOR_T), PCI_DMA_TODEVICE); ++ tp->tx_cur_desc = (GMAC_DESCRIPTOR_T*)((tp->tx_cur_desc->next_desc.next_descriptor & 0xfffffff0) + tx_desc_virtual_base[dev_index]); ++ dev->trans_start = jiffies; ++ } ++ else ++ { ++ printk("gmac tx drop!\n"); ++ tp->stats.tx_dropped++; ++ return -1; ++ } ++ ++#ifdef CONFIG_TXINT_DISABLE ++ tp->tx_desc_hdr[dev_index] = (tp->tx_desc_hdr[dev_index] + desc_needed) & (TX_DESC_NUM-1); ++#endif ++ ++#else ++ if ((tp->tx_cur_desc->frame_ctrl.bits_tx_out.own == CPU) && (len < TX_BUF_SIZE)) ++ { ++ index = ((unsigned int)tp->tx_cur_desc - tx_desc_start_adr[dev_index]) / sizeof(GMAC_DESCRIPTOR_T); ++ tx_skb[dev_index][index] = skb; ++ consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE); ++ tp->tx_cur_desc->buf_adr = (unsigned int)__pa(skb->data); ++ tp->tx_cur_desc->flag_status.bits_tx_flag.frame_count = len; /* total frame byte count */ ++ tp->tx_cur_desc->next_desc.bits.sof_eof = 0x03; /*only one descriptor*/ ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.buffer_size = len; /* descriptor byte count */ ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.vlan_enable = 0; ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.ip_csum_en = 0; /* TSS IPv4 IP header checksum enable */ ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.ipv6_tx_en = 0 ; /* TSS IPv6 tx enable */ ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.tcp_csum_en = 0; /* TSS TCP checksum enable */ ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.udp_csum_en = 0; /* TSS UDP checksum enable */ ++ wmb(); ++ tp->tx_cur_desc->frame_ctrl.bits_tx_out.own = DMA; /* set owner bit */ ++ tp->tx_cur_desc = (GMAC_DESCRIPTOR_T *)((tp->tx_cur_desc->next_desc.next_descriptor & 0xfffffff0)+tx_desc_virtual_base[dev_index]); ++ dev->trans_start = jiffies; ++ } ++ else ++ { ++ /* no free tx 
descriptor */ ++ dev_kfree_skb(skb); ++ netif_stop_queue(dev); ++ tp->stats.tx_dropped++; ++ return (-1); ++ } ++#endif ++ /* if TX DMA process is stoped , restart it */ ++ txdma_busy.bits32 = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_FIRST_DESC); ++ if (txdma_busy.bits.td_busy == 0) ++ { ++ /* restart DMA process */ ++ tx_ctrl.bits32 = 0; ++ tx_ctrl.bits.td_start = 1; ++ tx_ctrl.bits.td_continue = 1; ++ tx_ctrl_mask.bits32 = 0; ++ tx_ctrl_mask.bits.td_start = 1; ++ tx_ctrl_mask.bits.td_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CTRL,tx_ctrl.bits32,tx_ctrl_mask.bits32); ++ } ++ return (0); ++} ++ ++ ++struct net_device_stats * gmac_get_stats(struct net_device *dev) ++{ ++ struct gmac_private *tp = dev->priv; ++ unsigned long flags; ++ unsigned int pkt_drop; ++ unsigned int pkt_error; ++ unsigned int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++// if (storlink_ctl.recvfile==3) ++// { ++// printk("GMAC_GLOBAL_BASE_ADDR=%x\n", readl(GMAC_GLOBAL_BASE_ADDR+0x30)); ++// gmac_dump_register(dev); ++// printk_all(0, dev); ++// } ++ ++ if (netif_running(dev)) ++ { ++ /* read H/W counter */ ++ spin_lock_irqsave(&tp->lock,flags); ++ pkt_drop = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_IN_DISCARDS); ++ pkt_error = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_IN_ERRORS); ++ tp->stats.rx_dropped = tp->stats.rx_dropped + pkt_drop; ++ tp->stats.rx_errors = tp->stats.rx_errors + pkt_error; ++ spin_unlock_irqrestore(&tp->lock,flags); ++ } ++ return &tp->stats; ++} ++ ++static unsigned const ethernet_polynomial = 0x04c11db7U; ++static inline u32 ether_crc (int length, unsigned char *data) ++{ ++ int crc = -1; ++ unsigned int i; ++ unsigned int crc_val=0; ++ ++ while (--length >= 0) { ++ unsigned char current_octet = *data++; ++ int bit; ++ for (bit = 0; bit < 8; bit++, current_octet >>= 1) ++ crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
++ ethernet_polynomial : 0); ++ } ++ crc = ~crc; ++ for (i=0;i<32;i++) ++ { ++ crc_val = crc_val + (((crc << i) & 0x80000000) >> (31-i)); ++ } ++ return crc_val; ++} ++ ++static void gmac_set_rx_mode(struct net_device *dev) ++{ ++ GMAC_RX_FLTR_T filter; ++ unsigned int mc_filter[2]; /* Multicast hash filter */ ++ int bit_nr; ++ unsigned int i, dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++// printk("%s : dev->flags = %x \n",__func__,dev->flags); ++// dev->flags |= IFF_ALLMULTI; /* temp */ ++ filter.bits32 = 0; ++ filter.bits.error = 0; ++ if (dev->flags & IFF_PROMISC) ++ { ++ filter.bits.error = 1; ++ filter.bits.promiscuous = 1; ++ filter.bits.broadcast = 1; ++ filter.bits.multicast = 1; ++ filter.bits.unicast = 1; ++ mc_filter[1] = mc_filter[0] = 0xffffffff; ++ } ++ else if (dev->flags & IFF_ALLMULTI) ++ { ++ filter.bits.promiscuous = 1; ++ filter.bits.broadcast = 1; ++ filter.bits.multicast = 1; ++ filter.bits.unicast = 1; ++ mc_filter[1] = mc_filter[0] = 0xffffffff; ++ } ++ else ++ { ++ struct dev_mc_list *mclist; ++ ++ filter.bits.promiscuous = 1; ++ filter.bits.broadcast = 1; ++ filter.bits.multicast = 1; ++ filter.bits.unicast = 1; ++ mc_filter[1] = mc_filter[0] = 0; ++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;i++, mclist = mclist->next) ++ { ++ bit_nr = ether_crc(ETH_ALEN,mclist->dmi_addr) & 0x0000003f; ++ if (bit_nr < 32) ++ { ++ mc_filter[0] = mc_filter[0] | (1<dev_addr[i] = sock->sa_data[i]; ++ } ++ ++ reg_val = dev->dev_addr[0] + (dev->dev_addr[1]<<8) + (dev->dev_addr[2]<<16) + (dev->dev_addr[3]<<24); ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_STA_ADD0,reg_val,0xffffffff); ++ reg_val = dev->dev_addr[4] + (dev->dev_addr[5]<<8) ; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_STA_ADD1,reg_val,0x0000ffff); ++ memcpy(ð0_mac[0],&dev->dev_addr[0],6); ++ printk("Storlink %s address = ",dev->name); ++ printk("%02x",dev->dev_addr[0]); ++ printk("%02x",dev->dev_addr[1]); ++ printk("%02x",dev->dev_addr[2]); ++ printk("%02x",dev->dev_addr[3]); ++ printk("%02x",dev->dev_addr[4]); ++ printk("%02x\n",dev->dev_addr[5]); ++ ++ return (0); ++} ++ ++static void gmac_tx_timeout(struct net_device *dev) ++{ ++ GMAC_TXDMA_CTRL_T tx_ctrl,tx_ctrl_mask; ++ GMAC_TXDMA_FIRST_DESC_T txdma_busy; ++ int dev_index; ++ ++ dev_index = gmac_select_interface(dev); ++ ++ /* if TX DMA process is stoped , restart it */ ++ txdma_busy.bits32 = gmac_read_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_FIRST_DESC); ++ if (txdma_busy.bits.td_busy == 0) ++ { ++ /* restart DMA process */ ++ tx_ctrl.bits32 = 0; ++ tx_ctrl.bits.td_start = 1; ++ tx_ctrl.bits.td_continue = 1; ++ tx_ctrl_mask.bits32 = 0; ++ tx_ctrl_mask.bits.td_start = 1; ++ tx_ctrl_mask.bits.td_continue = 1; ++ gmac_write_reg(gmac_base_addr[dev_index] + GMAC_TXDMA_CTRL,tx_ctrl.bits32,tx_ctrl_mask.bits32); ++ } ++ netif_wake_queue(dev); ++ return; ++} ++ ++static int gmac_netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ int rc = 0; ++ unsigned char *hwa = rq->ifr_ifru.ifru_hwaddr.sa_data; ++ ++ if (!netif_running(dev)) ++ { ++ printk("Before changing the H/W address,please down the device.\n"); ++ return -EINVAL; ++ } ++ ++ switch (cmd) { ++ case SIOCETHTOOL: ++ break; ++ ++ case SIOCSIFHWADDR: ++ gmac_set_mac_address(dev,hwa); ++ break; ++ ++ case SIOCGMIIPHY: /* Get the address of the PHY in use. */ ++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ ++ break; ++ ++ case SIOCGMIIREG: /* Read the specified MII register. 
*/ ++ case SIOCDEVPRIVATE+1: ++ break; ++ ++ case SIOCSMIIREG: /* Write the specified MII register */ ++ case SIOCDEVPRIVATE+2: ++ break; ++ ++ default: ++ rc = -EOPNOTSUPP; ++ break; ++ } ++ ++ return rc; ++} ++ ++static void gmac_cleanup_module(void) ++{ ++ int i; ++ ++ for (i=0;ipriv; ++ ++ dev[i]->base_addr = gmac_base_addr[i]; ++ dev[i]->irq = gmac_irq[i]; ++ dev[i]->open = gmac_open; ++ dev[i]->stop = gmac_close; ++ dev[i]->hard_start_xmit = gmac_start_xmit; ++ dev[i]->get_stats = gmac_get_stats; ++ dev[i]->set_multicast_list = gmac_set_rx_mode; ++ dev[i]->set_mac_address = gmac_set_mac_address; ++ dev[i]->do_ioctl = gmac_netdev_ioctl; ++ dev[i]->tx_timeout = gmac_tx_timeout; ++ dev[i]->watchdog_timeo = TX_TIMEOUT; ++ dev[i]->features |= NETIF_F_SG|NETIF_F_HW_CSUM|NETIF_F_TSO; ++#ifdef CONFIG_SL_NAPI ++ printk("NAPI driver is enabled.\n"); ++ if (i==0) ++ { ++ dev[i]->poll = gmac_rx_poll_ga; ++ dev[i]->weight = 64; ++ } ++ else ++ { ++ dev[i]->poll = gmac_rx_poll_gb; ++ dev[i]->weight = 64; ++ } ++#endif ++ ++ if (register_netdev(dev[i])) ++ { ++ gmac_cleanup_module(); ++ return(-1); ++ } ++ } ++ ++#ifdef CONFIG_SL3516_ASIC ++{ ++ unsigned int val; ++ ++ /* set GMAC global register */ ++ val = readl(GMAC_GLOBAL_BASE_ADDR+0x10); ++ val = val | 0x005a0000; ++ writel(val,GMAC_GLOBAL_BASE_ADDR+0x10); ++ writel(0x07f007f0,GMAC_GLOBAL_BASE_ADDR+0x1c); ++ writel(0x77770000,GMAC_GLOBAL_BASE_ADDR+0x20); ++ writel(0x77770000,GMAC_GLOBAL_BASE_ADDR+0x24); ++ val = readl(GMAC_GLOBAL_BASE_ADDR+0x04); ++ if((val&(1<<20))==0){ // GMAC1 enable ++ val = readl(GMAC_GLOBAL_BASE_ADDR+0x30); ++ val = (val & 0xe7ffffff) | 0x08000000; ++ writel(val,GMAC_GLOBAL_BASE_ADDR+0x30); ++ } ++ ++} ++#endif ++ ++// printk("%s: dev0=%x dev1=%x \n",__func__,dev[0],dev[1]); ++// FLAG_SWITCH = 0 ; ++// FLAG_SWITCH = SPI_get_identifier(); ++// if(FLAG_SWITCH) ++// { ++// printk("Configure ADM699X...\n"); ++// SPI_default(); //Add by jason for ADM699X configuration ++// } ++ return (0); ++} ++ ++ ++module_init(gmac_init_module); ++module_exit(gmac_cleanup_module); ++ ++static int gmac_phy_thread (void *data) ++{ ++ struct net_device *dev = data; ++ struct gmac_private *tp = dev->priv; ++ unsigned long timeout; ++ ++ daemonize("%s", dev->name); ++ allow_signal(SIGTERM); ++// reparent_to_init(); ++// spin_lock_irq(¤t->sigmask_lock); ++// sigemptyset(¤t->blocked); ++// recalc_sigpending(current); ++// spin_unlock_irq(¤t->sigmask_lock); ++// strncpy (current->comm, dev->name, sizeof(current->comm) - 1); ++// current->comm[sizeof(current->comm) - 1] = '\0'; ++ ++ while (1) ++ { ++ timeout = next_tick; ++ do ++ { ++ timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout); ++ } while (!signal_pending (current) && (timeout > 0)); ++ ++ if (signal_pending (current)) ++ { ++// spin_lock_irq(¤t->sigmask_lock); ++ flush_signals(current); ++// spin_unlock_irq(¤t->sigmask_lock); ++ } ++ ++ if (tp->time_to_die) ++ break; ++ ++// printk("%s : Polling PHY Status...%x\n",__func__,dev); ++ rtnl_lock (); ++ gmac_get_phy_status(dev); ++ rtnl_unlock (); ++ } ++ complete_and_exit (&tp->thr_exited, 0); ++} ++ ++static void gmac_set_phy_status(struct net_device *dev) ++{ ++ GMAC_STATUS_T status; ++ unsigned int reg_val; ++ unsigned int i = 0; ++ unsigned int index; ++ ++ if (FLAG_SWITCH==1) ++ { ++ return; /* GMAC connects to a switch chip, not PHY */ ++ } ++ ++ index = gmac_get_dev_index(dev); ++ ++ if (index == 0) ++ { ++// mii_write(phy_addr[index],0x04,0x0461); /* advertisement 10M full duplex, pause capable on */ ++// 
mii_write(phy_addr[index],0x04,0x0421); /* advertisement 10M half duplex, pause capable on */ ++ mii_write(phy_addr[index],0x04,0x05e1); /* advertisement 100M full duplex, pause capable on */ ++// mii_write(phy_addr[index],0x04,0x04a1); /* advertisement 100M half duplex, pause capable on */ ++#ifdef CONFIG_SL3516_ASIC ++ mii_write(phy_addr[index],0x09,0x0300); /* advertisement 1000M full duplex, pause capable on */ ++// mii_write(phy_addr[index],0x09,0x0000); /* advertisement 1000M full duplex, pause capable on */ ++#endif ++ } ++ else ++ { ++// mii_write(phy_addr[index],0x04,0x0461); /* advertisement 10M full duplex, pause capable on */ ++// mii_write(phy_addr[index],0x04,0x0421); /* advertisement 10M half duplex, pause capable on */ ++ mii_write(phy_addr[index],0x04,0x05e1); /* advertisement 100M full duplex, pause capable on */ ++// mii_write(phy_addr[index],0x04,0x04a1); /* advertisement 100M half duplex, pause capable on */ ++#ifdef CONFIG_SL3516_ASIC ++// mii_write(phy_addr[index],0x09,0x0000); /* advertisement no 1000M */ ++ mii_write(phy_addr[index],0x09,0x0300); /* advertisement 1000M full duplex, pause capable on */ ++#endif ++ } ++ ++ mii_write(phy_addr[index],0x00,0x1200); /* Enable and Restart Auto-Negotiation */ ++ mii_write(phy_addr[index],0x18,0x0041); /* Enable Active led */ ++ while (((reg_val=mii_read(phy_addr[index],0x01)) & 0x00000004)!=0x04) ++ { ++ i++; ++ if (i > 30) ++ { ++ break; ++ } ++ msleep(100); ++ } ++ if (i>30) ++ { ++ pre_phy_status[index] = LINK_DOWN; ++ clear_bit(__LINK_STATE_START, &dev->state); ++ netif_stop_queue(dev); ++ storlink_ctl.link = 0; ++ printk("Link Down (%04x) ",reg_val); ++ } ++ else ++ { ++ pre_phy_status[index] = LINK_UP; ++ set_bit(__LINK_STATE_START, &dev->state); ++ netif_wake_queue(dev); ++ storlink_ctl.link = 1; ++ printk("Link Up (%04x) ",reg_val); ++ } ++ ++ status.bits32 = 0; ++ reg_val = mii_read(phy_addr[index],10); ++ printk("reg_val0 = %x \n",reg_val); ++ if ((reg_val & 0x0800) == 0x0800) ++ { ++ status.bits.duplex = 1; ++ status.bits.speed = 2; ++ printk(" 1000M/Full \n"); ++ } ++ else if ((reg_val & 0x0400) == 0x0400) ++ { ++ status.bits.duplex = 0; ++ status.bits.speed = 2; ++ printk(" 1000M/Half \n"); ++ } ++ else ++ { ++ reg_val = (mii_read(phy_addr[index],0x05) & 0x05E0) >> 5; ++ printk("reg_val1 = %x \n",reg_val); ++ if ((reg_val & 0x08)==0x08) /* 100M full duplex */ ++ { ++ status.bits.duplex = 1; ++ status.bits.speed = 1; ++ printk(" 100M/Full \n"); ++ } ++ else if ((reg_val & 0x04)==0x04) /* 100M half duplex */ ++ { ++ status.bits.duplex = 0; ++ status.bits.speed = 1; ++ printk(" 100M/Half \n"); ++ } ++ else if ((reg_val & 0x02)==0x02) /* 10M full duplex */ ++ { ++ status.bits.duplex = 1; ++ status.bits.speed = 0; ++ printk(" 10M/Full \n"); ++ } ++ else if ((reg_val & 0x01)==0x01) /* 10M half duplex */ ++ { ++ status.bits.duplex = 0; ++ status.bits.speed = 0; ++ printk(" 100M/Half \n"); ++ } ++ } ++ ++ reg_val = (mii_read(phy_addr[index],0x05) & 0x05E0) >> 5; ++ if ((reg_val & 0x20)==0x20) ++ { ++ flow_control_enable[index] = 1; ++ printk("Flow Control Enable. \n"); ++ } ++ else ++ { ++ flow_control_enable[index] = 0; ++ printk("Flow Control Disable. 
\n"); ++ } ++ full_duplex = status.bits.duplex; ++ speed = status.bits.speed; ++} ++ ++static void gmac_get_phy_status(struct net_device *dev) ++{ ++ GMAC_CONFIG0_T config0,config0_mask; ++ GMAC_STATUS_T status; ++ unsigned int reg_val; ++ unsigned int index; ++ ++ index = gmac_select_interface(dev); ++ ++ status.bits32 = 0; ++ status.bits.phy_mode = 1; ++ ++#ifdef CONFIG_SL3516_ASIC ++ status.bits.mii_rmii = 2; /* default value for ASIC version */ ++// status.bits.speed = 1; ++#else ++ if (index==0) ++ status.bits.mii_rmii = 0; ++ else ++ status.bits.mii_rmii = 2; ++#endif ++ ++ /* read PHY status register */ ++ reg_val = mii_read(phy_addr[index],0x01); ++ if ((reg_val & 0x0024) == 0x0024) /* link is established and auto_negotiate process completed */ ++ { ++ /* read PHY Auto-Negotiation Link Partner Ability Register */ ++ reg_val = mii_read(phy_addr[index],10); ++ if ((reg_val & 0x0800) == 0x0800) ++ { ++ status.bits.mii_rmii = 3; /* RGMII 1000Mbps mode */ ++ status.bits.duplex = 1; ++ status.bits.speed = 2; ++ } ++ else if ((reg_val & 0x0400) == 0x0400) ++ { ++ status.bits.mii_rmii = 3; /* RGMII 1000Mbps mode */ ++ status.bits.duplex = 0; ++ status.bits.speed = 2; ++ } ++ else ++ { ++ reg_val = (mii_read(phy_addr[index],0x05) & 0x05E0) >> 5; ++ if ((reg_val & 0x08)==0x08) /* 100M full duplex */ ++ { ++ status.bits.mii_rmii = 2; /* RGMII 10/100Mbps mode */ ++ status.bits.duplex = 1; ++ status.bits.speed = 1; ++ } ++ else if ((reg_val & 0x04)==0x04) /* 100M half duplex */ ++ { ++ status.bits.mii_rmii = 2; /* RGMII 10/100Mbps mode */ ++ status.bits.duplex = 0; ++ status.bits.speed = 1; ++ } ++ else if ((reg_val & 0x02)==0x02) /* 10M full duplex */ ++ { ++ status.bits.mii_rmii = 2; /* RGMII 10/100Mbps mode */ ++ status.bits.duplex = 1; ++ status.bits.speed = 0; ++ } ++ else if ((reg_val & 0x01)==0x01) /* 10M half duplex */ ++ { ++ status.bits.mii_rmii = 2; /* RGMII 10/100Mbps mode */ ++ status.bits.duplex = 0; ++ status.bits.speed = 0; ++ } ++ } ++ status.bits.link = LINK_UP; /* link up */ ++ netif_wake_queue(dev); ++ ++ reg_val = (mii_read(phy_addr[index],0x05) & 0x05E0) >> 5; ++ if ((reg_val & 0x20)==0x20) ++ { ++ if (flow_control_enable[index] == 0) ++ { ++ config0.bits32 = 0; ++ config0_mask.bits32 = 0; ++ config0.bits.tx_fc_en = 1; /* enable tx flow control */ ++ config0.bits.rx_fc_en = 1; /* enable rx flow control */ ++ config0_mask.bits.tx_fc_en = 1; ++ config0_mask.bits.rx_fc_en = 1; ++ gmac_write_reg(gmac_base_addr[index] + GMAC_CONFIG0,config0.bits32,config0_mask.bits32); ++// printk("eth%d Flow Control Enable. \n",index); ++ } ++ flow_control_enable[index] = 1; ++ } ++ else ++ { ++ if (flow_control_enable[index] == 1) ++ { ++ config0.bits32 = 0; ++ config0_mask.bits32 = 0; ++ config0.bits.tx_fc_en = 0; /* disable tx flow control */ ++ config0.bits.rx_fc_en = 0; /* disable rx flow control */ ++ config0_mask.bits.tx_fc_en = 1; ++ config0_mask.bits.rx_fc_en = 1; ++ gmac_write_reg(gmac_base_addr[index] + GMAC_CONFIG0,config0.bits32,config0_mask.bits32); ++// printk("eth%d Flow Control Disable. 
\n",index); ++ } ++ flow_control_enable[index] = 0; ++ } ++ ++ if (pre_phy_status[index] == LINK_DOWN) ++ { ++ gmac_enable_tx_rx(dev); ++ pre_phy_status[index] = LINK_UP; ++ set_bit(__LINK_STATE_START, &dev->state); ++ storlink_ctl.link = 1; ++// printk("eth%d Link Up ...\n",index); ++ } ++ } ++ else ++ { ++ status.bits.link = LINK_DOWN; /* link down */ ++ netif_stop_queue(dev); ++ flow_control_enable[index] = 0; ++ storlink_ctl.link = 0; ++ if (pre_phy_status[index] == LINK_UP) ++ { ++ gmac_disable_tx_rx(dev); ++ pre_phy_status[index] = LINK_DOWN; ++ clear_bit(__LINK_STATE_START, &dev->state); ++// printk("eth%d Link Down ...\n",index); ++ } ++ ++ } ++ ++ reg_val = gmac_read_reg(gmac_base_addr[index] + GMAC_STATUS); ++ if (reg_val != status.bits32) ++ { ++ gmac_write_reg(gmac_base_addr[index] + GMAC_STATUS,status.bits32,0x0000007f); ++ } ++} ++ ++/***************************************/ ++/* define GPIO module base address */ ++/***************************************/ ++#define GPIO_BASE_ADDR (IO_ADDRESS(SL2312_GPIO_BASE)) ++ ++/* define GPIO pin for MDC/MDIO */ ++ ++// for gemini ASIC ++#ifdef CONFIG_SL3516_ASIC ++#define H_MDC_PIN 22 ++#define H_MDIO_PIN 21 ++#define G_MDC_PIN 22 ++#define G_MDIO_PIN 21 ++#else ++#define H_MDC_PIN 3 ++#define H_MDIO_PIN 2 ++#define G_MDC_PIN 0 ++#define G_MDIO_PIN 1 ++#endif ++ ++//#define GPIO_MDC 0x80000000 ++//#define GPIO_MDIO 0x00400000 ++ ++static unsigned int GPIO_MDC = 0; ++static unsigned int GPIO_MDIO = 0; ++static unsigned int GPIO_MDC_PIN = 0; ++static unsigned int GPIO_MDIO_PIN = 0; ++ ++// For PHY test definition!! ++#define LPC_EECK 0x02 ++#define LPC_EDIO 0x04 ++#define LPC_GPIO_SET 3 ++#define LPC_BASE_ADDR IO_ADDRESS(IT8712_IO_BASE) ++#define inb_gpio(x) inb(LPC_BASE_ADDR + IT8712_GPIO_BASE + x) ++#define outb_gpio(x, y) outb(y, LPC_BASE_ADDR + IT8712_GPIO_BASE + x) ++ ++enum GPIO_REG ++{ ++ GPIO_DATA_OUT = 0x00, ++ GPIO_DATA_IN = 0x04, ++ GPIO_PIN_DIR = 0x08, ++ GPIO_BY_PASS = 0x0c, ++ GPIO_DATA_SET = 0x10, ++ GPIO_DATA_CLEAR = 0x14, ++}; ++/***********************/ ++/* MDC : GPIO[31] */ ++/* MDIO: GPIO[22] */ ++/***********************/ ++ ++/*************************************************** ++* All the commands should have the frame structure: ++*

++****************************************************/
++
++/*****************************************************************
++* Inject a bit to NWay register through CSR9_MDC,MDIO
++*******************************************************************/
++void mii_serial_write(char bit_MDO) // write data into mii PHY
++{
++#if 0 //def CONFIG_SL2312_LPC_IT8712
++	unsigned char iomode,status;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode |= (LPC_EECK|LPC_EDIO) ;				// Set EECK,EDIO,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	if(bit_MDO)
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status |= LPC_EDIO ;		//EDIO high
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++	else
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status &= ~(LPC_EDIO) ;		//EDIO low
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++
++	status |= LPC_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(LPC_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++#else
++    unsigned int addr;
++    unsigned int value;
++
++    addr = GPIO_BASE_ADDR + GPIO_PIN_DIR;
++    value = readl(addr) | GPIO_MDC | GPIO_MDIO; /* set MDC/MDIO Pin to output */
++    writel(value,addr);
++    if(bit_MDO)
++    {
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDIO,addr); /* set MDIO to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDC,addr); /* set MDC to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDC,addr); /* set MDC to 0 */
++    }
++    else
++    {
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDIO,addr); /* set MDIO to 0 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDC,addr); /* set MDC to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDC,addr); /* set MDC to 0 */
++    }
++
++#endif
++}
++
++/**********************************************************************
++* read a bit from NWay register through CSR9_MDC,MDIO
++***********************************************************************/
++unsigned int mii_serial_read(void) // read data from mii PHY
++{
++#if 0 //def CONFIG_SL2312_LPC_IT8712
++  	unsigned char iomode,status;
++	unsigned int value ;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode &= ~(LPC_EDIO) ;		// Set EDIO input
++	iomode |= (LPC_EECK) ;		// Set EECK,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	status = inb_gpio( LPC_GPIO_SET);
++	status |= LPC_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(LPC_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++	value = inb_gpio( LPC_GPIO_SET);
++
++	value = value>>2 ;
++	value &= 0x01;
++
++	return value ;
++
++#else
++    unsigned int *addr;
++    unsigned int value;
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_PIN_DIR);
++    value = readl(addr) & ~GPIO_MDIO; //0xffbfffff;   /* set MDC to output and MDIO to input */
++    writel(value,addr);
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_SET);
++    writel(GPIO_MDC,addr); /* set MDC to 1 */
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++    writel(GPIO_MDC,addr); /* set MDC to 0 */
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_IN);
++    value = readl(addr);
++    value = (value & (1<<GPIO_MDIO_PIN)) >> GPIO_MDIO_PIN;
++    return(value);
++
++#endif
++}
++
++/***************************************
++* preamble + ST
++***************************************/
++void mii_pre_st(void)
++{
++    unsigned char i;
++
++    for(i=0;i<32;i++) // PREAMBLE
++        mii_serial_write(1);
++    mii_serial_write(0); // ST
++    mii_serial_write(1);
++}
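++
++/*
++ * Illustrative sketch, not part of the original driver: the helpers above
++ * clock out a standard IEEE 802.3 clause-22 management frame, i.e. a 32-bit
++ * preamble, ST (01), OP (10 = read, 01 = write), a 5-bit PHY address, a
++ * 5-bit register address, a turnaround and 16 data bits, MSB first.
++ * mii_send_frame_header() is a hypothetical name that only shows how the
++ * 12 header bits after the preamble/ST could be composed and shifted out;
++ * mii_read()/mii_write() further down do the real work bit by bit.
++ */
++#if 0 /* example only */
++static void mii_send_frame_header(unsigned char op, unsigned char phyad,
++                                  unsigned char regad)
++{
++    unsigned int header;
++    int i;
++
++    mii_pre_st();                          /* 32x1 preamble + ST (01)   */
++    header = ((op & 0x03) << 10) |         /* OP: 10 = read, 01 = write */
++             ((phyad & 0x1f) << 5) |       /* 5-bit PHY address         */
++             (regad & 0x1f);               /* 5-bit register address    */
++    for (i = 11; i >= 0; i--)              /* shift out MSB first       */
++        mii_serial_write((header >> i) & 0x01);
++}
++#endif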
++
++
++/******************************************
++* Read MII register
++* phyad -> PHY address
++* regad -> register address
++***************************************** */
++unsigned int mii_read(unsigned char phyad,unsigned char regad)
++{
++    unsigned int i,value;
++    unsigned int bit;
++
++    if (phyad == GPHY_ADDR)
++    {
++        GPIO_MDC_PIN = G_MDC_PIN;   /* assigned MDC pin for giga PHY */
++        GPIO_MDIO_PIN = G_MDIO_PIN; /* assigned MDIO pin for giga PHY */
++    }
++    else
++    {
++        GPIO_MDC_PIN = H_MDC_PIN;   /* assigned MDC pin for 10/100 PHY */
++        GPIO_MDIO_PIN = H_MDIO_PIN; /* assigned MDIO pin for 10/100 PHY */
++    }
++    GPIO_MDC = (1<<GPIO_MDC_PIN);
++    GPIO_MDIO = (1<<GPIO_MDIO_PIN);
++
++    mii_pre_st(); // PRE + ST
++    mii_serial_write(1); // OP = 10 (read)
++    mii_serial_write(0);
++
++    for (i=0;i<5;i++) { // PHYAD
++        bit= ((phyad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    for (i=0;i<5;i++) { // REGAD
++        bit= ((regad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    mii_serial_read(); // TA_Z
++//    if((bit=mii_serial_read()) !=0 ) // TA_0
++//    {
++//        return(0);
++//    }
++    value=0;
++    for (i=0;i<16;i++) { // READ DATA
++        bit=mii_serial_read();
++        value += (bit<<(15-i)) ;
++    }
++
++    mii_serial_write(0); // dummy clock
++    mii_serial_write(0); // dummy clock
++//printk("%s: phy_addr=%x reg_addr=%x value=%x \n",__func__,phyad,regad,value);
++    return(value);
++}
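++
++/*
++ * Usage sketch, not part of the original driver: mii_read() returns the
++ * 16-bit register value.  mii_read_phy_id() is a hypothetical helper that
++ * reads the standard PHY identifier registers 2 and 3; the PHY address is
++ * the same phy_addr[] value the rest of this driver passes in.
++ */
++#if 0 /* example only */
++static unsigned int mii_read_phy_id(unsigned char phyad)
++{
++    unsigned int id_hi = mii_read(phyad, 0x02);   /* PHY identifier 1 */
++    unsigned int id_lo = mii_read(phyad, 0x03);   /* PHY identifier 2 */
++
++    return (id_hi << 16) | id_lo;
++}
++#endif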
++
++/******************************************
++* Write MII register
++* phyad -> PHY address
++* regad -> register address
++* value -> value to be written
++***************************************** */
++void mii_write(unsigned char phyad,unsigned char regad,unsigned int value)
++{
++    unsigned int i;
++    char bit;
++
++printk("%s: phy_addr=%x reg_addr=%x value=%x \n",__func__,phyad,regad,value);
++    if (phyad == GPHY_ADDR)
++    {
++        GPIO_MDC_PIN = G_MDC_PIN;   /* assigned MDC pin for giga PHY */
++        GPIO_MDIO_PIN = G_MDIO_PIN; /* assigned MDIO pin for giga PHY */
++    }
++    else
++    {
++        GPIO_MDC_PIN = H_MDC_PIN;   /* assigned MDC pin for 10/100 PHY */
++        GPIO_MDIO_PIN = H_MDIO_PIN; /* assigned MDIO pin for 10/100 PHY */
++    }
++    GPIO_MDC = (1<<GPIO_MDC_PIN);
++    GPIO_MDIO = (1<<GPIO_MDIO_PIN);
++
++    mii_pre_st(); // PRE + ST
++    mii_serial_write(0); // OP = 01 (write)
++    mii_serial_write(1);
++
++    for (i=0;i<5;i++) { // PHYAD
++        bit= ((phyad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    for (i=0;i<5;i++) { // REGAD
++        bit= ((regad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++    mii_serial_write(1); // TA_1
++    mii_serial_write(0); // TA_0
++
++    for (i=0;i<16;i++) { // OUT DATA
++        bit= ((value>>(15-i)) & 0x01) ? 1 : 0 ;
++        mii_serial_write(bit);
++    }
++    mii_serial_write(0); // dummy clock
++    mii_serial_write(0); // dummy clock
++}
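++
++/*
++ * Usage sketch, not part of the original driver: the same sequence
++ * gmac_set_phy_status() uses above -- write the advertisement register,
++ * enable and restart auto-negotiation, then poll the status register for
++ * the link bit.  mii_restart_autoneg() is a hypothetical name.
++ */
++#if 0 /* example only */
++static int mii_restart_autoneg(unsigned char phyad)
++{
++    int i;
++
++    mii_write(phyad, 0x04, 0x05e1);         /* advertise 10/100, pause on */
++    mii_write(phyad, 0x00, 0x1200);         /* enable + restart auto-neg  */
++    for (i = 0; i < 30; i++) {
++        if (mii_read(phyad, 0x01) & 0x0004) /* link established           */
++            return 0;
++        msleep(100);
++    }
++    return -1;                              /* link still down            */
++}
++#endif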
++
++
++
++
++
++
++
++
++
++/*				NOTES
++ *   The instruction set of the 93C66/56/46/26/06 chips are as follows:
++ *
++ *               Start  OP	    *
++ *     Function   Bit  Code  Address**  Data     Description
++ *     -------------------------------------------------------------------
++ *     READ        1    10   A7 - A0             Reads data stored in memory,
++ *                                               starting at specified address
++ *     EWEN        1    00   11XXXXXX            Write enable must precede
++ *                                               all programming modes
++ *     ERASE       1    11   A7 - A0             Erase register A7A6A5A4A3A2A1A0
++ *     WRITE       1    01   A7 - A0   D15 - D0  Writes register
++ *     ERAL        1    00   10XXXXXX            Erase all registers
++ *     WRAL        1    00   01XXXXXX  D15 - D0  Writes to all registers
++ *     EWDS        1    00   00XXXXXX            Disables all programming
++ *                                               instructions
++ *    *Note: A value of X for address is a don't care condition.
++ *    **Note: There are 8 address bits for the 93C56/66 chips unlike
++ *	      the 93C46/26/06 chips which have 6 address bits.
++ *
++ *   The 93Cx6 has a four wire interface: clock, chip select, data in, and
++ *   data out, while the ADM6996 uses a three-wire interface: clock, chip select, and a
++ *   shared data line.  The input and output are the same pin, and the ADM6996 can only
++ *   recognize the write command.
++ *   In order to perform the above functions (sketched in code after this note), you need
++ *   1. to enable the chip select,
++ *   2. to send one dummy clock,
++ *   3. to send the start bit and opcode,
++ *   4. to send the 8 address bits and 16 data bits,
++ *   5. to disable the chip select.
++ *							Jason Lee 2003/07/30
++ */
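++
++/*
++ * Illustrative sketch, not part of the original driver: the five steps in
++ * the note above expressed with the SPI helpers declared further down
++ * (SPI_CS_enable, SPI_write_bit).  adm699x_write_sketch() is a hypothetical
++ * name, and the MSB-first bit ordering is an assumption; SPI_write() below
++ * is the real implementation of this command framing.
++ */
++#if 0 /* example only */
++static void adm699x_write_sketch(unsigned char addr, unsigned int value)
++{
++    int i;
++
++    SPI_CS_enable(1);                      /* 1. enable the chip select    */
++    SPI_write_bit(0);                      /* 2. one dummy clock           */
++    SPI_write_bit(1);                      /* 3. start bit ...             */
++    SPI_write_bit(0);                      /*    ... and opcode 01 (write) */
++    SPI_write_bit(1);
++    for (i = 7; i >= 0; i--)               /* 4. 8 address bits            */
++        SPI_write_bit((addr >> i) & 0x01);
++    for (i = 15; i >= 0; i--)              /*    then 16 data bits         */
++        SPI_write_bit((value >> i) & 0x01);
++    SPI_CS_enable(0);                      /* 5. disable the chip select   */
++}
++#endif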
++
++/***************************************/
++/* define GPIO module base address     */
++/***************************************/
++#define GPIO_EECS	     0x00400000		/*   EECS: GPIO[22]   */
++//#define GPIO_MOSI	     0x20000000         /*   EEDO: GPIO[29]   send to 6996*/
++#define GPIO_MISO	     0x40000000         /*   EEDI: GPIO[30]   receive from 6996*/
++#define GPIO_EECK	     0x80000000         /*   EECK: GPIO[31]   */
++
++#define ADM_EECS		0x01
++#define ADM_EECK		0x02
++#define ADM_EDIO		0x04
++/*************************************************************
++* SPI protocol for ADM6996 control
++**************************************************************/
++#define SPI_OP_LEN	     0x03		// the length of start bit and opcode
++#define SPI_OPWRITE	     0X05		// write
++#define SPI_OPREAD	     0X06		// read
++#define SPI_OPERASE	     0X07		// erase
++#define SPI_OPWTEN	     0X04		// write enable
++#define SPI_OPWTDIS	     0X04		// write disable
++#define SPI_OPERSALL	     0X04		// erase all
++#define SPI_OPWTALL	     0X04		// write all
++
++#define SPI_ADD_LEN	     8			// bits of Address
++#define SPI_DAT_LEN	     16			// bits of Data
++#define ADM6996_PORT_NO	     6			// the port number of ADM6996
++#define ADM6999_PORT_NO	     9			// the port number of ADM6999
++#ifdef CONFIG_ADM_6996
++	#define ADM699X_PORT_NO		ADM6996_PORT_NO
++#endif
++#ifdef CONFIG_ADM_6999
++	#define ADM699X_PORT_NO		ADM6999_PORT_NO
++#endif
++#define LPC_GPIO_SET		3
++#define LPC_BASE_ADDR			IO_ADDRESS(IT8712_IO_BASE)
++
++extern int it8712_exist;
++
++#define inb_gpio(x)			inb(LPC_BASE_ADDR + IT8712_GPIO_BASE + x)
++#define outb_gpio(x, y)		outb(y, LPC_BASE_ADDR + IT8712_GPIO_BASE + x)
++
++/****************************************/
++/*	Function Declare		*/
++/****************************************/
++/*
++void SPI_write(unsigned char addr,unsigned int value);
++unsigned int SPI_read(unsigned char table,unsigned char addr);
++void SPI_write_bit(char bit_EEDO);
++unsigned int SPI_read_bit(void);
++void SPI_default(void);
++void SPI_reset(unsigned char rstype,unsigned char port_cnt);
++void SPI_pre_st(void);
++void SPI_CS_enable(unsigned char enable);
++void SPI_Set_VLAN(unsigned char LAN,unsigned int port_mask);
++void SPI_Set_tag(unsigned int port,unsigned tag);
++void SPI_Set_PVID(unsigned int PVID,unsigned int port_mask);
++void SPI_mac_lock(unsigned int port, unsigned char lock);
++void SPI_get_port_state(unsigned int port);
++void SPI_port_enable(unsigned int port,unsigned char enable);
++
++void SPI_get_status(unsigned int port);
++*/
++
++struct PORT_CONFIG
++{
++	unsigned char auto_negotiation;	// 0:Disable	1:Enable
++	unsigned char speed;		// 0:10M	1:100M
++	unsigned char duplex;		// 0:Half	1:Full duplex
++	unsigned char Tag;		// 0:Untag	1:Tag
++	unsigned char port_disable;	// 0:port enable	1:disable
++	unsigned char pvid;		// port VLAN ID 0001
++	unsigned char mdix;		// Crossover judgement. 0:Disable 1:Enable
++	unsigned char mac_lock;		// MAC address Lock 0:Disable 1:Enable
++};
++
++struct PORT_STATUS
++{
++	unsigned char link;		// 0:not link	1:link established
++	unsigned char speed;		// 0:10M	1:100M
++	unsigned char duplex;		// 0:Half	1:Full duplex
++	unsigned char flow_ctl;		// 0:flow control disable 1:enable
++	unsigned char mac_lock;		// MAC address Lock 0:Disable 1:Enable
++	unsigned char port_disable;	// 0:port enable	1:disable
++
++	// Serial Management
++	unsigned long rx_pac_count;		//receive packet count
++	unsigned long rx_pac_byte;		//receive packet byte count
++	unsigned long tx_pac_count;		//transmit packet count
++	unsigned long tx_pac_byte;		//transmit packet byte count
++	unsigned long collision_count;		//error count
++	unsigned long error_count ;
++
++	unsigned long rx_pac_count_overflow;		//overflow flag
++	unsigned long rx_pac_byte_overflow;
++	unsigned long tx_pac_count_overflow;
++	unsigned long tx_pac_byte_overflow;
++	unsigned long collision_count_overflow;
++	unsigned long error_count_overflow;
++};
++
++struct PORT_CONFIG port_config[ADM699X_PORT_NO];	// 0~3:LAN , 4:WAN , 5:MII
++static struct PORT_STATUS port_state[ADM699X_PORT_NO];
++
++/******************************************
++* SPI_write
++* addr -> Write Address
++* value -> value to be write
++***************************************** */
++void SPI_write(unsigned char addr,unsigned int value)
++{
++	int     i;
++	char    bit;
++#ifdef CONFIG_IT8712_GPIO
++	char    status;
++#else
++    int     ad1;
++#endif
++
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++	SPI_CS_enable(1);
++
++	SPI_write_bit(0);       //dummy clock
++
++	//send write command (0x05)
++	for(i=SPI_OP_LEN-1;i>=0;i--)
++	{
++		bit = (SPI_OPWRITE>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++	// send 8 bits address (MSB first, LSB last)
++	for(i=SPI_ADD_LEN-1;i>=0;i--)
++	{
++		bit = (addr>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++	// send 16 bits data (MSB first, LSB last)
++	for(i=SPI_DAT_LEN-1;i>=0;i--)
++	{
++		bit = (value>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++
++	SPI_CS_enable(0);	// CS low
++
++	for(i=0;i<0xFFF;i++) ;
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++}
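++
++/* Example usage (illustrative; the register and value are taken from
++ * SPI_default() below, not new facts): program VLAN group register 0x14 so
++ * that ports 0~6 and 8 form VLAN 1 on an ADM6999.
++ */
++static void example_set_vlan1_map(void)
++{
++	SPI_write(0x14, 0x017F);	/* bit mask of the member ports */
++}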
++
++
++/************************************
++* SPI_write_bit
++* bit_EEDO -> 1 or 0 to be written
++************************************/
++void SPI_write_bit(char bit_EEDO)
++{
++#ifdef CONFIG_IT8712_GPIO
++	unsigned char iomode,status;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode |= (ADM_EECK|ADM_EDIO|ADM_EECS) ;				// Set EECK,EDIO,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	if(bit_EEDO)
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status |= ADM_EDIO ;		//EDIO high
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++	else
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status &= ~(ADM_EDIO) ;		//EDIO low
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++
++	status |= ADM_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(ADM_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++#else
++	unsigned int addr;
++	unsigned int value;
++
++	addr = (GPIO_BASE_ADDR + GPIO_PIN_DIR);
++	value = readl(addr) |GPIO_EECK |GPIO_MISO ;   /* set EECK/MISO Pin to output */
++	writel(value,addr);
++	if(bit_EEDO)
++	{
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++		writel(GPIO_MISO,addr); /* set MISO to 1 */
++		writel(GPIO_EECK,addr); /* set EECK to 1 */
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++		writel(GPIO_EECK,addr); /* set EECK to 0 */
++	}
++	else
++	{
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++		writel(GPIO_MISO,addr); /* set MISO to 0 */
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++		writel(GPIO_EECK,addr); /* set EECK to 1 */
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++		writel(GPIO_EECK,addr); /* set EECK to 0 */
++	}
++
++	return ;
++#endif
++}
++
++/**********************************************************************
++* read a bit from ADM6996 register
++***********************************************************************/
++unsigned int SPI_read_bit(void) // read data from
++{
++#ifdef CONFIG_IT8712_GPIO
++	unsigned char iomode,status;
++	unsigned int value ;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode &= ~(ADM_EDIO) ;		// Set EDIO input
++	iomode |= (ADM_EECS|ADM_EECK) ;		// Set EECK,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	status = inb_gpio( LPC_GPIO_SET);
++	status |= ADM_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(ADM_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++	value = inb_gpio( LPC_GPIO_SET);
++
++	value = value>>2 ;
++	value &= 0x01;
++
++	return value ;
++#else
++	unsigned int addr;
++	unsigned int value;
++
++	addr = (GPIO_BASE_ADDR + GPIO_PIN_DIR);
++	value = readl(addr) & (~GPIO_MISO);   // set EECK to output and MISO to input
++	writel(value,addr);
++
++	addr =(GPIO_BASE_ADDR + GPIO_DATA_SET);
++	writel(GPIO_EECK,addr); // set EECK to 1
++	addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_EECK,addr); // set EECK to 0
++
++	addr = (GPIO_BASE_ADDR + GPIO_DATA_IN);
++	value = readl(addr) ;
++	value = value >> 30;
++	return value ;
++#endif
++}
++
++/******************************************
++* SPI_default
++* EEPROM content default value
++*******************************************/
++void SPI_default(void)
++{
++	int i;
++#ifdef CONFIG_ADM_6999
++	SPI_write(0x11,0xFF30);
++	for(i=1;i<8;i++)
++		SPI_write(i,0x840F);
++
++	SPI_write(0x08,0x880F);			//port 8 Untag, PVID=2
++	SPI_write(0x09,0x881D);			//port 9 Tag, PVID=2 ,10M
++	SPI_write(0x14,0x017F);			//Group 0~6,8 as VLAN 1
++	SPI_write(0x15,0x0180);			//Group 7,8 as VLAN 2
++#endif
++
++#ifdef CONFIG_ADM_6996
++	SPI_write(0x11,0xFF30);
++	SPI_write(0x01,0x840F);			//port 0~3 Untag ,PVID=1 ,100M ,duplex
++	SPI_write(0x03,0x840F);
++	SPI_write(0x05,0x840F);
++	SPI_write(0x07,0x840F);
++	SPI_write(0x08,0x880F);			//port 4 Untag, PVID=2
++	SPI_write(0x09,0x881D);			//port 5 Tag, PVID=2 ,10M
++	SPI_write(0x14,0x0155);			//Group 0~3,5 as VLAN 1
++	SPI_write(0x15,0x0180);			//Group 4,5 as VLAN 2
++
++#endif
++
++	for(i=0x16;i<=0x22;i++)
++		SPI_write((unsigned char)i,0x0000);		// clean VLAN map 3~15
++
++	for (i=0;i<ADM699X_PORT_NO;i++)
++		SPI_get_status(i);			// refresh the cached port status
++}
++
++/***************************************************
++* SPI_reset
++* rstype -> reset type
++*	    0:reset all count for 'port_cnt' port
++*	    1:reset specified count 'port_cnt'
++* port_cnt   ->  port number or counter index
++***************************************************/
++void SPI_reset(unsigned char rstype,unsigned char port_cnt)
++{
++
++	int i;
++#ifdef CONFIG_IT8712_GPIO
++    char status;
++#else
++	int ad1;
++#endif
++	char bit;
++
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++
++	SPI_CS_enable(0);	// CS low
++
++	SPI_pre_st(); // PRE+ST
++	SPI_write_bit(0); // OP
++	SPI_write_bit(1);
++
++	SPI_write_bit(1);		// Table select, must be 1 -> reset Counter
++
++	SPI_write_bit(0);		// Device Address
++	SPI_write_bit(0);
++
++	rstype &= 0x01;
++	SPI_write_bit(rstype);		// Reset type 0:clear dedicate port's all counters 1:clear dedicate counter
++
++	for (i=5;i>=0;i--) 		// port or cnt index
++	{
++		bit = port_cnt >> i ;
++		bit &= 0x01 ;
++		SPI_write_bit(bit);
++	}
++
++	SPI_write_bit(0); 		// dumy clock
++	SPI_write_bit(0); 		// dumy clock
++
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++}
++
++/*****************************************************
++* SPI_pre_st
++* preamble: 32 bits '1'   start bit: '01'
++*****************************************************/
++void SPI_pre_st(void)
++{
++	int i;
++
++	for(i=0;i<32;i++) // PREAMBLE
++		SPI_write_bit(1);
++	SPI_write_bit(0); // ST
++	SPI_write_bit(1);
++}
++
++
++/***********************************************************
++* SPI_CS_enable
++* Before access, you have to enable Chip Select (pull it high).
++* When finished, you should pull it low !!
++*************************************************************/
++void SPI_CS_enable(unsigned char enable)
++{
++#ifdef CONFIG_IT8712_GPIO
++
++	unsigned char iomode,status;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode |= (ADM_EECK|ADM_EDIO|ADM_EECS) ;				// Set EECK,EDIO,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++
++	status = inb_gpio( LPC_GPIO_SET);
++	if(enable)
++		status |= ADM_EECS ;		//EECS high
++	else
++		status &= ~(ADM_EECS) ;	//EECS low
++
++	outb_gpio(LPC_GPIO_SET, status);
++
++
++	status |= ADM_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(ADM_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++#else
++	unsigned int addr,value;
++
++	addr = (GPIO_BASE_ADDR + GPIO_PIN_DIR);
++	value = readl(addr) |GPIO_EECS |GPIO_EECK;   /* set EECS/EECK Pin to output */
++	writel(value,addr);
++
++	if(enable)
++	{
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++		writel(GPIO_EECS,addr); /* set EECS to 1 */
++
++	}
++	else
++	{
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++		writel(GPIO_EECS,addr); /* set EECS to 0 */
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++		writel(GPIO_EECK,addr); /* set EECK to 1 */	// at least one clock after CS low
++		addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++		writel(GPIO_EECK,addr); /* set EECK to 0 */
++	}
++#endif
++}
++
++/*********************************************************
++* SPI_Set_VLAN: group ports as VLAN
++* LAN  -> VLAN number : 0~16
++* port_mask -> ports which would group as LAN
++* 	       ex. 0x03 = 0000 0011
++*			port 0 and port 1
++*********************************************************/
++void SPI_Set_VLAN(unsigned char LAN,unsigned int port_mask)
++{
++	unsigned int i,value=0;
++	unsigned reg_add = 0x13 + LAN ;
++
++	for(i=0;i<ADM699X_PORT_NO;i++)
++	{
++		if(port_mask&0x01)
++			value |= (1<<i);
++		port_mask >>= 1;
++	}
++
++	SPI_write(reg_add,value);
++}
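++
++/* Example usage (illustrative): the header comment shows port_mask 0x03 for
++ * ports 0 and 1; grouping ports 0~3 into VLAN 1 would look like this.
++ */
++static void example_group_lan_ports(void)
++{
++	SPI_Set_VLAN(1, 0x0F);		/* ports 0..3 as members of VLAN 1 */
++}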
++
++
++/*******************************************
++* SPI_Set_tag
++* port -> port number to set tag or untag
++* tag  -> 0/set untag,  1/set tag
++* In general, tagging is for the MII port; the LAN and
++* WAN ports are configured as untagged!!
++********************************************/
++void SPI_Set_tag(unsigned int port,unsigned tag)
++{
++	unsigned int regadd,value;
++
++	// mapping port's register !! (0,1,2,3,4,5) ==> (1,3,5,7,8,9)
++	if(port<=3)
++		regadd=2*port+1;
++	else if(port==4) regadd = 8 ;
++	else regadd = 9 ;
++
++
++	value = SPI_read(0,regadd);		//read original setting
++
++	if(tag)
++		value |= 0x0010 ;		// set tag
++	else
++		value &= 0xFFEF ;		// set untag
++
++	SPI_write(regadd,value);		// write back!!
++}
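++
++/* Example usage (illustrative): per the comment above, the MII/uplink port
++ * is normally the tagged one while LAN ports stay untagged.
++ */
++static void example_tag_mii_port(void)
++{
++	SPI_Set_tag(5, 1);	/* port 5 (MII): tagged   */
++	SPI_Set_tag(0, 0);	/* port 0 (LAN): untagged */
++}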
++
++/************************************************
++* SPI_Set_PVID
++* PVID -> PVID number :
++* port_mask -> ports which would group as LAN
++* 	       ex. 0x0F = 0000 1111 ==> port 0~3
++************************************************/
++void SPI_Set_PVID(unsigned int PVID,unsigned int port_mask)
++{
++	unsigned int i,value=0;
++
++	PVID &= 0x000F ;
++
++	for(i=0;i<ADM6996_PORT_NO;i++)
++	{
++		if(port_mask&0x01)
++		{
++			unsigned int regadd;
++
++			// same port-to-register mapping as SPI_Set_tag()
++			if(i<=3)
++				regadd = 2*i+1;
++			else if(i==4)
++				regadd = 8;
++			else
++				regadd = 9;
++
++			value = SPI_read(0,regadd);	// read original setting
++			value &= 0xC3FF;		// clear the VID field (bits 13:10)
++			value |= PVID<<10;		// fill in the new PVID
++			SPI_write(regadd,value);
++		}
++		port_mask >>= 1;
++	}
++}
++
++
++/************************************************
++* SPI_get_PVID
++* port -> which ports to VID
++************************************************/
++unsigned int SPI_Get_PVID(unsigned int port)
++{
++	unsigned int value=0;
++
++	if (port>=ADM6996_PORT_NO)
++		return 0;
++
++	switch(port)
++	{
++		case 0:
++			value = SPI_read(0,0x01);	// read original value
++			value &= 0x3C00 ;		// get VID
++			value = value >> 10 ;		// Shift
++			break;
++		case 1:
++			value = SPI_read(0,0x03);
++			value &= 0x3C00 ;
++			value = value >> 10 ;
++			break;
++		case 2:
++			value = SPI_read(0,0x05);
++			value &= 0x3C00 ;
++			value = value >> 10 ;
++			break;
++		case 3:
++			value = SPI_read(0,0x07);
++			value &= 0x3C00 ;
++			value = value >> 10 ;
++			break;
++		case 4:
++			value = SPI_read(0,0x08);
++			value &= 0x3C00 ;
++			value = value >> 10 ;
++			break;
++		case 5:
++			value = SPI_read(0,0x09);
++			value &= 0x3C00 ;
++			value = value >> 10 ;
++			break;
++	}
++	return value ;
++}
++
++
++/**********************************************
++* SPI_mac_lock
++* port -> the port which will lock or unlock
++* lock -> 0/the port will be unlock
++*	  1/the port will be locked
++**********************************************/
++void SPI_mac_lock(unsigned int port, unsigned char lock)
++{
++	unsigned int i,value=0;
++
++	value = SPI_read(0,0x12);		// read original
++
++	for(i=0;i<ADM699X_PORT_NO;i++)
++	{
++		if(i==port)
++		{
++			if(lock)
++				value |= (0x01<<i);	// lock this port's MAC
++			else
++				value &= ~(0x01<<i);	// unlock it
++		}
++	}
++
++	SPI_write(0x12,value);
++}
++
++/***************************************************
++* SPI_pause_cmd_forward
++* forward -> 0: if DA == pause then drop and stop mac learning
++*	     1: if DA == pause ,then forward it
++***************************************************/
++void SPI_pause_cmd_forward(unsigned char forward)
++{
++	unsigned int value=0;
++
++	value = SPI_read(0,0x2C);		// read original setting
++	if(forward)
++		value |= 0x2000;		// set bit[13] '1'
++	else
++		value &= 0xDFFF;		// set bit[13] '0'
++
++	SPI_write(0x2C,value);
++
++}
++
++
++/************************************************
++* SPI_read
++* table -> which table to be read: 1/count  0/EEPROM
++* addr  -> Address to be read
++* return : Value of the register
++*************************************************/
++unsigned int SPI_read(unsigned char table,unsigned char addr)
++{
++	int i ;
++	unsigned int value=0;
++	unsigned int bit;
++#ifdef CONFIG_IT8712_GPIO
++	unsigned char status;
++#else
++    unsigned int ad1;
++#endif
++
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++
++	SPI_CS_enable(0);
++
++	SPI_pre_st(); // PRE+ST
++	SPI_write_bit(1); // OPCODE '10' for read
++	SPI_write_bit(0);
++
++	(table==1) ? SPI_write_bit(1) : SPI_write_bit(0) ;	// table select
++
++	SPI_write_bit(0);		// Device Address
++	SPI_write_bit(0);
++
++
++	// send 7 bits address to be read
++	for (i=6;i>=0;i--) {
++		bit= ((addr>>i) & 0x01) ? 1 :0 ;
++		SPI_write_bit(bit);
++	}
++
++
++	// turn around
++	SPI_read_bit(); // TA_Z
++
++	value=0;
++	for (i=31;i>=0;i--) { // READ DATA
++		bit=SPI_read_bit();
++		value |= bit << i ;
++	}
++
++	SPI_read_bit(); // dumy clock
++	SPI_read_bit(); // dumy clock
++
++	if(!table)					// EEPROM, only fetch 16 bits data
++	{
++	    if(addr&0x01)				// odd number content (register,register-1)
++		    value >>= 16 ;			// so we remove the rear 16bits
++	    else					// even number content (register+1,register),
++		    value &= 0x0000FFFF ;		// so we keep the rear 16 bits
++	}
++
++
++	SPI_CS_enable(0);
++
++#ifdef CONFIG_IT8712_GPIO
++	status = inb_gpio(LPC_GPIO_SET);
++	status &= ~(ADM_EDIO) ;		//EDIO low
++	outb_gpio(LPC_GPIO_SET, status);
++#else
++   	ad1 = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++	writel(GPIO_MISO,ad1); /* set MISO to 0 */
++#endif
++
++	return(value);
++
++}
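++
++/* Illustrative only: as the tail of SPI_read() shows, an EEPROM read
++ * (table 0) returns a 32-bit word that holds two adjacent 16-bit registers,
++ * and the low address bit selects which half is wanted.  The same selection
++ * written as a stand-alone helper:
++ */
++static unsigned int example_pick_eeprom_half(unsigned char addr, unsigned int raw32)
++{
++	return (addr & 0x01) ? (raw32 >> 16) : (raw32 & 0x0000FFFF);
++}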
++
++
++
++/**************************************************
++* SPI_port_enable
++* port -> Number of port to config
++* enable -> 1/ enable this port
++*	    0/ disable this port
++**************************************************/
++void SPI_port_enable(unsigned int port,unsigned char enable)
++{
++	unsigned int reg_val ;
++	unsigned char reg_add ;
++
++	if(port<=3)
++		reg_add=2*port+1;
++	else if(port==4) reg_add = 8 ;
++	else reg_add = 9 ;
++
++	reg_val = SPI_read(0,reg_add);
++	if(enable)
++	{
++		reg_val &= 0xFFDF ;
++		SPI_write(reg_add,reg_val);
++	}
++	else
++	{
++		reg_val |= 0x0020 ;
++		SPI_write(reg_add,reg_val);
++	}
++}
++
++/********************************************************
++* get port status
++* port -> specify the port number to get configuration
++*********************************************************/
++void SPI_get_status(unsigned int port)
++{
++/*	unsigned int reg_val,add_offset[6];
++	struct PORT_STATUS *status;
++	status = &port_state[port];
++
++	if(port>(ADM6996_PORT_NO-1))
++		return ;
++
++	// Link established, speed, duplex, flow control ?
++	if(port < 5 )
++	{
++		reg_val = SPI_read(1, 1) ;
++		if(port < 4)
++			reg_val >>= port*8 ;
++		else
++			reg_val >>=28 ;
++		status->link = reg_val & 0x00000001 ;
++		status->speed = reg_val  & 0x00000002 ;
++		status->duplex = reg_val & 0x00000004 ;
++		status->flow_ctl = reg_val & 0x00000008 ;
++	}
++	else if(port ==5 )
++	{
++		reg_val = SPI_read(1, 2) ;
++		status->link = reg_val & 0x00000001 ;
++		status->speed = reg_val  & 0x00000002 ;
++		status->duplex = reg_val & 0x00000008 ;
++		status->flow_ctl = reg_val & 0x00000010 ;
++	}
++
++	//   Mac Lock ?
++	reg_val = SPI_read(0,0x12);
++	switch(port)
++	{
++		case 0:	status->mac_lock = reg_val & 0x00000001;
++		case 1:	status->mac_lock = reg_val & 0x00000004;
++		case 2:	status->mac_lock = reg_val & 0x00000010;
++		case 3:	status->mac_lock = reg_val & 0x00000040;
++		case 4:	status->mac_lock = reg_val & 0x00000080;
++		case 5:	status->mac_lock = reg_val & 0x00000100;
++	}
++
++	// port enable ?
++	add_offset[0] = 0x01 ;		add_offset[1] = 0x03 ;
++	add_offset[2] = 0x05 ;		add_offset[3] = 0x07 ;
++	add_offset[4] = 0x08 ;		add_offset[5] = 0x09 ;
++	reg_val = SPI_read(0,add_offset[port]);
++	status->port_disable = reg_val & 0x0020;
++
++
++	//  Packet Count ...
++	add_offset[0] = 0x04 ;		add_offset[1] = 0x06 ;
++	add_offset[2] = 0x08 ;		add_offset[3] = 0x0a ;
++	add_offset[4] = 0x0b ;		add_offset[5] = 0x0c ;
++
++	reg_val = SPI_read(1,add_offset[port]);
++	status->rx_pac_count = reg_val ;
++	reg_val = SPI_read(1,add_offset[port]+9);
++	status->rx_pac_byte = reg_val ;
++	reg_val = SPI_read(1,add_offset[port]+18);
++	status->tx_pac_count = reg_val ;
++	reg_val = SPI_read(1,add_offset[port]+27);
++	status->tx_pac_byte = reg_val ;
++	reg_val = SPI_read(1,add_offset[port]+36);
++	status->collision_count = reg_val ;
++	reg_val = SPI_read(1,add_offset[port]+45);
++	status->error_count = reg_val ;
++	reg_val = SPI_read(1, 0x3A);
++	switch(port)
++	{
++		case 0:	status->rx_pac_count_overflow = reg_val & 0x00000001;
++			status->rx_pac_byte_overflow = reg_val & 0x00000200 ;
++		case 1:	status->rx_pac_count_overflow = reg_val & 0x00000004;
++			status->rx_pac_byte_overflow = reg_val & 0x00000800 ;
++		case 2:	status->rx_pac_count_overflow = reg_val & 0x00000010;
++			status->rx_pac_byte_overflow = reg_val & 0x00002000 ;
++		case 3:	status->rx_pac_count_overflow = reg_val & 0x00000040;;
++			status->rx_pac_byte_overflow = reg_val & 0x00008000 ;
++		case 4:	status->rx_pac_count_overflow = reg_val & 0x00000080;
++			status->rx_pac_byte_overflow = reg_val & 0x00010000 ;
++		case 5:	status->rx_pac_count_overflow = reg_val & 0x00000100;
++			status->rx_pac_byte_overflow = reg_val & 0x00020000 ;
++	}
++
++	reg_val = SPI_read(1, 0x3B);
++	switch(port)
++	{
++		case 0:	status->tx_pac_count_overflow = reg_val & 0x00000001;
++			status->tx_pac_byte_overflow  = reg_val & 0x00000200 ;
++		case 1:	status->tx_pac_count_overflow  = reg_val & 0x00000004;
++			status->tx_pac_byte_overflow  = reg_val & 0x00000800 ;
++		case 2:	status->tx_pac_count_overflow  = reg_val & 0x00000010;
++			status->tx_pac_byte_overflow  = reg_val & 0x00002000 ;
++		case 3:	status->tx_pac_count_overflow  = reg_val & 0x00000040;;
++			status->tx_pac_byte_overflow  = reg_val & 0x00008000 ;
++		case 4:	status->tx_pac_count_overflow  = reg_val & 0x00000080;
++			status->tx_pac_byte_overflow  = reg_val & 0x00010000 ;
++		case 5:	status->tx_pac_count_overflow  = reg_val & 0x00000100;
++			status->tx_pac_byte_overflow  = reg_val & 0x00020000 ;
++	}
++*/
++
++	unsigned int reg_val;
++	struct PORT_STATUS *status;
++	status = &port_state[port];
++
++	if(port>=ADM6999_PORT_NO)
++		return ;
++
++	// Link established, speed, duplex, flow control ?
++	if(port < ADM6999_PORT_NO-1 )
++	{
++		reg_val = SPI_read(1, 0x01) ;
++		reg_val = reg_val >> port*4 ;
++		status->link = reg_val & 0x00000001 ;
++		status->speed = reg_val  & 0x00000002 ;
++		status->duplex = reg_val & 0x00000004 ;
++		status->flow_ctl = reg_val & 0x00000008 ;
++	}
++	else if(port == (ADM6999_PORT_NO-1) )
++	{
++		reg_val = SPI_read(1, 0x02) ;
++		status->link = reg_val & 0x00000001 ;
++		status->speed = reg_val  & 0x00000002 ;
++		status->duplex = reg_val & 0x00000008 ;
++		status->flow_ctl = reg_val & 0x00000010 ;
++	}
++
++	// Mac Lock ?
++	reg_val = SPI_read(0,0x12);
++	reg_val = reg_val >> port ;
++	reg_val = reg_val & 0x01 ;
++	status->mac_lock = reg_val ? 0x01:0x00 ;
++
++	// port enable ?
++	reg_val = SPI_read(0,(unsigned char)port+1);
++	status->port_disable = reg_val & 0x0020;
++
++	//  Packet Count ...
++	reg_val = SPI_read(1,(unsigned char)port+0x04);
++	status->rx_pac_count = reg_val ;
++	reg_val = SPI_read(1,(unsigned char)port+0x0D);
++	status->rx_pac_byte = reg_val ;
++	reg_val = SPI_read(1,(unsigned char)port+0x16);
++	status->tx_pac_count = reg_val ;
++	reg_val = SPI_read(1,(unsigned char)port+0x1F);
++	status->tx_pac_byte = reg_val ;
++	reg_val = SPI_read(1,(unsigned char)port+0x28);
++	status->collision_count = reg_val ;
++	reg_val = SPI_read(1,(unsigned char)port+0x31);
++	status->error_count = reg_val ;
++	reg_val = SPI_read(1, 0x3A);
++	reg_val = reg_val >> port ;
++	status->rx_pac_count_overflow = reg_val & 0x00000001;
++	reg_val = reg_val >> 0x09 ;
++	status->rx_pac_byte_overflow = reg_val & 0x00000001 ;
++
++	reg_val = SPI_read(1, 0x3B);
++	reg_val = reg_val >> port ;
++	status->tx_pac_count_overflow = reg_val & 0x00000001;
++	reg_val = reg_val >> 0x09 ;
++	status->tx_pac_byte_overflow  = reg_val & 0x00000001 ;
++
++	reg_val = SPI_read(1, 0x3C);
++	reg_val = reg_val >> port ;
++	status->collision_count_overflow = reg_val & 0x00000001;
++	reg_val = reg_val >> 0x09 ;
++	status->error_count_overflow  = reg_val & 0x00000001 ;
++
++}
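++
++/* Example usage (illustrative, not part of the original driver): refresh and
++ * print the cached link state of every ADM6999 port.  port_state[] is the
++ * static array that SPI_get_status() above fills in.
++ */
++static void example_dump_link_state(void)
++{
++	unsigned int p;
++
++	for (p = 0; p < ADM6999_PORT_NO; p++) {
++		SPI_get_status(p);
++		printk("port %u: link %s\n", p, port_state[p].link ? "up" : "down");
++	}
++}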
++
++unsigned int SPI_get_identifier(void)
++{
++	unsigned int flag=0;
++
++#ifdef CONFIG_IT8712_GPIO
++
++	if (!it8712_exist) {
++		return -ENODEV;
++	}
++	printk("it8712_gpio init\n");
++
++	/* initialize registers */
++	// switch all multi-function pins to GPIO
++	LPCSetConfig(LDN_GPIO, 0x28, 0xff);
++
++	// set simple I/O base address
++	LPCSetConfig(LDN_GPIO, 0x62, IT8712_GPIO_BASE >> 8);
++	LPCSetConfig(LDN_GPIO, 0x63, (unsigned char) IT8712_GPIO_BASE >> 8);
++
++	// select GPIO to simple I/O
++	LPCSetConfig(LDN_GPIO, 0xc3, 0xff);
++
++	// enable internal pull-up
++	LPCSetConfig(LDN_GPIO, 0xbb, 0xff);
++
++#endif
++
++	flag = SPI_read(1,0x00);
++	printk("Get ADM identifier %6x\n",flag);
++	if ((flag & 0xFFFF0) == 0x21120) {
++		printk("ADM699X Found\n");
++		return 1;
++	}
++	else {
++		printk("ADM699X not Found\n");
++		return 0;
++	}
++}
++
+Index: linux-2.6.23.16/drivers/net/sl351x_crc16.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_crc16.c	2008-03-15 16:57:25.854761029 +0200
+@@ -0,0 +1,93 @@
++/****************************************************************************
++* Name			: sl351x_crc16.c
++* Description	:
++*		Implement CRC16
++*		refer to RFC1662
++* History
++*
++*	Date		Writer		Description
++*	-----------	-----------	-------------------------------------------------
++*	09/14/2005	Gary Chen	Create
++*
++****************************************************************************/
++
++#define INITFCS16		0xffff  /* Initial FCS value */
++#define GOODFCS16		0xf0b8  /* Good final FCS value */
++#define SWAP_WORD(x)	(unsigned short)((((unsigned short)x & 0x00FF) << 8) |	\
++										 (((unsigned short)x & 0xFF00) >> 8))
++
++/*----------------------------------------------------------------------
++* 	x**0 + x**5 + x**12 + x**16
++*----------------------------------------------------------------------*/
++static const unsigned short crc16_tbl[256] = {
++      0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
++      0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
++      0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
++      0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
++      0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
++      0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
++      0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
++      0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
++      0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
++      0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
++      0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
++      0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
++      0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
++      0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
++      0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
++      0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
++      0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
++      0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
++      0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
++      0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
++      0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
++      0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
++      0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
++      0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
++      0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
++      0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
++      0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
++      0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
++      0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
++      0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
++      0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
++      0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
++};
++
++/*----------------------------------------------------------------------
++* hash_crc16
++*----------------------------------------------------------------------*/
++unsigned short hash_crc16(unsigned short crc, unsigned char *datap, unsigned long len)
++{
++    while (len--)
++    {
++        crc = (crc >> 8) ^ crc16_tbl[(crc ^ (*datap++)) & 0xff];
++    }
++
++    return (crc);
++
++}
++
++/*----------------------------------------------------------------------
++* hash_check_crc16
++*----------------------------------------------------------------------*/
++unsigned long hash_check_crc16(unsigned char *datap, unsigned long len)
++{
++    unsigned short crc;
++
++    crc = hash_crc16(INITFCS16, datap, len );
++    return (crc == GOODFCS16) ?  0 : 1;
++}
++
++/*----------------------------------------------------------------------
++* hash_gen_crc16
++*----------------------------------------------------------------------*/
++unsigned short hash_gen_crc16(unsigned char *datap, unsigned long len)
++{
++    unsigned short crc;
++
++    crc = hash_crc16(INITFCS16, datap, len);
++    crc ^= 0xffff;
++
++    return(SWAP_WORD(crc));
++}
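++
++/* Example usage (illustrative, not part of the original file): generating a
++ * byte-swapped RFC1662 CRC-16 over a 6-byte MAC address, the kind of value a
++ * hash key might be built from.  The address itself is an arbitrary example.
++ */
++unsigned short example_mac_crc16(void)
++{
++    unsigned char mac[6] = { 0x00, 0x11, 0x11, 0x87, 0x87, 0x87 };
++
++    return hash_gen_crc16(mac, sizeof(mac));
++}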
+Index: linux-2.6.23.16/drivers/net/sl351x_gmac.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_gmac.c	2008-03-15 16:59:23.361457295 +0200
+@@ -0,0 +1,5622 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*--------------------------------------------------------------------------
++* Name			: sl351x_gmac.c
++* Description	:
++*		Ethernet device driver for Storlink SL351x FPGA
++*
++* History
++*
++*	Date		Writer		Description
++*	-----------	-----------	-------------------------------------------------
++*	08/22/2005	Gary Chen	Create and implement
++*   27/10/2005  CH Hsu      Porting to Linux
++*
++****************************************************************************/
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#include 
++
++#define	 MIDWAY
++#define	 SL_LEPUS
++#define VITESSE_G5SWITCH	1
++
++#ifndef CONFIG_SL351x_RXTOE
++//#define CONFIG_SL351x_RXTOE	1
++#endif
++#undef CONFIG_SL351x_RXTOE
++
++#include 
++#include 
++#include 
++#include 
++
++#ifdef CONFIG_SL351x_SYSCTL
++#include 
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++#include 
++#include 
++#include 
++#include 
++#endif
++
++// #define SL351x_TEST_WORKAROUND
++#ifdef CONFIG_SL351x_NAT
++#define CONFIG_SL_NAPI					1
++#endif
++#define GMAX_TX_INTR_DISABLED			1
++#define DO_HW_CHKSUM					1
++#define ENABLE_TSO						1
++#define GMAC_USE_TXQ0					1
++// #define NAT_WORKAROUND_BY_RESET_GMAC	1
++// #define HW_RXBUF_BY_KMALLOC			1
++//#define _DUMP_TX_TCP_CONTENT	1
++#define	br_if_ioctl						1
++#define GMAC_LEN_1_2_ISSUE				1
++
++#define GMAC_EXISTED_FLAG			0x5566abcd
++#define CONFIG_MAC_NUM				GMAC_NUM
++#define GMAC0_BASE					TOE_GMAC0_BASE
++#define GMAC1_BASE					TOE_GMAC1_BASE
++#define PAUSE_SET_HW_FREEQ			(TOE_HW_FREEQ_DESC_NUM / 2)
++#define PAUSE_REL_HW_FREEQ			((TOE_HW_FREEQ_DESC_NUM / 2) + 10)
++#define DEFAULT_RXQ_MAX_CNT			256
++#ifdef	L2_jumbo_frame
++#define TCPHDRLEN(tcp_hdr)  ((ntohs(*((__u16 *)tcp_hdr + 6)) >> 12) & 0x000F)
++#endif
++
++/* define chip information */
++#define DRV_NAME					"SL351x"
++#define DRV_VERSION					"0.1.4"
++#define SL351x_DRIVER_NAME  		DRV_NAME " Giga Ethernet driver " DRV_VERSION
++
++#define toe_gmac_enable_interrupt(irq)	enable_irq(irq)
++#define toe_gmac_disable_interrupt(irq)	disable_irq(irq)
++
++#ifdef SL351x_GMAC_WORKAROUND
++#define GMAC_SHORT_FRAME_THRESHOLD		10
++static struct timer_list gmac_workround_timer_obj;
++void sl351x_poll_gmac_hanged_status(u32 data);
++#ifdef CONFIG_SL351x_NAT
++//#define IxscriptMate_1518				1
++	void sl351x_nat_workaround_init(void);
++	#ifndef NAT_WORKAROUND_BY_RESET_GMAC
++		static void sl351x_nat_workaround_handler(void);
++	#endif
++#endif
++#endif
++
++#ifdef GMAC_LEN_1_2_ISSUE
++	#define _DEBUG_PREFETCH_NUM	256
++static	int	_debug_prefetch_cnt;
++static	char _debug_prefetch_buf[_DEBUG_PREFETCH_NUM][4] __attribute__((aligned(4)));
++#endif
++/*************************************************************
++ *         Global Variable
++ *************************************************************/
++static int	gmac_initialized = 0;
++TOE_INFO_T toe_private_data;
++//static int		do_again = 0;
++spinlock_t gmac_fq_lock;
++unsigned int FLAG_SWITCH;
++
++static unsigned int     	next_tick = 3 * HZ;
++static unsigned char    	eth_mac[CONFIG_MAC_NUM][6]= {{0x00,0x11,0x11,0x87,0x87,0x87}, {0x00,0x22,0x22,0xab,0xab,0xab}};
++
++#undef CONFIG_SL351x_RXTOE
++extern NAT_CFG_T nat_cfg;
++
++/************************************************/
++/*                 function declare             */
++/************************************************/
++static int gmac_set_mac_address(struct net_device *dev, void *addr);
++static unsigned int gmac_get_phy_vendor(int phy_addr);
++static void gmac_set_phy_status(struct net_device *dev);
++void gmac_get_phy_status(struct net_device *dev);
++static int gmac_netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static void gmac_tx_timeout(struct net_device *dev);
++static int gmac_phy_thread (void *data);
++struct net_device_stats * gmac_get_stats(struct net_device *dev);
++static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static void gmac_set_rx_mode(struct net_device *dev);
++static irqreturn_t toe_gmac_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
++static void toe_gmac_handle_default_rxq(struct net_device *dev, GMAC_INFO_T *tp);
++unsigned int mii_read(unsigned char phyad,unsigned char regad);
++void mii_write(unsigned char phyad,unsigned char regad,unsigned int value);
++void mac_init_drv(void);
++
++static void toe_init_free_queue(void);
++static void toe_init_swtx_queue(void);
++static void toe_init_default_queue(void);
++#ifdef CONFIG_SL351x_RXTOE
++static void toe_init_interrupt_queue(void);
++#endif
++static void toe_init_interrupt_config(void);
++static void toe_gmac_sw_reset(void);
++static int toe_gmac_init_chip(struct net_device *dev);
++static void toe_gmac_enable_tx_rx(struct net_device* dev);
++static void toe_gmac_disable_tx_rx(struct net_device *dev);
++static void toe_gmac_hw_start(struct net_device *dev);
++static void toe_gmac_hw_stop(struct net_device *dev);
++static int toe_gmac_clear_counter(struct net_device *dev);
++static void toe_init_gmac(struct net_device *dev);
++static  void toe_gmac_tx_complete(GMAC_INFO_T *tp, unsigned int tx_qid, struct net_device *dev, int interrupt);
++#ifdef CONFIG_SL_NAPI
++static int gmac_rx_poll(struct net_device *dev, int *budget);
++// static void toe_gmac_disable_rx(struct net_device *dev);
++// static void toe_gmac_enable_rx(struct net_device *dev);
++#endif
++
++u32 mac_read_dma_reg(int mac, unsigned int offset);
++void mac_write_dma_reg(int mac, unsigned int offset, u32 data);
++void mac_stop_txdma(struct net_device *dev);
++void mac_get_sw_tx_weight(struct net_device *dev, char *weight);
++void mac_set_sw_tx_weight(struct net_device *dev, char *weight);
++void mac_get_hw_tx_weight(struct net_device *dev, char *weight);
++void mac_set_hw_tx_weight(struct net_device *dev, char *weight);
++static inline void toe_gmac_fill_free_q(void);
++
++#ifdef VITESSE_G5SWITCH
++extern int Get_Set_port_status(void);
++extern int SPI_default(void);
++extern unsigned int SPI_get_identifier(void);
++void gmac_get_switch_status(struct net_device *dev);
++unsigned int Giga_switch=0;
++unsigned int switch_port_no=0;
++unsigned int ever_dwon=0;
++#endif
++
++/************************************************/
++/*            GMAC function declare             */
++/************************************************/
++static int gmac_open (struct net_device *dev);
++static int gmac_close (struct net_device *dev);
++static void gmac_cleanup_module(void);
++static void gmac_get_mac_address(void);
++
++#ifdef CONFIG_SL351x_NAT
++static void toe_init_hwtx_queue(void);
++extern void sl351x_nat_init(void);
++extern void sl351x_nat_input(struct sk_buff *skb, int port, void *l3off, void *l4off);
++extern int sl351x_nat_output(struct sk_buff *skb, int port);
++extern int sl351x_nat_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++extern void set_toeq_hdr(struct toe_conn* connection, TOE_INFO_T* toe, struct net_device *dev);
++extern void sl351x_toe_init(void);
++extern void toe_gmac_handle_toeq(struct net_device *dev, GMAC_INFO_T* tp, __u32 status);
++extern struct toe_conn* init_toeq(int ipver, void* iph, struct tcphdr* tcp_hdr, TOE_INFO_T* toe, unsigned char* l2hdr);
++#endif
++
++int mac_set_rule_reg(int mac, int rule, int enabled, u32 reg0, u32 reg1, u32 reg2);
++void mac_set_rule_enable_bit(int mac, int rule, int data);
++int mac_set_rule_action(int mac, int rule, int data);
++int mac_get_MRxCRx(int mac, int rule, int ctrlreg);
++void mac_set_MRxCRx(int mac, int rule, int ctrlreg, u32 data);
++
++/*----------------------------------------------------------------------
++*	Ethernet Driver init
++*----------------------------------------------------------------------*/
++
++static int __init gmac_init_module(void)
++{
++	GMAC_INFO_T 		*tp;
++	struct net_device	*dev;
++	int 		i,j;
++	unsigned int	chip_id;
++//	unsigned int chip_version;
++
++#ifdef CONFIG_SL3516_ASIC
++{
++    unsigned int    val;
++    /* set GMAC global register */
++    val = readl(GMAC_GLOBAL_BASE_ADDR+0x10);
++    val = val | 0x005f0000;
++    writel(val,GMAC_GLOBAL_BASE_ADDR+0x10);
++//    writel(0xb737b737,GMAC_GLOBAL_BASE_ADDR+0x1c); //For Socket Board
++    writel(0x77777777,GMAC_GLOBAL_BASE_ADDR+0x20);
++//    writel(0xa737b747,GMAC_GLOBAL_BASE_ADDR+0x1c);//For Mounting Board
++
++	//debug_Aaron
++    //writel(0xa7f0a7f0,GMAC_GLOBAL_BASE_ADDR+0x1c);//For Mounting Board
++    writel(0xa7f0b7f0,GMAC_GLOBAL_BASE_ADDR+0x1c);//For Mounting Board
++
++    writel(0x77777777,GMAC_GLOBAL_BASE_ADDR+0x24);
++	writel(0x09200030,GMAC_GLOBAL_BASE_ADDR+0x2C);
++	val = readl(GMAC_GLOBAL_BASE_ADDR+0x04);
++	if((val&(1<<20))==0){           // GMAC1 enable
++ 		val = readl(GMAC_GLOBAL_BASE_ADDR+0x30);
++		val = (val & 0xe7ffffff) | 0x08000000;
++		writel(val,GMAC_GLOBAL_BASE_ADDR+0x30);
++	}
++}
++#endif
++
++#ifdef VITESSE_G5SWITCH
++	Giga_switch = SPI_get_identifier();
++	if(Giga_switch)
++		switch_port_no = SPI_default();
++#endif
++
++	chip_id = readl(GMAC_GLOBAL_BASE_ADDR+0x0);
++	if (chip_id == 0x3512C1)
++	{
++		writel(0x5787a5f0,GMAC_GLOBAL_BASE_ADDR+0x1c);//For 3512 Switch Board
++		writel(0x55557777,GMAC_GLOBAL_BASE_ADDR+0x20);//For 3512 Switch Board
++	}
++//#endif
++
++	mac_init_drv();
++
++	printk (KERN_INFO SL351x_DRIVER_NAME " built at %s %s\n", __DATE__, __TIME__);
++
++//	init_waitqueue_entry(&wait, current);
++
++	// printk("GMAC Init......\n");
++
++	i = 0;
++	for(j = 0; i<CONFIG_MAC_NUM; i++, j++)
++	{
++		tp = (GMAC_INFO_T *)&toe_private_data.gmac[i];
++		tp->dev = NULL;
++		if (tp->existed != GMAC_EXISTED_FLAG) continue;
++
++		dev = alloc_etherdev(0);
++		if (dev == NULL)
++		{
++			printk (KERN_ERR "Can't allocate ethernet device #%d .\n",i);
++			return -ENOMEM;
++		}
++
++		dev->priv=tp;
++		tp->dev = dev;
++
++		SET_MODULE_OWNER(dev);
++
++		// spin_lock_init(&tp->lock);
++		spin_lock_init(&gmac_fq_lock);
++		dev->base_addr = tp->base_addr;
++		dev->irq = tp->irq;
++	    dev->open = gmac_open;
++	    dev->stop = gmac_close;
++		dev->hard_start_xmit = gmac_start_xmit;
++		dev->get_stats = gmac_get_stats;
++		dev->set_multicast_list = gmac_set_rx_mode;
++		dev->set_mac_address = gmac_set_mac_address;
++		dev->do_ioctl = gmac_netdev_ioctl;
++		dev->tx_timeout = gmac_tx_timeout;
++		dev->watchdog_timeo = GMAC_DEV_TX_TIMEOUT;
++#ifdef	L2_jumbo_frame
++		dev->mtu = 2018; //2002  ,2018
++#endif
++		if (tp->port_id == 0)
++			dev->tx_queue_len = TOE_GMAC0_SWTXQ_DESC_NUM;
++		else
++			dev->tx_queue_len = TOE_GMAC1_SWTXQ_DESC_NUM;
++
++#ifdef DO_HW_CHKSUM
++		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
++#ifdef ENABLE_TSO
++		dev->features |= NETIF_F_TSO;
++#endif
++#endif
++#ifdef CONFIG_SL_NAPI
++        dev->poll = gmac_rx_poll;
++        dev->weight = 64;
++#endif
++
++		if (register_netdev(dev))
++		{
++			gmac_cleanup_module();
++			return(-1);
++		}
++	}
++
++
++//	FLAG_SWITCH = 0 ;
++//	FLAG_SWITCH = SPI_get_identifier();
++//	if(FLAG_SWITCH)
++//	{
++//		printk("Configure ADM699X...\n");
++//		SPI_default();	//Add by jason for ADM699X configuration
++//	}
++	return (0);
++}
++
++/*----------------------------------------------------------------------
++*	gmac_cleanup_module
++*----------------------------------------------------------------------*/
++
++static void gmac_cleanup_module(void)
++{
++    int i;
++
++#ifdef SL351x_GMAC_WORKAROUND
++	del_timer(&gmac_workround_timer_obj);
++#endif
++
++    for (i=0;igmac[0].base_addr = GMAC0_BASE;
++		toe->gmac[1].base_addr = GMAC1_BASE;
++		toe->gmac[0].dma_base_addr = TOE_GMAC0_DMA_BASE;
++		toe->gmac[1].dma_base_addr = TOE_GMAC1_DMA_BASE;
++        toe->gmac[0].auto_nego_cfg = 1;
++        toe->gmac[1].auto_nego_cfg = 1;
++#ifdef CONFIG_SL3516_ASIC
++        toe->gmac[0].speed_cfg = GMAC_SPEED_1000;
++        toe->gmac[1].speed_cfg = GMAC_SPEED_1000;
++#else
++		toe->gmac[0].speed_cfg = GMAC_SPEED_100;
++        toe->gmac[1].speed_cfg = GMAC_SPEED_100;
++#endif
++        toe->gmac[0].full_duplex_cfg = 1;
++        toe->gmac[1].full_duplex_cfg = 1;
++#ifdef CONFIG_SL3516_ASIC
++        toe->gmac[0].phy_mode = GMAC_PHY_RGMII_1000;
++        toe->gmac[1].phy_mode = GMAC_PHY_RGMII_1000;
++#else
++		toe->gmac[0].phy_mode = GMAC_PHY_RGMII_100;
++        toe->gmac[1].phy_mode = GMAC_PHY_RGMII_100;
++#endif
++        toe->gmac[0].port_id = GMAC_PORT0;
++        toe->gmac[1].port_id = GMAC_PORT1;
++        toe->gmac[0].phy_addr = 0x1;
++        toe->gmac[1].phy_addr = 2;
++//      toe->gmac[0].irq = SL2312_INTERRUPT_GMAC0;
++		toe->gmac[0].irq =1;
++//      toe->gmac[1].irq = SL2312_INTERRUPT_GMAC1;
++		toe->gmac[1].irq =2;
++        toe->gmac[0].mac_addr1 = &eth_mac[0][0];
++        toe->gmac[1].mac_addr1 = &eth_mac[1][0];
++
++		for (i=0; i<CONFIG_MAC_NUM; i++)
++		{
++			gmac_write_reg(toe->gmac[i].base_addr, GMAC_STA_ADD2, 0x55aa55aa, 0xffffffff);
++			data = gmac_read_reg(toe->gmac[i].base_addr, GMAC_STA_ADD2);
++			if (data == 0x55aa55aa)
++			{
++#ifdef VITESSE_G5SWITCH
++				if(Giga_switch && (i==1)){
++					toe->gmac[i].existed = GMAC_EXISTED_FLAG;
++					break;
++				}
++#endif
++				phy_vendor = gmac_get_phy_vendor(toe->gmac[i].phy_addr);
++				if (phy_vendor != 0 && phy_vendor != 0xffffffff)
++					toe->gmac[i].existed = GMAC_EXISTED_FLAG;
++			}
++		}
++
++		// Write GLOBAL_QUEUE_THRESHOLD_REG
++		threshold.bits32 = 0;
++		threshold.bits.swfq_empty = (TOE_SW_FREEQ_DESC_NUM > 256) ? 255 :
++		                                        TOE_SW_FREEQ_DESC_NUM/2;
++		threshold.bits.hwfq_empty = (TOE_HW_FREEQ_DESC_NUM > 256) ? 256/4 :
++		                                        TOE_HW_FREEQ_DESC_NUM/4;
++		threshold.bits.toe_class = (TOE_TOE_DESC_NUM > 256) ? 256/4 :
++		                                        TOE_TOE_DESC_NUM/4;
++		threshold.bits.intrq = (TOE_INTR_DESC_NUM > 256) ? 256/4 :
++		                                        TOE_INTR_DESC_NUM/4;
++		writel(threshold.bits32, TOE_GLOBAL_BASE + GLOBAL_QUEUE_THRESHOLD_REG);
++
++		FLAG_SWITCH = 0;
++		toe_gmac_sw_reset();
++		toe_init_free_queue();
++		toe_init_swtx_queue();
++#ifdef CONFIG_SL351x_NAT
++		toe_init_hwtx_queue();
++#endif
++		toe_init_default_queue();
++#ifdef CONFIG_SL351x_RXTOE
++		toe_init_interrupt_queue();
++#endif
++		toe_init_interrupt_config();
++
++#if defined(CONFIG_SL351x_NAT) || defined(CONFIG_SL351x_RXTOE)
++		sl351x_hash_init();
++#else
++	{
++		volatile u32 *dp1, *dp2, dword;
++
++		dp1 = (volatile u32 *) TOE_V_BIT_BASE;
++		dp2 = (volatile u32 *) TOE_A_BIT_BASE;
++
++		for (i=0; isw_freeq_desc_base_dma) ;
++	sw_desc_ptr = (GMAC_RXDESC_T *)desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return;
++	}
++	memset((void *)desc_buf, 0, TOE_SW_FREEQ_DESC_NUM * sizeof(GMAC_RXDESC_T));
++
++	// DMA Queue Base & Size
++	writel((toe->sw_freeq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_SW_FREEQ_DESC_POWER,
++			TOE_GLOBAL_BASE + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
++
++	// init descriptor base
++	toe->swfq_desc_base = desc_buf;
++
++	// SW Free Queue Read/Write Pointer
++	rwptr_reg.bits.wptr = TOE_SW_FREEQ_DESC_NUM - 1;
++	rwptr_reg.bits.rptr = 0;
++	toe->fq_rx_rwptr.bits32 = rwptr_reg.bits32;
++	writel(rwptr_reg.bits32, TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++
++	// SW Free Queue Descriptors
++	for (i=0; i<TOE_SW_FREEQ_DESC_NUM; i++)
++	{
++		sw_desc_ptr->word0.bits.buffer_size = SW_RX_BUF_SIZE;
++		sw_desc_ptr->word1.bits.sw_id = i;	// used to locate skb
++		if ( (skb = dev_alloc_skb(SW_RX_BUF_SIZE))==NULL)  /* allocate socket buffer */
++		{
++			printk("%s::skb buffer allocation fail !\n",__func__); while(1);
++		}
++		REG32(skb->data) = (unsigned int)skb;
++		skb_reserve(skb, SKB_RESERVE_BYTES);
++		// toe->rx_skb[i] = skb;
++		sw_desc_ptr->word2.buf_adr = (unsigned int)__pa(skb->data);
++//   		consistent_sync((unsigned int)desc_ptr, sizeof(GMAC_RXDESC_T), PCI_DMA_TODEVICE);
++   		sw_desc_ptr++;
++	}
++
++#ifdef CONFIG_SL351x_NAT
++	if (sizeof(skb->cb) < 64)
++	{
++			printk("==> %s:: sk structure is incorrect -->Change to cb[64] !\n",__func__); while(1);
++	}
++	// init hardware free queues
++	desc_buf = (unsigned int)DMA_MALLOC((TOE_HW_FREEQ_DESC_NUM * sizeof(GMAC_RXDESC_T)),
++						(dma_addr_t *)&toe->hw_freeq_desc_base_dma) ;
++	desc_ptr = (GMAC_RXDESC_T *)desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return;
++	}
++	memset((void *)desc_buf, 0, TOE_HW_FREEQ_DESC_NUM * sizeof(GMAC_RXDESC_T));
++
++	// DMA Queue Base & Size
++	writel((toe->hw_freeq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_HW_FREEQ_DESC_POWER,
++			TOE_GLOBAL_BASE + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
++
++	// init descriptor base
++	toe->hwfq_desc_base = desc_buf;
++
++	// HW Free Queue Read/Write Pointer
++	rwptr_reg.bits.wptr = TOE_HW_FREEQ_DESC_NUM - 1;
++	rwptr_reg.bits.rptr = 0;
++	writel(rwptr_reg.bits32, TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++#ifndef HW_RXBUF_BY_KMALLOC
++	buf_ptr = (unsigned int)DMA_MALLOC(TOE_HW_FREEQ_DESC_NUM * HW_RX_BUF_SIZE,
++						(dma_addr_t *)&toe->hwfq_buf_base_dma);
++#else
++	buf_ptr = (unsigned int)kmalloc(TOE_HW_FREEQ_DESC_NUM * HW_RX_BUF_SIZE, GFP_KERNEL);
++	toe->hwfq_buf_base_dma = __pa(buf_ptr);
++#endif
++	if (!buf_ptr)
++	{
++		printk("===> %s::Failed to allocate HW TxQ Buffers!\n",__func__);
++		while(1);	// could not be happened, if happened, adjust the buffer descriptor number
++		return;
++	}
++
++	toe->hwfq_buf_base = buf_ptr;
++	toe->hwfq_buf_end_dma = toe->hwfq_buf_base_dma + (TOE_HW_FREEQ_DESC_NUM * HW_RX_BUF_SIZE);
++	buf_ptr = (unsigned int)toe->hwfq_buf_base_dma;
++	for (i=0; i<TOE_HW_FREEQ_DESC_NUM; i++)
++	{
++		desc_ptr->word0.bits.buffer_size = HW_RX_BUF_SIZE;
++		desc_ptr->word1.bits.sw_id = i;
++		desc_ptr->word2.buf_adr = (unsigned int)buf_ptr;
++//   		consistent_sync((unsigned int)desc_ptr, sizeof(GMAC_RXDESC_T), PCI_DMA_TODEVICE);
++   		// consistent_sync((unsigned int)buf_ptr, HW_RX_BUF_SIZE, PCI_DMA_TODEVICE);
++   		desc_ptr++;
++   		buf_ptr += HW_RX_BUF_SIZE;
++	}
++#else
++	// DMA Queue Base & Size
++	writel((0) | TOE_SW_FREEQ_DESC_POWER,
++			TOE_GLOBAL_BASE + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
++	rwptr_reg.bits.wptr = TOE_HW_FREEQ_DESC_NUM - 1;
++	rwptr_reg.bits.rptr = 0;
++	writel(rwptr_reg.bits32, TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++
++#endif
++}
++/*----------------------------------------------------------------------
++*	toe_init_swtx_queue
++*	(1) Initialize the GMAC 0/1 SW TXQ Queue Descriptor Base Address & size
++*		GMAC_SW_TX_QUEUE_BASE_REG(0x0050)
++*	(2) Initialize DMA Read/Write pointer for
++*		GMAC 0/1 SW TX Q0-5
++*----------------------------------------------------------------------*/
++static void toe_init_swtx_queue(void)
++{
++	int 				i;
++	TOE_INFO_T			*toe;
++	DMA_RWPTR_T			rwptr_reg;
++	unsigned int 		rwptr_addr;
++	unsigned int		desc_buf;
++
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++
++	// GMAC-0, SW-TXQ
++	// GMAC-0 and GMAC-1 may have different descriptor numbers,
++	// so this is not done with a single for loop over both MACs
++	desc_buf = (unsigned int)DMA_MALLOC((TOE_GMAC0_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T)),
++						(dma_addr_t *)&toe->gmac[0].swtxq_desc_base_dma) ;
++	toe->gmac[0].swtxq_desc_base = desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_buf, 0,	TOE_GMAC0_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T));
++	writel((toe->gmac[0].swtxq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_GMAC0_SWTXQ_DESC_POWER,
++			TOE_GMAC0_DMA_BASE+ GMAC_SW_TX_QUEUE_BASE_REG);
++
++	// GMAC0 SW TX Q0-Q5
++	rwptr_reg.bits.wptr = 0;
++	rwptr_reg.bits.rptr = 0;
++	rwptr_addr = TOE_GMAC0_DMA_BASE + GMAC_SW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_SW_TXQ_NUM; i++)
++	{
++		toe->gmac[0].swtxq[i].rwptr_reg = rwptr_addr;
++		toe->gmac[0].swtxq[i].desc_base = desc_buf;
++		toe->gmac[0].swtxq[i].total_desc_num = TOE_GMAC0_SWTXQ_DESC_NUM;
++		desc_buf += TOE_GMAC0_SWTXQ_DESC_NUM * sizeof(GMAC_TXDESC_T);
++		writel(rwptr_reg.bits32, rwptr_addr);
++		rwptr_addr+=4;
++	}
++
++	// GMAC-1, SW-TXQ
++	desc_buf = (unsigned int)DMA_MALLOC((TOE_GMAC1_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T)),
++						(dma_addr_t *)&toe->gmac[1].swtxq_desc_base_dma) ;
++	toe->gmac[1].swtxq_desc_base = desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_buf, 0,	TOE_GMAC1_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T));
++	writel((toe->gmac[1].swtxq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_GMAC1_SWTXQ_DESC_POWER,
++			TOE_GMAC1_DMA_BASE+ GMAC_SW_TX_QUEUE_BASE_REG);
++
++
++	// GMAC1 SW TX Q0-Q5
++	rwptr_reg.bits.wptr = 0;
++	rwptr_reg.bits.rptr = 0;
++	rwptr_addr = TOE_GMAC1_DMA_BASE + GMAC_SW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_SW_TXQ_NUM; i++)
++	{
++		toe->gmac[1].swtxq[i].rwptr_reg = rwptr_addr;
++		toe->gmac[1].swtxq[i].desc_base = desc_buf;
++		toe->gmac[1].swtxq[i].total_desc_num = TOE_GMAC1_SWTXQ_DESC_NUM;
++		desc_buf += TOE_GMAC1_SWTXQ_DESC_NUM * sizeof(GMAC_TXDESC_T);
++		writel(rwptr_reg.bits32, rwptr_addr);
++		rwptr_addr+=4;
++	}
++}
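++
++/* Illustrative helper, not part of the original driver: each SW TX queue set
++ * up above is a ring of total_desc_num descriptors tracked by the wptr/rptr
++ * pair that was just programmed.  Assuming the descriptor count is a power
++ * of two (as the *_DESC_POWER macros imply), the space still available to
++ * the CPU is conventionally computed like this, keeping one slot empty so
++ * that wptr == rptr means "ring empty".
++ */
++static inline unsigned int example_swtxq_free_space(unsigned int wptr,
++		unsigned int rptr, unsigned int total)
++{
++	return (rptr + total - wptr - 1) & (total - 1);
++}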
++
++/*----------------------------------------------------------------------
++*	toe_init_hwtx_queue
++*	(1) Initialize the GMAC 0/1 HW TXQ Queue Descriptor Base Address & size
++*		GMAC_HW_TX_QUEUE_BASE_REG(0x0054)
++*	(2) Initialize DMA Read/Write pointer for
++*		GMAC 0/1 HW TX Q0-5
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static void toe_init_hwtx_queue(void)
++{
++	int 				i;
++	TOE_INFO_T			*toe;
++	DMA_RWPTR_T			rwptr_reg;
++	unsigned int 		rwptr_addr;
++	unsigned int		desc_buf;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	// GMAC-0, HW-TXQ
++	// GMAC-0 and GMAC-1 may have different descriptor numbers,
++	// so this is not done with a single for loop over both MACs
++	desc_buf = (unsigned int)DMA_MALLOC((TOE_GMAC0_HWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T)),
++						(dma_addr_t *)&toe->gmac[0].hwtxq_desc_base_dma) ;
++	toe->gmac[0].hwtxq_desc_base = desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_buf, 0,	TOE_GMAC0_HWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T));
++	writel((toe->gmac[0].hwtxq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_GMAC0_HWTXQ_DESC_POWER,
++			TOE_GMAC0_DMA_BASE+ GMAC_HW_TX_QUEUE_BASE_REG);
++
++	// GMAC0 HW TX Q0-Q5
++	rwptr_reg.bits.wptr = 0;
++	rwptr_reg.bits.rptr = 0;
++	rwptr_addr = TOE_GMAC0_DMA_BASE + GMAC_HW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_HW_TXQ_NUM; i++)
++	{
++		toe->gmac[0].hwtxq[i].desc_base = desc_buf;
++		desc_buf += TOE_GMAC0_HWTXQ_DESC_NUM * sizeof(GMAC_TXDESC_T);
++		writel(rwptr_reg.bits32, rwptr_addr);
++		rwptr_addr+=4;
++	}
++
++	// GMAC-1, HW-TXQ
++	desc_buf = (unsigned int)DMA_MALLOC((TOE_GMAC1_HWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T)),
++						(dma_addr_t *)&toe->gmac[1].hwtxq_desc_base_dma) ;
++	toe->gmac[1].hwtxq_desc_base = desc_buf;
++	if (!desc_buf)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_buf, 0,	TOE_GMAC1_HWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T));
++	writel((toe->gmac[1].hwtxq_desc_base_dma & DMA_Q_BASE_MASK) | TOE_GMAC1_HWTXQ_DESC_POWER,
++			TOE_GMAC1_DMA_BASE+ GMAC_HW_TX_QUEUE_BASE_REG);
++
++	// GMAC1 HW TX Q0-Q5
++	rwptr_reg.bits.wptr = 0;
++	rwptr_reg.bits.rptr = 0;
++	rwptr_addr = TOE_GMAC1_DMA_BASE + GMAC_HW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_HW_TXQ_NUM; i++)
++	{
++		toe->gmac[1].hwtxq[i].desc_base = desc_buf;
++		desc_buf += TOE_GMAC1_HWTXQ_DESC_NUM * sizeof(GMAC_TXDESC_T);
++		writel(rwptr_reg.bits32, rwptr_addr);
++		rwptr_addr+=4;
++	}
++}
++#endif
++
++/*----------------------------------------------------------------------
++*	toe_init_default_queue
++*	(1) Initialize the default 0/1 Queue Header
++*		Register: TOE_DEFAULT_Q0_HDR_BASE (0x60002000)
++*				  TOE_DEFAULT_Q1_HDR_BASE (0x60002008)
++*	(2)	Initialize Descriptors of Default Queue 0/1
++*----------------------------------------------------------------------*/
++static void toe_init_default_queue(void)
++{
++	TOE_INFO_T				*toe;
++	volatile NONTOE_QHDR_T	*qhdr;
++	GMAC_RXDESC_T			*desc_ptr;
++	DMA_SKB_SIZE_T			skb_size;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	desc_ptr = (GMAC_RXDESC_T *)DMA_MALLOC((TOE_DEFAULT_Q0_DESC_NUM * sizeof(GMAC_RXDESC_T)),
++											(dma_addr_t *)&toe->gmac[0].default_desc_base_dma);
++	if (!desc_ptr)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_ptr, 0, TOE_DEFAULT_Q0_DESC_NUM * sizeof(GMAC_RXDESC_T));
++	toe->gmac[0].default_desc_base = (unsigned int)desc_ptr;
++	toe->gmac[0].default_desc_num = TOE_DEFAULT_Q0_DESC_NUM;
++	qhdr = (volatile NONTOE_QHDR_T *)TOE_DEFAULT_Q0_HDR_BASE;
++	qhdr->word0.base_size = ((unsigned int)toe->gmac[0].default_desc_base_dma & NONTOE_QHDR0_BASE_MASK) | TOE_DEFAULT_Q0_DESC_POWER;
++	qhdr->word1.bits32 = 0;
++	toe->gmac[0].rx_rwptr.bits32 = 0;
++	toe->gmac[0].default_qhdr = (NONTOE_QHDR_T *)qhdr;
++	desc_ptr = (GMAC_RXDESC_T *)DMA_MALLOC((TOE_DEFAULT_Q1_DESC_NUM * sizeof(GMAC_RXDESC_T)),
++											(dma_addr_t *)&toe->gmac[1].default_desc_base_dma);
++	if (!desc_ptr)
++	{
++		printk("%s::DMA_MALLOC fail !\n",__func__);
++		return	;
++	}
++	memset((void *)desc_ptr, 0, TOE_DEFAULT_Q1_DESC_NUM * sizeof(GMAC_RXDESC_T));
++	toe->gmac[1].default_desc_base = (unsigned int)desc_ptr;
++	toe->gmac[1].default_desc_num = TOE_DEFAULT_Q1_DESC_NUM;
++	qhdr = (volatile NONTOE_QHDR_T *)TOE_DEFAULT_Q1_HDR_BASE;
++	qhdr->word0.base_size = ((unsigned int)toe->gmac[1].default_desc_base_dma & NONTOE_QHDR0_BASE_MASK) | TOE_DEFAULT_Q1_DESC_POWER;
++	qhdr->word1.bits32 = 0;
++	toe->gmac[1].rx_rwptr.bits32 = 0;
++	toe->gmac[1].default_qhdr = (NONTOE_QHDR_T *)qhdr;
++
++	skb_size.bits.hw_skb_size = HW_RX_BUF_SIZE;
++	skb_size.bits.sw_skb_size = SW_RX_BUF_SIZE;
++	writel(skb_size.bits32, TOE_GLOBAL_BASE + GLOBAL_DMA_SKB_SIZE_REG);
++}
++
++/*----------------------------------------------------------------------
++*	toe_init_interrupt_queue
++*	(1) Initialize the Interrupt Queue Header
++*		Register: TOE_INTR_Q_HDR_BASE (0x60002080)
++*	(2)	Initialize Descriptors of Interrupt Queues
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_RXTOE
++static void toe_init_interrupt_queue(void)
++{
++	TOE_INFO_T				*toe;
++	volatile NONTOE_QHDR_T	*qhdr;
++	INTR_QHDR_T				*desc_ptr;
++	// unsigned int			desc_buf_addr;
++	int						i;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	desc_ptr = (INTR_QHDR_T *)DMA_MALLOC((TOE_INTR_QUEUE_NUM * TOE_INTR_DESC_NUM * sizeof(INTR_QHDR_T)),
++											(dma_addr_t *)&toe->intr_desc_base_dma);
++	if (!desc_ptr)
++	{
++		printk("%s::DMA_MALLOC interrupt queue fail !\n",__func__);
++		return	;
++	}
++	/*
++	desc_buf_addr = (unsigned int)DMA_MALLOC((TOE_INTR_DESC_NUM * sizeof(TOE_QHDR_T)),
++												(dma_addr_t *)&toe->intr_buf_base_dma);
++	if (!desc_buf_addr)
++	{
++		printk("%s::DMA_MALLOC interrupt desc fail !\n",__func__);
++		return	;
++	}*/
++	printk("#### %s::Intr Q desc %x\n", __func__, (u32)desc_ptr);
++
++	memset((void *)desc_ptr, 0, TOE_INTR_QUEUE_NUM * TOE_INTR_DESC_NUM * sizeof(INTR_QHDR_T));
++//	memset((void *)desc_buf_addr, 0, TOE_INTR_DESC_NUM * sizeof(TOE_QHDR_T));
++	toe->intr_desc_base = (unsigned int)desc_ptr;
++	toe->intr_desc_num = TOE_INTR_DESC_NUM;
++
++	qhdr = (volatile NONTOE_QHDR_T *)TOE_INTR_Q_HDR_BASE;
++//	intrq = (INTRQ_INFO_T*) &toe->intrq[0];
++	for (i = 0; i < TOE_INTR_QUEUE_NUM; i++, qhdr++)
++	{
++		qhdr->word0.base_size = ((unsigned int)toe->intr_desc_base_dma & NONTOE_QHDR0_BASE_MASK) | TOE_INTR_DESC_POWER;
++		qhdr->word1.bits32 = 0;
++		desc_ptr += TOE_INTR_DESC_NUM;
++	}
++}
++
++#endif
++
++/*----------------------------------------------------------------------
++*	toe_init_interrupt_config
++*	Interrupt Select Registers are used to map interrupt to int0 or int1
++*	Int0 and int1 are wired to CPU 0/1 GMAC 0/1
++* 	Interrupt Device Interface data are used to pass device info to the
++*		upper device driver or to store status/statistics
++*	ISR handler
++*		(1) If a status bit is ON but masked, print an error message (bug issue)
++*		(2) If the select bits are for this device, handle it; otherwise skip
++*			and let the other ISR handle it.
++*  Notes:
++*		The GMACx init routine (for eCOS) or open routine (for Linux)
++*       enables only the interrupt bits that are selected for it.
++*
++*	Default Setting:
++*		GMAC0 intr bits ------>	int0 ----> eth0
++*		GMAC1 intr bits ------> int1 ----> eth1
++*		TOE intr -------------> int0 ----> eth0
++*		Classification Intr --> int0 ----> eth0
++*		Default Q0 -----------> int0 ----> eth0
++*		Default Q1 -----------> int1 ----> eth1
++*----------------------------------------------------------------------*/
++static void toe_init_interrupt_config(void)
++{
++	// clear all status bits
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_0_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_1_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_2_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_3_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++
++	// Init select registers
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_0_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_1_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_2_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_3_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_4_REG);
++
++	// disable all interrupt
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_0_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_2_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_3_REG);
++	writel(0, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_4_REG);
++}
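++
++/* A minimal sketch of the "select bits are for me" test described above,
++ * assuming the status register is read the same way as in toe_gmac_interrupt();
++ * illustration only, not part of the original driver.
++ */
++#if 0
++	status1 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_1_REG);
++	if ((status1 & tp->intr1_selected) == 0)
++		return IRQ_NONE;	/* not selected for this GMAC; let the other ISR handle it */
++#endif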
++
++/*----------------------------------------------------------------------
++*	toe_init_gmac
++*----------------------------------------------------------------------*/
++static void toe_init_gmac(struct net_device *dev)
++{
++	GMAC_INFO_T		*tp = dev->priv;
++	TOE_INFO_T		*toe;
++	u32 			data;
++
++	if (!gmac_initialized)
++		return ;
++
++	if (!tp->existed)
++		return;
++
++	tp->dev = dev;
++	tp->flow_control_enable = 1;
++	tp->pre_phy_status = LINK_DOWN;
++	tp->full_duplex_status = tp->full_duplex_cfg;
++	tp->speed_status = tp->speed_cfg;
++
++#if 0
++   /* get mac address from FLASH */
++    gmac_get_mac_address();
++#endif
++
++    /* set PHY register to start the autonegotiation process */
++    gmac_set_phy_status(dev);
++
++	/* GMAC initialization */
++	if ( toe_gmac_init_chip(dev) )
++	{
++		printk ("GMAC %d init fail\n", tp->port_id);
++	}
++
++    /* clear statistic counter */
++    toe_gmac_clear_counter(dev);
++
++	memset((void *)&tp->ifStatics, 0, sizeof(struct net_device_stats));
++
++	/* -----------------------------------------------------------
++	Enable GMAC interrupt & disable loopback
++	Notes:
++		The GMACx init routine (for eCOS) or open routine (for Linux)
++		enables only the interrupt bits that are selected for it.
++	--------------------------------------------------------------*/
++	toe = (TOE_INFO_T *)&toe_private_data;
++
++	// Enable Interrupt Bits
++	if (tp->port_id == 0)
++	{
++		tp->intr0_selected =	GMAC0_TXDERR_INT_BIT	 | GMAC0_TXPERR_INT_BIT		|
++	                         	GMAC0_RXDERR_INT_BIT	 | GMAC0_RXPERR_INT_BIT		|
++	                            GMAC0_SWTQ05_FIN_INT_BIT | GMAC0_SWTQ05_EOF_INT_BIT |
++	                            GMAC0_SWTQ04_FIN_INT_BIT | GMAC0_SWTQ04_EOF_INT_BIT |
++	                            GMAC0_SWTQ03_FIN_INT_BIT | GMAC0_SWTQ03_EOF_INT_BIT |
++	                            GMAC0_SWTQ02_FIN_INT_BIT | GMAC0_SWTQ02_EOF_INT_BIT |
++	                            GMAC0_SWTQ01_FIN_INT_BIT | GMAC0_SWTQ01_EOF_INT_BIT |
++	                            GMAC0_SWTQ00_FIN_INT_BIT | GMAC0_SWTQ00_EOF_INT_BIT;
++
++#ifdef GMAX_TX_INTR_DISABLED
++	    tp->intr0_enabled =		0;
++#else
++	    tp->intr0_enabled =		GMAC0_SWTQ00_FIN_INT_BIT | GMAC0_SWTQ00_EOF_INT_BIT;
++#endif
++
++	    tp->intr1_selected =	TOE_IQ_ALL_BITS			 | TOE_CLASS_RX_INT_BITS	|
++	    						GMAC0_HWTQ03_EOF_INT_BIT | GMAC0_HWTQ02_EOF_INT_BIT |
++	    						GMAC0_HWTQ01_EOF_INT_BIT | GMAC0_HWTQ00_EOF_INT_BIT |
++	    						DEFAULT_Q0_INT_BIT;
++	    tp->intr1_enabled = 	DEFAULT_Q0_INT_BIT | TOE_IQ_ALL_BITS;
++	    tp->intr2_selected = 	0xffffffff;	 // TOE Queue 32-63 Full Intr
++	    tp->intr2_enabled = 	0xffffffff;
++	    tp->intr3_selected = 	0xffffffff;	 // TOE Queue 0-31 Full Intr
++	    tp->intr3_enabled = 	0xffffffff;
++	    tp->intr4_selected = 	GMAC0_INT_BITS | CLASS_RX_FULL_INT_BITS |
++	    						HWFQ_EMPTY_INT_BIT | SWFQ_EMPTY_INT_BIT;
++	    tp->intr4_enabled = 	GMAC0_INT_BITS | SWFQ_EMPTY_INT_BIT;
++
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_0_REG) & ~tp->intr0_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_0_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_1_REG) & ~tp->intr1_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_1_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_2_REG) & ~tp->intr2_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_2_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_3_REG) & ~tp->intr3_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_3_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_4_REG) & ~tp->intr4_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_4_REG);
++	}
++	else
++	{
++		tp->intr0_selected =	GMAC1_TXDERR_INT_BIT	 | GMAC1_TXPERR_INT_BIT		|
++	                         	GMAC1_RXDERR_INT_BIT	 | GMAC1_RXPERR_INT_BIT		|
++	                            GMAC1_SWTQ15_FIN_INT_BIT | GMAC1_SWTQ15_EOF_INT_BIT |
++	                            GMAC1_SWTQ14_FIN_INT_BIT | GMAC1_SWTQ14_EOF_INT_BIT |
++	                            GMAC1_SWTQ13_FIN_INT_BIT | GMAC1_SWTQ13_EOF_INT_BIT |
++	                            GMAC1_SWTQ12_FIN_INT_BIT | GMAC1_SWTQ12_EOF_INT_BIT |
++	                            GMAC1_SWTQ11_FIN_INT_BIT | GMAC1_SWTQ11_EOF_INT_BIT |
++	                            GMAC1_SWTQ10_FIN_INT_BIT | GMAC1_SWTQ10_EOF_INT_BIT;
++#ifdef GMAX_TX_INTR_DISABLED
++	    tp->intr0_enabled =		0;
++#else
++	    tp->intr0_enabled =		GMAC1_SWTQ10_FIN_INT_BIT | GMAC1_SWTQ10_EOF_INT_BIT;
++#endif
++
++	    tp->intr1_selected =	DEFAULT_Q1_INT_BIT;
++	    tp->intr1_enabled = 	DEFAULT_Q1_INT_BIT | TOE_IQ_ALL_BITS;
++	    tp->intr2_selected = 	0;	 // TOE Queue 32-63 Full Intr
++	    tp->intr2_enabled = 	0;
++	    tp->intr3_selected = 	0;	 // TOE Queue 0-31 Full Intr
++	    tp->intr3_enabled = 	0;
++	    tp->intr4_selected = 	GMAC1_INT_BITS;
++	    tp->intr4_enabled = 	GMAC1_INT_BITS;
++
++	    if (toe->gmac[0].existed != GMAC_EXISTED_FLAG)
++	    {
++	    	tp->intr1_selected	|= 	TOE_IQ_ALL_BITS | TOE_CLASS_RX_INT_BITS	|
++	    						  	GMAC0_HWTQ03_EOF_INT_BIT | GMAC0_HWTQ02_EOF_INT_BIT |
++	    						  	GMAC0_HWTQ01_EOF_INT_BIT | GMAC0_HWTQ00_EOF_INT_BIT;
++	    	tp->intr1_enabled	|= 	TOE_IQ_ALL_BITS;
++	    	tp->intr2_selected	|= 	0xffffffff;	 // TOE Queue 32-63 Full Intr
++	    	tp->intr2_enabled	|= 	0xffffffff;
++	    	tp->intr3_selected	|= 	0xffffffff;	 // TOE Queue 0-31 Full Intr
++	    	tp->intr3_enabled	|= 	0xffffffff;
++	    	tp->intr4_selected 	|= 	CLASS_RX_FULL_INT_BITS |
++	    							HWFQ_EMPTY_INT_BIT | SWFQ_EMPTY_INT_BIT;
++	    	tp->intr4_enabled	|= 	SWFQ_EMPTY_INT_BIT;
++		}
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_0_REG) | tp->intr0_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_0_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_1_REG) | tp->intr1_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_1_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_2_REG) | tp->intr2_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_2_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_3_REG) | tp->intr3_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_3_REG);
++	    data = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_4_REG) | tp->intr4_selected;
++	    writel(data, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_SELECT_4_REG);
++	}
++
++	// enable only selected bits
++	gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_0_REG,
++					tp->intr0_enabled, tp->intr0_selected);
++	gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_1_REG,
++					tp->intr1_enabled, tp->intr1_selected);
++	gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_2_REG,
++					tp->intr2_enabled, tp->intr2_selected);
++	gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_3_REG,
++					tp->intr3_enabled, tp->intr3_selected);
++	gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_4_REG,
++					tp->intr4_enabled, tp->intr4_selected);
++
++    /* start DMA process */
++	toe_gmac_hw_start(dev);
++
++    /* enable tx/rx register */
++    toe_gmac_enable_tx_rx(dev);
++
++//	toe_gmac_enable_interrupt(tp->irq);
++
++    return ;
++}
++
++
++/*----------------------------------------------------------------------
++* toe_gmac_sw_reset
++*----------------------------------------------------------------------*/
++static void toe_gmac_sw_reset(void)
++{
++	unsigned int	reg_val;
++	reg_val = readl(GMAC_GLOBAL_BASE_ADDR+GLOBAL_RESET_REG) | 0x00000060;   /* GMAC0 S/W reset */
++    writel(reg_val,GMAC_GLOBAL_BASE_ADDR+GLOBAL_RESET_REG);
++    udelay(100);
++    return;
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_init_chip
++*----------------------------------------------------------------------*/
++static int toe_gmac_init_chip(struct net_device *dev)
++{
++	GMAC_INFO_T 	*tp = dev->priv;
++	GMAC_CONFIG2_T	config2_val;
++	GMAC_CONFIG0_T	config0,config0_mask;
++	GMAC_CONFIG1_T	config1;
++	#ifdef CONFIG_SL351x_NAT
++	GMAC_CONFIG3_T	config3_val;
++	#endif
++	GMAC_TX_WCR0_T	hw_weigh;
++	GMAC_TX_WCR1_T	sw_weigh;
++//	GMAC_HASH_ENABLE_REG0_T hash_ctrl;
++//
++#if 0 /* mac address will be set in late_initcall */
++	struct sockaddr sock;
++	// GMAC_AHB_WEIGHT_T	ahb_weight, ahb_weight_mask;
++
++
++	/* set station MAC address1 and address2 */
++	memcpy(&sock.sa_data[0], &eth_mac[tp->port_id][0], 6);
++	gmac_set_mac_address(dev,(void *)&sock);
++#endif
++
++	/* set RX_FLTR register to receive all multicast packet */
++	gmac_write_reg(tp->base_addr, GMAC_RX_FLTR, 0x00000007,0x0000001f);
++	//    gmac_write_reg(tp->base_addr, GMAC_RX_FLTR, 0x00000007,0x0000001f);
++	//gmac_write_reg(tp->base_addr, GMAC_RX_FLTR,0x00000007,0x0000001f);
++
++	/* set per packet buffer size */
++	//	config1.bits32 = 0x002004;	//next version
++	/* set flow control threshold */
++	config1.bits32 = 0;
++	config1.bits.set_threshold = 32 / 2;
++	config1.bits.rel_threshold = 32 / 4 * 3;
++	gmac_write_reg(tp->base_addr, GMAC_CONFIG1, config1.bits32, 0xffffffff);
++
++	/* set flow control threshold */
++	config2_val.bits32 = 0;
++	config2_val.bits.set_threshold = TOE_SW_FREEQ_DESC_NUM/2;
++	config2_val.bits.rel_threshold = TOE_SW_FREEQ_DESC_NUM*3/4;
++	gmac_write_reg(tp->base_addr, GMAC_CONFIG2, config2_val.bits32,0xffffffff);
++
++	#ifdef CONFIG_SL351x_NAT
++	/* set HW free queue flow control threshold */
++	config3_val.bits32 = 0;
++	config3_val.bits.set_threshold = PAUSE_SET_HW_FREEQ;
++	config3_val.bits.rel_threshold = PAUSE_REL_HW_FREEQ;
++	gmac_write_reg(tp->base_addr, GMAC_CONFIG3, config3_val.bits32,0xffffffff);
++	#endif
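++
++	/* Worked arithmetic for the thresholds programmed above (illustration
++	 * only): config1 evaluates to set_threshold = 32/2 = 16 and
++	 * rel_threshold = 32*3/4 = 24, while config2 applies the same 1/2 and
++	 * 3/4 ratios to TOE_SW_FREEQ_DESC_NUM. */
++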
++	/* set_mcast_filter mask*/
++	//	gmac_write_reg(tp->base_addr,GMAC_MCAST_FIL0,0x0,0xffffffff);
++	//  gmac_write_reg(tp->base_addr,GMAC_MCAST_FIL1,0x0,0xffffffff);
++
++	/* disable TX/RX and disable internal loop back */
++	config0.bits32 = 0;
++	config0_mask.bits32 = 0;
++
++	//debug_Aaron
++#ifdef	L2_jumbo_frame
++	config0.bits.max_len = 5;
++#else
++	config0.bits.max_len = 2;
++#endif
++
++	if (tp->flow_control_enable==1)
++	{
++		config0.bits.tx_fc_en = 1; /* enable tx flow control */
++		config0.bits.rx_fc_en = 1; /* enable rx flow control */
++		printk("Enable MAC Flow Control...\n");
++	}
++	else
++	{
++		config0.bits.tx_fc_en = 0; /* disable tx flow control */
++		config0.bits.rx_fc_en = 0; /* disable rx flow control */
++		printk("Disable MAC Flow Control...\n");
++	}
++	config0.bits.dis_rx = 1;  /* disable rx */
++	config0.bits.dis_tx = 1;  /* disable tx */
++	config0.bits.loop_back = 0; /* enable/disable GMAC loopback */
++	config0.bits.rx_err_detect = 1;
++	config0.bits.rgmii_en = 0;
++	config0.bits.rgmm_edge = 1;
++	config0.bits.rxc_inv = 0;
++	config0.bits.ipv4_rx_chksum = 1;  /* enable H/W to check ip checksum */
++	config0.bits.ipv6_rx_chksum = 1;  /* enable H/W to check ip checksum */
++	config0.bits.port0_chk_hwq = 1;	// GaryChen 3/24/2006 2:26PM
++	config0.bits.port1_chk_hwq = 1;	// GaryChen 3/24/2006 2:26PM
++	config0.bits.port0_chk_toeq = 1;
++	config0.bits.port1_chk_toeq = 1;
++	config0.bits.port0_chk_classq = 1;
++	config0.bits.port1_chk_classq = 1;
++
++	config0_mask.bits.max_len = 7;
++	config0_mask.bits.tx_fc_en = 1;
++	config0_mask.bits.rx_fc_en = 1;
++	config0_mask.bits.dis_rx = 1;
++	config0_mask.bits.dis_tx = 1;
++	config0_mask.bits.loop_back = 1;
++	config0_mask.bits.rgmii_en = 1;
++	config0_mask.bits.rgmm_edge = 1;
++	config0_mask.bits.rxc_inv = 1;
++	config0_mask.bits.ipv4_rx_chksum = 1;
++	config0_mask.bits.ipv6_rx_chksum = 1;
++	config0_mask.bits.port0_chk_hwq = 1;
++	config0_mask.bits.port1_chk_hwq = 1;
++	config0_mask.bits.port0_chk_toeq = 1;
++	config0_mask.bits.port1_chk_toeq = 1;
++	config0_mask.bits.port0_chk_classq = 1;
++	config0_mask.bits.port1_chk_classq = 1;
++	config0_mask.bits.rx_err_detect = 1;
++
++	#if 0
++	config0.bits.dis_rx = 1;  /* disable rx */
++	config0.bits.dis_tx = 1;  /* disable tx */
++	config0.bits.loop_back = 0; /* enable/disable GMAC loopback */
++	config0.bits.txc_inv = 0;
++	config0.bits.rgmii_en = 0;
++	config0.bits.rgmm_edge = 1;
++	config0.bits.rxc_inv = 1;
++	config0.bits.ipv4_tss_rx_en = 1;  /* enable H/W to check ip checksum */
++	config0.bits.ipv6_tss_rx_en = 1;  /* enable H/W to check ip checksum */
++
++	config0_mask.bits.max_len = 3;
++	config0_mask.bits.tx_fc_en = 1;
++	config0_mask.bits.rx_fc_en = 1;
++	config0_mask.bits.dis_rx = 1;
++	config0_mask.bits.dis_tx = 1;
++	config0_mask.bits.loop_back = 1;
++	config0_mask.bits.rgmii_en = 1;
++	config0_mask.bits.rgmm_edge = 1;
++	config0_mask.bits.txc_inv = 1;
++	config0_mask.bits.rxc_inv = 1;
++	config0_mask.bits.ipv4_tss_rx_en = 1;
++	config0_mask.bits.ipv6_tss_rx_en = 1;
++	#endif
++
++	gmac_write_reg(tp->base_addr, GMAC_CONFIG0, config0.bits32,config0_mask.bits32);
++
++	#if 1
++	hw_weigh.bits32 = 0;
++	hw_weigh.bits.hw_tq3 = 1;
++	hw_weigh.bits.hw_tq2 = 1;
++	hw_weigh.bits.hw_tq1 = 1;
++	hw_weigh.bits.hw_tq0 = 1;
++	gmac_write_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_0_REG, hw_weigh.bits32, 0xffffffff);
++
++	sw_weigh.bits32 = 0;
++	sw_weigh.bits.sw_tq5 = 1;
++	sw_weigh.bits.sw_tq4 = 1;
++	sw_weigh.bits.sw_tq3 = 1;
++	sw_weigh.bits.sw_tq2 = 1;
++	sw_weigh.bits.sw_tq1 = 1;
++	sw_weigh.bits.sw_tq0 = 1;
++	gmac_write_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_1_REG, sw_weigh.bits32, 0xffffffff);
++	#endif
++
++	#if 0
++	ahb_weight.bits32 = 0;
++	ahb_weight_mask.bits32 = 0;
++	ahb_weight.bits.rx_weight = 1;
++	ahb_weight.bits.tx_weight = 1;
++	ahb_weight.bits.hash_weight = 1;
++	ahb_weight.bits.pre_req = 0x1f;
++	ahb_weight.bits.tqDV_threshold = 0;
++	ahb_weight_mask.bits.rx_weight = 0x1f;
++	ahb_weight_mask.bits.tx_weight = 0x1f;
++	ahb_weight_mask.bits.hash_weight = 0x1f;
++	ahb_weight_mask.bits.pre_req = 0x1f;
++	ahb_weight_mask.bits.tqDV_threshold = 0x1f;
++	gmac_write_reg(tp->dma_base_addr, GMAC_AHB_WEIGHT_REG, ahb_weight.bits32, ahb_weight_mask.bits32);
++	#endif
++
++	#if defined(CONFIG_SL351x_NAT) || defined(CONFIG_SL351x_RXTOE)
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR0, IPPROTO_TCP, 0xffffffff);
++	#endif
++	#ifdef CONFIG_SL351x_NAT
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR1, IPPROTO_UDP, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR2, IPPROTO_GRE, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR3, 0xff, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR4, 0xff, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR5, 0xff, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR6, 0xff, 0xffffffff);
++	gmac_write_reg(tp->dma_base_addr, GMAC_SPR7, 0xff, 0xffffffff);
++
++	sl351x_nat_init();
++	#endif
++
++	#ifdef CONFIG_SL351x_RXTOE
++	/* setup matching rule to TOE */
++	sl351x_toe_init();
++	#endif
++
++	// for A1 ASIC version
++//	hash_ctrl.bits32 = 0;
++//	hash_ctrl.bits.timing = 6;
++//	gmac_write_reg(tp->dma_base_addr, GMAC_HASH_ENGINE_REG0, hash_ctrl.bits32, 0xffffffff);
++
++	return (0);
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_enable_tx_rx
++*----------------------------------------------------------------------*/
++static void toe_gmac_enable_tx_rx(struct net_device *dev)
++{
++	GMAC_INFO_T		*tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++
++    /* enable TX/RX */
++    config0.bits32 = 0;
++    config0_mask.bits32 = 0;
++    config0.bits.dis_rx = 0;  /* enable rx */
++    config0.bits.dis_tx = 0;  /* enable tx */
++    config0_mask.bits.dis_rx = 1;
++    config0_mask.bits.dis_tx = 1;
++    gmac_write_reg(tp->base_addr, GMAC_CONFIG0, config0.bits32,config0_mask.bits32);
++}
++/*----------------------------------------------------------------------
++*	toe_gmac_disable_rx
++*----------------------------------------------------------------------*/
++#if 0
++static void toe_gmac_disable_rx(struct net_device *dev)
++{
++	GMAC_INFO_T		*tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++
++    /* disable RX */
++    config0.bits32 = 0;
++    config0_mask.bits32 = 0;
++    config0.bits.dis_rx = 1;  /* disable rx */
++//    config0.bits.dis_tx = 1;  /* disable tx */
++    config0_mask.bits.dis_rx = 1;
++//     config0_mask.bits.dis_tx = 1;
++    gmac_write_reg(tp->base_addr, GMAC_CONFIG0, config0.bits32,config0_mask.bits32);
++}
++#endif
++/*----------------------------------------------------------------------
++*	toe_gmac_enable_rx
++*----------------------------------------------------------------------*/
++#if 0
++static void toe_gmac_enable_rx(struct net_device *dev)
++{
++	GMAC_INFO_T		*tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++
++    /* enable RX */
++    config0.bits32 = 0;
++    config0_mask.bits32 = 0;
++    config0.bits.dis_rx = 0;  /* enable rx */
++//    config0.bits.dis_tx = 0;  /* enable tx */
++    config0_mask.bits.dis_rx = 1;
++//    config0_mask.bits.dis_tx = 1;
++    gmac_write_reg(tp->base_addr, GMAC_CONFIG0, config0.bits32,config0_mask.bits32);
++}
++#endif
++/*----------------------------------------------------------------------
++*	toe_gmac_disable_tx_rx
++*----------------------------------------------------------------------*/
++static void toe_gmac_disable_tx_rx(struct net_device *dev)
++{
++	GMAC_INFO_T		*tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++
++    /* disable TX/RX */
++    config0.bits32 = 0;
++    config0_mask.bits32 = 0;
++    config0.bits.dis_rx = 1;  /* disable rx */
++    config0.bits.dis_tx = 1;  /* disable tx */
++    config0_mask.bits.dis_rx = 1;
++    config0_mask.bits.dis_tx = 1;
++    gmac_write_reg(tp->base_addr, GMAC_CONFIG0, config0.bits32,config0_mask.bits32);
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_hw_start
++*----------------------------------------------------------------------*/
++static void toe_gmac_hw_start(struct net_device *dev)
++{
++	GMAC_INFO_T				*tp = (GMAC_INFO_T *)dev->priv;
++	GMAC_DMA_CTRL_T			dma_ctrl, dma_ctrl_mask;
++
++
++    /* program dma control register */
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.rd_enable = 1;
++	dma_ctrl.bits.td_enable = 1;
++	dma_ctrl.bits.loopback = 0;
++	dma_ctrl.bits.drop_small_ack = 0;
++	dma_ctrl.bits.rd_prot = 0;
++	dma_ctrl.bits.rd_burst_size = 3;
++	dma_ctrl.bits.rd_insert_bytes = RX_INSERT_BYTES;
++	dma_ctrl.bits.rd_bus = 3;
++	dma_ctrl.bits.td_prot = 0;
++	dma_ctrl.bits.td_burst_size = 3;
++	dma_ctrl.bits.td_bus = 3;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.rd_enable = 1;
++	dma_ctrl_mask.bits.td_enable = 1;
++	dma_ctrl_mask.bits.loopback = 1;
++	dma_ctrl_mask.bits.drop_small_ack = 1;
++	dma_ctrl_mask.bits.rd_prot = 3;
++	dma_ctrl_mask.bits.rd_burst_size = 3;
++	dma_ctrl_mask.bits.rd_insert_bytes = 3;
++	dma_ctrl_mask.bits.rd_bus = 3;
++	dma_ctrl_mask.bits.td_prot = 0x0f;
++	dma_ctrl_mask.bits.td_burst_size = 3;
++	dma_ctrl_mask.bits.td_bus = 3;
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++
++    return;
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_hw_stop
++*----------------------------------------------------------------------*/
++static void toe_gmac_hw_stop(struct net_device *dev)
++{
++	GMAC_INFO_T			*tp = (GMAC_INFO_T *)dev->priv;
++	GMAC_DMA_CTRL_T		dma_ctrl, dma_ctrl_mask;
++
++    /* program dma control register */
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.rd_enable = 0;
++	dma_ctrl.bits.td_enable = 0;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.rd_enable = 1;
++	dma_ctrl_mask.bits.td_enable = 1;
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_clear_counter
++*----------------------------------------------------------------------*/
++static int toe_gmac_clear_counter (struct net_device *dev)
++{
++	GMAC_INFO_T	*tp = (GMAC_INFO_T *)dev->priv;
++
++    /* clear counter */
++    gmac_read_reg(tp->base_addr, GMAC_IN_DISCARDS);
++    gmac_read_reg(tp->base_addr, GMAC_IN_ERRORS);
++    gmac_read_reg(tp->base_addr, GMAC_IN_MCAST);
++    gmac_read_reg(tp->base_addr, GMAC_IN_BCAST);
++    gmac_read_reg(tp->base_addr, GMAC_IN_MAC1);
++    gmac_read_reg(tp->base_addr, GMAC_IN_MAC2);
++    tp->ifStatics.tx_bytes = 0;
++    tp->ifStatics.tx_packets = 0;
++    tp->ifStatics.tx_errors = 0;
++    tp->ifStatics.rx_bytes = 0;
++    tp->ifStatics.rx_packets = 0;
++    tp->ifStatics.rx_errors = 0;
++    tp->ifStatics.rx_dropped = 0;
++	return (0);
++}
++
++
++/*----------------------------------------------------------------------
++*	toe_gmac_tx_complete
++*----------------------------------------------------------------------*/
++static  void toe_gmac_tx_complete(GMAC_INFO_T *tp, unsigned int tx_qid,
++   										struct net_device *dev, int interrupt)
++{
++	volatile GMAC_TXDESC_T	*curr_desc;
++	GMAC_TXDESC_0_T			word0;
++	GMAC_TXDESC_1_T			word1;
++	unsigned int			desc_count;
++//	struct net_device_stats *isPtr = (struct net_device_stats *)&tp->ifStatics;
++	GMAC_SWTXQ_T			*swtxq;
++	DMA_RWPTR_T				rwptr;
++
++	/* get tx H/W completed descriptor virtual address */
++	/* check tx status and accumulate tx statistics */
++	swtxq = &tp->swtxq[tx_qid];
++	swtxq->intr_cnt++;
++	for (;;)
++	{
++		rwptr.bits32 = readl(swtxq->rwptr_reg);
++		if (rwptr.bits.rptr == swtxq->finished_idx)
++			break;
++    	curr_desc = (volatile GMAC_TXDESC_T *)swtxq->desc_base + swtxq->finished_idx;
++//   		consistent_sync((void *)curr_desc, sizeof(GMAC_TXDESC_T), PCI_DMA_FROMDEVICE);
++		word0.bits32 = curr_desc->word0.bits32;
++		word1.bits32 = curr_desc->word1.bits32;
++
++		if (word0.bits.status_tx_ok)
++		{
++			tp->ifStatics.tx_bytes += word1.bits.byte_count;
++			desc_count = word0.bits.desc_count;
++			if (desc_count==0)
++			{
++				printk("%s::Desc 0x%x = 0x%x, desc_count=%d\n",__func__, (u32)curr_desc, word0.bits32, desc_count);
++				while(1);
++			}
++			while (--desc_count)
++			{
++				word0.bits.status_tx_ok = 0;
++				curr_desc->word0.bits32 = word0.bits32;
++				swtxq->finished_idx = RWPTR_ADVANCE_ONE(swtxq->finished_idx, swtxq->total_desc_num);
++				curr_desc = (GMAC_TXDESC_T *)swtxq->desc_base + swtxq->finished_idx;
++				word0.bits32 = curr_desc->word0.bits32;
++#ifdef _DUMP_TX_TCP_CONTENT
++				if (curr_desc->word0.bits.buffer_size < 16)
++				{
++					int a;
++					char *datap;
++					printk("\t Tx Finished Desc 0x%x Len %d Addr 0x%08x: ", (u32)curr_desc, curr_desc->word0.bits.buffer_size, curr_desc->word2.buf_adr);
++					datap = (char *)__va(curr_desc->word2.buf_adr);
++					for (a=0; a<8 && a<curr_desc->word0.bits.buffer_size; a++, datap++)
++					{
++						printk("0x%02x ", *datap);
++					}
++					printk("\n");
++				}
++#endif
++			}
++
++			word0.bits.status_tx_ok = 0;
++			if (swtxq->tx_skb[swtxq->finished_idx])
++			{
++				if (interrupt)
++					dev_kfree_skb_irq(swtxq->tx_skb[swtxq->finished_idx]);
++				else
++					dev_kfree_skb(swtxq->tx_skb[swtxq->finished_idx]);
++				swtxq->tx_skb[swtxq->finished_idx] = NULL;
++			}
++			curr_desc->word0.bits32 = word0.bits32;
++  			swtxq->curr_finished_desc = (GMAC_TXDESC_T *)curr_desc;
++ 			swtxq->total_finished++;
++  			tp->ifStatics.tx_packets++;
++			swtxq->finished_idx = RWPTR_ADVANCE_ONE(swtxq->finished_idx, swtxq->total_desc_num);
++		}
++		else
++		{
++			// tp->ifStatics.tx_errors++;
++			// printk("%s::Tx Descriptor is !!!\n",__func__);
++			// wait ready by breaking
++			break;
++		}
++	}
++
++	if (netif_queue_stopped(dev))
++	{
++		netif_wake_queue(dev);
++	}
++}
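++
++/* RWPTR_ADVANCE_ONE (defined in the driver headers) is used above as a modular
++ * ring-index increment; a minimal equivalent, assuming that is all the macro
++ * does (illustration only):
++ *
++ *	#define RWPTR_ADVANCE_ONE(idx, total)	(((idx) + 1 < (total)) ? ((idx) + 1) : 0)
++ */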
++
++/*----------------------------------------------------------------------
++*	gmac_start_xmit
++*----------------------------------------------------------------------*/
++static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	GMAC_INFO_T 			*tp= dev->priv;
++//	static unsigned int     pcount = 0;
++//	unsigned int			tx_qid;
++    DMA_RWPTR_T				rwptr;
++	volatile GMAC_TXDESC_T	*curr_desc;
++	int 					snd_pages = skb_shinfo(skb)->nr_frags + 1;  /* get number of descriptor */
++	int 					frag_id = 0;
++	int 					len, total_len = skb->len;
++	struct net_device_stats *isPtr;
++	unsigned int			free_desc;
++	GMAC_SWTXQ_T			*swtxq;
++	register unsigned long	word0, word1, word2, word3;
++	unsigned short			wptr, rptr;
++#ifdef	L2_jumbo_frame
++	int header_len = skb->len;
++	struct iphdr	*ip_hdr;
++    struct tcphdr	*tcp_hdr;
++    int             tcp_hdr_len;
++    unsigned char 	*ptr;
++    int             data_len,a;
++    unsigned int    val;
++#endif
++
++#ifdef GMAC_LEN_1_2_ISSUE
++	int						total_pages;
++	total_pages = snd_pages;
++#endif
++
++	isPtr = (struct net_device_stats *)&tp->ifStatics;
++#if 1
++	if (skb->len >= 0x10000)
++	{
++//		spin_unlock(&tp->tx_mutex);
++		isPtr->tx_dropped++;
++		printk("%s::[GMAC %d] skb->len %d >= 64K\n", __func__, tp->port_id, skb->len);
++		netif_stop_queue(dev);
++		return 1;
++    }
++#endif
++
++#if 0
++	if (storlink_ctl.recvfile==2)
++	{
++	    printk("snd_pages=%d skb->len=%d\n",snd_pages,skb->len);
++	}
++#endif
++
++#ifdef GMAC_USE_TXQ0
++	#define tx_qid 	0
++#endif
++
++	swtxq = &tp->swtxq[tx_qid];
++
++//	spin_lock(&tp->tx_mutex);
++    rwptr.bits32 = readl(swtxq->rwptr_reg);
++	wptr = rwptr.bits.wptr;
++	rptr = rwptr.bits.rptr;
++
++	// check for finished descriptors or an empty BD ring
++	// cannot rely on the read pointer of the RW PTR register alone,
++	// because the HW may have finished sending while the SW has not yet handled it
++#ifndef	GMAX_TX_INTR_DISABLED
++	if (wptr >= swtxq->finished_idx)
++		free_desc = swtxq->total_desc_num - wptr - 1 + swtxq->finished_idx;
++	else
++		free_desc = swtxq->finished_idx - wptr - 1;
++
++	if (free_desc < snd_pages)
++	{
++//		spin_unlock(&tp->tx_mutex);
++		isPtr->tx_dropped++;
++//		printk("GMAC %d No available descriptor!\n", tp->port_id);
++		netif_stop_queue(dev);
++		return 1;
++    }
++#else
++	toe_gmac_tx_complete(tp, tx_qid, dev, 0);
++
++	if (wptr >= swtxq->finished_idx)
++		free_desc = swtxq->total_desc_num - wptr - 1 + swtxq->finished_idx;
++	else
++		free_desc = swtxq->finished_idx - wptr - 1;
++	if (free_desc < snd_pages)
++	{
++//		spin_unlock(&tp->tx_mutex);
++		isPtr->tx_dropped++;
++//		printk("GMAC %d No available descriptor!\n", tp->port_id);
++		netif_stop_queue(dev);
++		return 1;
++    }
++
++#if 0
++	printk("1: free_desc=%d, wptr=%d, finished_idx=%d\n", free_desc, wptr, swtxq->finished_idx);
++	if ((free_desc < (snd_pages << 2)) ||
++	    (free_desc < (swtxq->total_desc_num >> 2)))
++	{
++		printk("2: free_desc = %d\n", free_desc);
++		toe_gmac_tx_complete(tp, tx_qid, dev, 0);
++		rwptr.bits32 = readl(swtxq->rwptr_reg);
++		wptr = rwptr.bits.wptr;
++		if (wptr>= swtxq->finished_idx)
++			free_desc = swtxq->total_desc_num - wptr -1 + swtxq->finished_idx;
++		else
++			free_desc = swtxq->finished_idx - wptr - 1;
++	}
++#endif
++#endif
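++
++	/* Worked example of the free-descriptor arithmetic above (illustration
++	 * only): with total_desc_num = 8, wptr = 6 and finished_idx = 2,
++	 * free_desc = 8 - 6 - 1 + 2 = 3; one slot is always left unused so the
++	 * write pointer never catches up with finished_idx. */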
++
++#ifdef	L2_jumbo_frame
++//		data_len = skb->len - 14 - ip_hdr->ihl *4 - tcp_hdr_len;
++//		if ((skb->nh.iph->protocol == __constant_htons(ETH_P_IP)) && ((skb->nh.iph->protocol & 0x00ff)  == IPPROTO_TCP))
++//		if (skb->nh.iph->protocol == 0x006 && (skb->nh.iph->protocol == __constant_htons(ETH_P_IP)))
++		if (((skb->nh.iph->protocol & 0x00ff)  == IPPROTO_TCP))
++		{
++				ip_hdr = (struct iphdr*)(skb->nh.iph);
++				tcp_hdr = (struct tcphdr*)(skb->h.th);
++				tcp_hdr_len = TCPHDRLEN(tcp_hdr) * 4;
++				tcp_hdr_len = TCPHDRLEN(tcp_hdr) * 4;
++
++				if ((skb->h.th->syn) && (tcp_hdr_len > 20))
++				{
++					ptr = (unsigned char *)(tcp_hdr+1);
++					if ((ptr[0] == 0x02) && (ptr[1] == 0x04) && (ptr[2] == 0x07) && (ptr[3] == 0xba)) // 0x07aa=2016-54=1962, 0x07ba=2032-54=1978
++					{
++						ptr[2]=0x20;	//23
++						ptr[3]=0x00;   	//00
++						printk("-----> Change MSS to 8K \n" );
++					}
++				}
++		}
++//		if ((ip_hdr->protocol & 0x00ff) != IPPROTO_TCP)
++//		if ((tcp_hdr_len > 20) && (skb->h.th->syn))
++#endif
++
++
++#if 0
++	if (snd_pages > 1)
++		printk("-----> snd_pages=%d\n", snd_pages);
++	if (total_len > 1514)
++	{
++		printk("-----> total_len=%d\n", total_len);
++	}
++#endif
++
++    while (snd_pages)
++    {
++    	char *pkt_datap;
++
++    	curr_desc = (GMAC_TXDESC_T *)swtxq->desc_base + wptr;
++//		consistent_sync((void *)curr_desc, sizeof(GMAC_TXDESC_T), PCI_DMA_FROMDEVICE);
++#if 0
++//#if (GMAC_DEBUG==1)
++    	// if curr_desc->word2.buf_adr !=0 means that the ISR does NOT handle it
++    	// if (curr_desc->word2.buf_adr)
++    	if (swtxq->tx_skb[wptr])
++    	{
++    		printk("Error! Stop due to TX descriptor's buffer is not freed!\n");
++    		while(1);
++    		dev_kfree_skb(swtxq->tx_skb[wptr]);
++    		swtxq->tx_skb[wptr] = NULL;
++		}
++#endif
++
++		if (frag_id == 0)
++		{
++#if 0
++			int i;
++			pkt_datap = skb->data;
++			len = total_len;
++			for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
++			{
++				skb_frag_t* frag = &skb_shinfo(skb)->frags[i];
++				len -= frag->size;
++			}
++#else
++			pkt_datap = skb->data;
++			len = total_len - skb->data_len;
++#endif
++		}
++		else
++		{
++			skb_frag_t* frag = &skb_shinfo(skb)->frags[frag_id-1];
++			pkt_datap = page_address(frag->page) + frag->page_offset;
++			len = frag->size;
++			if (len > total_len)
++			{
++				printk("===> Fatal Error! Send Frag size %d > Total Size %d!!!!!\n",
++					len, total_len);
++			}
++		}
++
++		/* set TX descriptor */
++		/* copy packet to descriptor buffer address */
++		// curr_desc->word0.bits32 = len;    /* total frame byte count */
++		word0 = len;
++#ifdef	L2_jumbo_frame
++		word3 = (dev->mtu+14) | EOFIE_BIT;  //2016 ,2032
++#else
++		word3 = 1514 | EOFIE_BIT;
++#endif
++
++#ifdef DO_HW_CHKSUM
++#ifdef	L2_jumbo_frame
++		if (total_len >= (dev->mtu+14) && (skb->nh.iph->protocol == 0x011) && skb->nh.iph && (skb->nh.iph->frag_off & __constant_htons(0x3fff)))
++#else
++		if (total_len <= 1514 && skb->nh.iph && (skb->nh.iph->frag_off & __constant_htons(0x3fff)))
++#endif
++			word1  = total_len |
++					TSS_IP_CHKSUM_BIT  |
++					TSS_IPV6_ENABLE_BIT |
++					TSS_MTU_ENABLE_BIT;
++		else
++			word1 = total_len |
++					TSS_UDP_CHKSUM_BIT |
++					TSS_TCP_CHKSUM_BIT |
++					TSS_IP_CHKSUM_BIT  |
++					TSS_IPV6_ENABLE_BIT |
++					TSS_MTU_ENABLE_BIT;
++#else
++		word1 = total_len | TSS_MTU_ENABLE_BIT;
++#endif
++		word2 = (unsigned long)__pa(pkt_datap);
++
++		if (frag_id == 0)
++		{
++			word3 |= SOF_BIT;	// SOF
++		}
++
++		if (snd_pages == 1)
++		{
++			word3 |= EOF_BIT;	// EOF
++			swtxq->tx_skb[wptr] = skb;
++#ifdef CONFIG_SL351x_NAT
++			if (nat_cfg.enabled && sl351x_nat_output(skb, tp->port_id))
++				word1 |= TSS_IP_FIXED_LEN_BIT;
++#endif
++		}
++		else
++			swtxq->tx_skb[wptr] = NULL;
++		// word1 |= TSS_IP_FIXED_LEN_BIT;
++#if 1
++#ifdef CONFIG_SL351x_RXTOE
++		// check whether this frame should set up a TOE hash entry;
++		// if rx_max_pktsize == 0, do not enable RXTOE
++		if (TCP_SKB_CB(skb)->connection && storlink_ctl.rx_max_pktsize) {
++			set_toeq_hdr(TCP_SKB_CB(skb)->connection, &toe_private_data, dev);
++		}
++#endif
++#endif
++#ifdef _DUMP_TX_TCP_CONTENT
++		if (len < 16 && frag_id && skb->h.th && (skb->h.th->source == __constant_htons(445) || skb->h.th->source == __constant_htons(139)))
++		{
++			int a;
++			char *datap;
++			printk("Tx Desc 0x%x Frag %d Len %d [IP-ID 0x%x] 0x%08x: ", (u32)curr_desc, frag_id, len, htons(skb->nh.iph->id), (u32)pkt_datap);
++			datap = (char *)pkt_datap;
++			for (a=0; a<8 && a<len; a++, datap++)
++			{
++				printk("0x%02x ", *datap);
++			}
++			printk("\n");
++		}
++#endif
++
++		consistent_sync((void *)pkt_datap, len, PCI_DMA_TODEVICE);
++		wmb();
++		curr_desc->word0.bits32 = word0;
++		curr_desc->word1.bits32 = word1;
++		curr_desc->word2.bits32 = word2;
++		curr_desc->word3.bits32 = word3;
++		swtxq->curr_tx_desc = (GMAC_TXDESC_T *)curr_desc;
++//		consistent_sync((void *)curr_desc, sizeof(GMAC_TXDESC_T), PCI_DMA_TODEVICE);
++#ifdef _DUMP_TX_TCP_CONTENT
++		if (len < 16 && frag_id && skb->h.th && (skb->h.th->source == __constant_htons(445) || skb->h.th->source == __constant_htons(139)))
++		{
++			int a;
++			char *datap;
++			printk("\t 0x%08x: ", (u32)pkt_datap);
++			datap = (char *)pkt_datap;
++			for (a=0; a<8 && a<len; a++, datap++)
++			{
++				printk("0x%02x ", *datap);
++			}
++			printk("\n");
++		}
++#endif
++		wptr = RWPTR_ADVANCE_ONE(wptr, swtxq->total_desc_num);
++		frag_id++;
++		snd_pages--;
++	}
++
++    swtxq->total_sent++;
++	SET_WPTR(swtxq->rwptr_reg, wptr);
++	dev->trans_start = jiffies;
++
++
++	// printk("MAC %d Qid %d rwptr = 0x%x, curr_desc=0x%x\n", skb->tx_port_id, tx_qid, rwptr.bits32, curr_desc);
++//#ifdef	GMAX_TX_INTR_DISABLED
++//		toe_gmac_tx_complete(tp, tx_qid, dev, 0);
++//#endif
++	return (0);
++}
++
++/*----------------------------------------------------------------------
++* gmac_set_mac_address
++*----------------------------------------------------------------------*/
++
++static int gmac_set_mac_address(struct net_device *dev, void *addr)
++{
++	GMAC_INFO_T		*tp= dev->priv;
++	struct sockaddr *sock;
++	unsigned int    reg_val;
++    unsigned int    i;
++
++	sock = (struct sockaddr *) addr;
++	for (i = 0; i < 6; i++)
++	{
++		dev->dev_addr[i] = sock->sa_data[i];
++	}
++
++    reg_val = dev->dev_addr[0] + (dev->dev_addr[1]<<8) + (dev->dev_addr[2]<<16) + (dev->dev_addr[3]<<24);
++    gmac_write_reg(tp->base_addr,GMAC_STA_ADD0,reg_val,0xffffffff);
++    reg_val = dev->dev_addr[4] + (dev->dev_addr[5]<<8);
++    gmac_write_reg(tp->base_addr,GMAC_STA_ADD1,reg_val,0x0000ffff);
++	memcpy(&eth_mac[tp->port_id][0], &dev->dev_addr[0], 6);
++
++    printk("Storlink %s address = ",dev->name);
++    printk("%02x",dev->dev_addr[0]);
++    printk("%02x",dev->dev_addr[1]);
++    printk("%02x",dev->dev_addr[2]);
++    printk("%02x",dev->dev_addr[3]);
++    printk("%02x",dev->dev_addr[4]);
++    printk("%02x\n",dev->dev_addr[5]);
++
++    return (0);
++}
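++
++/* Usage sketch for gmac_set_mac_address(); illustration only, and "new_mac"
++ * is a hypothetical caller-supplied 6-byte array that does not exist in this
++ * file.
++ */
++#if 0
++	struct sockaddr sa;
++
++	memcpy(sa.sa_data, new_mac, 6);		/* new_mac: hypothetical 6-byte MAC */
++	gmac_set_mac_address(dev, (void *)&sa);
++#endif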
++
++/*----------------------------------------------------------------------
++* gmac_get_mac_address
++*	get mac address from FLASH
++*----------------------------------------------------------------------*/
++static void gmac_get_mac_address(void)
++{
++#ifdef CONFIG_MTD
++	extern int get_vlaninfo(vlaninfo* vlan);
++    static vlaninfo    vlan[2];
++
++    if (get_vlaninfo(&vlan[0]))
++    {
++        memcpy((void *)&eth_mac[0][0], vlan[0].mac, 6);
++        // VLAN_conf[0].vid = vlan[0].vlanid;
++        // VLAN_conf[0].portmap = vlan[0].vlanmap;
++        memcpy((void *)&eth_mac[1][0], vlan[1].mac, 6);
++        // VLAN_conf[1].vid = vlan[1].vlanid;
++        // VLAN_conf[1].portmap = vlan[1].vlanmap;
++    }
++#else
++    unsigned int reg_val;
++
++    reg_val = readl(IO_ADDRESS(TOE_GMAC0_BASE)+0xac);
++    eth_mac[0][4] = (reg_val & 0xff00) >> 8;
++    eth_mac[0][5] = reg_val & 0x00ff;
++    reg_val = readl(IO_ADDRESS(SL2312_SECURITY_BASE)+0xac);
++    eth_mac[1][4] = (reg_val & 0xff00) >> 8;
++    eth_mac[1][5] = reg_val & 0x00ff;
++#endif
++    return;
++}
++
++
++/*----------------------------------------------------------------------
++* mac_stop_txdma
++*----------------------------------------------------------------------*/
++void mac_stop_txdma(struct net_device *dev)
++{
++	GMAC_INFO_T				*tp = (GMAC_INFO_T *)dev->priv;
++	GMAC_DMA_CTRL_T			dma_ctrl, dma_ctrl_mask;
++	GMAC_TXDMA_FIRST_DESC_T	txdma_busy;
++
++	// wait idle
++	do
++	{
++		txdma_busy.bits32 = gmac_read_reg(tp->dma_base_addr, GMAC_DMA_TX_FIRST_DESC_REG);
++	} while (txdma_busy.bits.td_busy);
++
++    /* program dma control register */
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.rd_enable = 0;
++	dma_ctrl.bits.td_enable = 0;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.rd_enable = 1;
++	dma_ctrl_mask.bits.td_enable = 1;
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++}
++
++/*----------------------------------------------------------------------
++* mac_start_txdma
++*----------------------------------------------------------------------*/
++void mac_start_txdma(struct net_device *dev)
++{
++	GMAC_INFO_T			*tp = (GMAC_INFO_T *)dev->priv;
++	GMAC_DMA_CTRL_T		dma_ctrl, dma_ctrl_mask;
++
++    /* program dma control register */
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.rd_enable = 1;
++	dma_ctrl.bits.td_enable = 1;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.rd_enable = 1;
++	dma_ctrl_mask.bits.td_enable = 1;
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++}
++
++
++/*----------------------------------------------------------------------
++* gmac_get_stats
++*----------------------------------------------------------------------*/
++
++struct net_device_stats * gmac_get_stats(struct net_device *dev)
++{
++    GMAC_INFO_T *tp = (GMAC_INFO_T *)dev->priv;
++    // unsigned int        flags;
++    unsigned int        pkt_drop;
++    unsigned int        pkt_error;
++
++    if (netif_running(dev))
++    {
++        /* read H/W counter */
++        // spin_lock_irqsave(&tp->lock,flags);
++        pkt_drop = gmac_read_reg(tp->base_addr,GMAC_IN_DISCARDS);
++        pkt_error = gmac_read_reg(tp->base_addr,GMAC_IN_ERRORS);
++        tp->ifStatics.rx_dropped = tp->ifStatics.rx_dropped + pkt_drop;
++        tp->ifStatics.rx_errors = tp->ifStatics.rx_errors + pkt_error;
++        // spin_unlock_irqrestore(&tp->lock,flags);
++    }
++    return &tp->ifStatics;
++}
++
++
++
++/*----------------------------------------------------------------------
++* mac_get_sw_tx_weight
++*----------------------------------------------------------------------*/
++void mac_get_sw_tx_weight(struct net_device *dev, char *weight)
++{
++	GMAC_TX_WCR1_T	sw_weigh;
++    GMAC_INFO_T		*tp = (GMAC_INFO_T *)dev->priv;
++
++	sw_weigh.bits32 = gmac_read_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_1_REG);
++
++	weight[0] = sw_weigh.bits.sw_tq0;
++   	weight[1] = sw_weigh.bits.sw_tq1;
++   	weight[2] = sw_weigh.bits.sw_tq2;
++   	weight[3] = sw_weigh.bits.sw_tq3;
++   	weight[4] = sw_weigh.bits.sw_tq4;
++   	weight[5] = sw_weigh.bits.sw_tq5;
++}
++
++/*----------------------------------------------------------------------
++* mac_set_sw_tx_weight
++*----------------------------------------------------------------------*/
++void mac_set_sw_tx_weight(struct net_device *dev, char *weight)
++{
++	GMAC_TX_WCR1_T	sw_weigh;
++    GMAC_INFO_T		*tp = (GMAC_INFO_T *)dev->priv;
++
++	sw_weigh.bits32 = 0;
++	sw_weigh.bits.sw_tq0 = weight[0];
++   	sw_weigh.bits.sw_tq1 = weight[1];
++   	sw_weigh.bits.sw_tq2 = weight[2];
++   	sw_weigh.bits.sw_tq3 = weight[3];
++   	sw_weigh.bits.sw_tq4 = weight[4];
++   	sw_weigh.bits.sw_tq5 = weight[5];
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_1_REG, sw_weigh.bits32, 0xffffffff);
++}
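++
++/* Usage sketch (illustration only): give all six software TX queues equal
++ * scheduling weight through mac_set_sw_tx_weight().
++ */
++#if 0
++	char w[6] = { 1, 1, 1, 1, 1, 1 };
++
++	mac_set_sw_tx_weight(dev, w);
++#endif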
++
++/*----------------------------------------------------------------------
++* mac_get_hw_tx_weight
++*----------------------------------------------------------------------*/
++void mac_get_hw_tx_weight(struct net_device *dev, char *weight)
++{
++	GMAC_TX_WCR0_T	hw_weigh;
++    GMAC_INFO_T		*tp = (GMAC_INFO_T *)dev->priv;
++
++	hw_weigh.bits32 = gmac_read_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_0_REG);
++
++	weight[0] = hw_weigh.bits.hw_tq0;
++   	weight[1] = hw_weigh.bits.hw_tq1;
++   	weight[2] = hw_weigh.bits.hw_tq2;
++   	weight[3] = hw_weigh.bits.hw_tq3;
++}
++
++/*----------------------------------------------------------------------
++* mac_set_hw_tx_weight
++*----------------------------------------------------------------------*/
++void mac_set_hw_tx_weight(struct net_device *dev, char *weight)
++{
++	GMAC_TX_WCR0_T	hw_weigh;
++    GMAC_INFO_T		*tp = (GMAC_INFO_T *)dev->priv;
++
++	hw_weigh.bits32 = 0;
++	hw_weigh.bits.hw_tq0 = weight[0];
++   	hw_weigh.bits.hw_tq1 = weight[1];
++   	hw_weigh.bits.hw_tq2 = weight[2];
++   	hw_weigh.bits.hw_tq3 = weight[3];
++
++	gmac_write_reg(tp->dma_base_addr, GMAC_TX_WEIGHTING_CTRL_0_REG, hw_weigh.bits32, 0xffffffff);
++}
++
++/*----------------------------------------------------------------------
++* mac_start_tx_dma
++*----------------------------------------------------------------------*/
++int mac_start_tx_dma(int mac)
++{
++	GMAC_DMA_CTRL_T dma_ctrl, dma_ctrl_mask;
++
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.td_enable = 1;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.td_enable = 1;
++
++	if (mac == 0)
++    	gmac_write_reg(TOE_GMAC0_DMA_BASE, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++	else
++    	gmac_write_reg(TOE_GMAC1_DMA_BASE, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++	return	1;
++}
++
++/*----------------------------------------------------------------------
++* mac_stop_tx_dma
++*----------------------------------------------------------------------*/
++int mac_stop_tx_dma(int mac)
++{
++	GMAC_DMA_CTRL_T dma_ctrl, dma_ctrl_mask;
++
++	dma_ctrl.bits32 = 0;
++	dma_ctrl.bits.td_enable = 0;
++
++	dma_ctrl_mask.bits32 = 0;
++	dma_ctrl_mask.bits.td_enable = 1;
++
++	if (mac == 0)
++    	gmac_write_reg(TOE_GMAC0_DMA_BASE, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++	else
++    	gmac_write_reg(TOE_GMAC1_DMA_BASE, GMAC_DMA_CTRL_REG, dma_ctrl.bits32, dma_ctrl_mask.bits32);
++	return	1;
++}
++
++/*----------------------------------------------------------------------
++* mac_read_reg(int mac, unsigned int offset)
++*----------------------------------------------------------------------*/
++unsigned int mac_read_reg(int mac, unsigned int offset)
++{
++	switch (mac)
++	{
++		case 0:
++			return gmac_read_reg(TOE_GMAC0_BASE, offset);
++		case 1:
++			return gmac_read_reg(TOE_GMAC1_BASE, offset);
++		default:
++			return 0;
++	}
++}
++
++/*----------------------------------------------------------------------
++* mac_write_reg
++*----------------------------------------------------------------------*/
++void mac_write_reg(int mac, unsigned int offset, unsigned data)
++{
++	switch (mac)
++	{
++		case 0:
++			gmac_write_reg(GMAC0_BASE, offset, data, 0xffffffff);
++			break;
++		case 1:
++			gmac_write_reg(GMAC1_BASE, offset, data, 0xffffffff);
++			break;
++	}
++}
++
++/*----------------------------------------------------------------------
++* mac_read_dma_reg(int mac, unsigned int offset)
++*----------------------------------------------------------------------*/
++u32 mac_read_dma_reg(int mac, unsigned int offset)
++{
++	switch (mac)
++	{
++		case 0:
++			return gmac_read_reg(TOE_GMAC0_DMA_BASE, offset);
++		case 1:
++			return gmac_read_reg(TOE_GMAC1_DMA_BASE, offset);
++		default:
++			return 0;
++	}
++}
++
++/*----------------------------------------------------------------------
++* mac_write_dma_reg
++*----------------------------------------------------------------------*/
++void mac_write_dma_reg(int mac, unsigned int offset, u32 data)
++{
++	switch (mac)
++	{
++		case 0:
++			gmac_write_reg(TOE_GMAC0_DMA_BASE, offset, data, 0xffffffff);
++			break;
++		case 1:
++			gmac_write_reg(TOE_GMAC1_DMA_BASE, offset, data, 0xffffffff);
++			break;
++	}
++}
++
++/*----------------------------------------------------------------------
++* ether_crc
++*----------------------------------------------------------------------*/
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static unsigned int ether_crc (int length, unsigned char *data)
++{
++	int crc = -1;
++	unsigned int i;
++	unsigned int crc_val=0;
++
++	while (--length >= 0) {
++		unsigned char current_octet = *data++;
++		int bit;
++		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
++			crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ?
++			     ethernet_polynomial : 0);
++	}
++	crc = ~crc;
++	for (i=0;i<32;i++)
++	{
++		crc_val = crc_val + (((crc << i) & 0x80000000) >> (31-i));
++	}
++	return crc_val;
++}
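++
++/* A common use of the bit-reversed CRC returned above is to index a 64-bit
++ * multicast hash filter; a minimal sketch, where mc_addr/filter0/filter1 are
++ * hypothetical names and the exact bit selection used by this hardware is an
++ * assumption (illustration only):
++ */
++#if 0
++	unsigned int bit = ether_crc(6, mc_addr) >> 26;		/* top 6 bits -> 0..63 */
++	if (bit < 32)
++		filter0 |= 1 << bit;			/* candidate for GMAC_MCAST_FIL0 */
++	else
++		filter1 |= 1 << (bit - 32);		/* candidate for GMAC_MCAST_FIL1 */
++#endif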
++
++
++
++/*----------------------------------------------------------------------
++* mac_set_rx_mode
++*----------------------------------------------------------------------*/
++void mac_set_rx_mode(int pid, unsigned int data)
++{
++	unsigned int	base;
++
++	base = (pid == 0) ? GMAC0_BASE : GMAC1_BASE;
++
++    gmac_write_reg(base, GMAC_RX_FLTR, data, 0x0000001f);
++    return;
++}
++
++
++/*----------------------------------------------------------------------
++* gmac_open
++*----------------------------------------------------------------------*/
++
++static int gmac_open (struct net_device *dev)
++{
++	GMAC_INFO_T  *tp = (GMAC_INFO_T *)dev->priv;
++	int    					retval;
++	TOE_INFO_T				*toe;
++	toe = (TOE_INFO_T *)&toe_private_data;
++
++    /* hook ISR */
++	retval = request_irq (dev->irq, toe_gmac_interrupt, SA_INTERRUPT, dev->name, dev);
++	if (retval)
++		return retval;
++
++	toe_init_gmac(dev);
++
++	if(!FLAG_SWITCH)
++	{
++    	init_waitqueue_head (&tp->thr_wait);
++    	init_completion(&tp->thr_exited);
++
++    	tp->time_to_die = 0;
++    	tp->thr_pid = kernel_thread (gmac_phy_thread, dev, CLONE_FS | CLONE_FILES);
++    	if (tp->thr_pid < 0)
++    	{
++    		printk (KERN_WARNING "%s: unable to start kernel thread\n",dev->name);
++    	}
++    }
++
++	tp->operation = 1;
++
++   	netif_start_queue (dev);
++
++	return (0);
++}
++
++/*----------------------------------------------------------------------
++* gmac_close
++*----------------------------------------------------------------------*/
++static int gmac_close(struct net_device *dev)
++{
++    TOE_INFO_T			*toe;
++// 	GMAC_RXDESC_T		*sw_desc_ptr,*desc_ptr;
++// 	unsigned int		buf_ptr;
++	GMAC_INFO_T 	*tp = dev->priv;
++	unsigned int		ret;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++
++	tp->operation = 0;
++
++    netif_stop_queue(dev);
++    mdelay(20);
++
++    /* stop tx/rx packet */
++    toe_gmac_disable_tx_rx(dev);
++    mdelay(20);
++
++    /* stop the chip's Tx and Rx DMA processes */
++	toe_gmac_hw_stop(dev);
++
++	toe_gmac_disable_interrupt(tp->irq);
++
++    /* disable interrupts by clearing the interrupt mask */
++    synchronize_irq();
++    free_irq(dev->irq,dev);
++
++//	DMA_MFREE(sw_desc_ptr, (TOE_SW_FREEQ_DESC_NUM * sizeof(GMAC_RXDESC_T),(dma_addr_t *)&toe->sw_freeq_desc_base_dma);
++//	DMA_MFREE(desc_ptr, TOE_HW_FREEQ_DESC_NUM * sizeof(GMAC_RXDESC_T),(dma_addr_t *)&toe->hw_freeq_desc_base_dma);
++//	DMA_MFREE(buf_ptr, TOE_HW_FREEQ_DESC_NUM) * HW_RX_BUF_SIZE),(dma_addr_t *)&toe->hwfq_buf_base_dma);
++//	DMA_MFREE(toe->gmac[0].swtxq_desc_base , TOE_GMAC0_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[0].swtxq_desc_base_dma);
++//	DMA_MFREE(toe->gmac[1].swtxq_desc_base , TOE_GMAC0_SWTXQ_DESC_NUM * TOE_SW_TXQ_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[1].swtxq_desc_base_dma);
++//	DMA_MFREE(toe->gmac[0].hwtxq_desc_base_dma , TOE_GMAC0_HWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[0].hwtxq_desc_base_dma);
++//	DMA_MFREE(toe->gmac[1].hwtxq_desc_base_dma , TOE_GMAC0_SWTXQ_DESC_NUM * TOE_HW_TXQ_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[1].hwtxq_desc_base_dma);
++//	DMA_MFREE(toe->gmac[0].default_desc_base_dma ,TOE_DEFAULT_Q0_DESC_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[0].default_desc_base_dma);
++//	DMA_MFREE(toe->gmac[1].default_desc_base_dma , TOE_DEFAULT_Q0_DESC_NUM * sizeof(GMAC_TXDESC_T),(dma_addr_t *)&toe->gmac[1].default_desc_base_dma);
++//	DMA_MFREE(toe->intr_desc_base_dma , TOE_INTR_QUEUE_NUM * TOE_INTR_DESC_NUM * sizeof(GMAC_RXDESC_T),(dma_addr_t *)&toe->intr_desc_base_dma);
++//	DMA_MFREE(toe->intr_buf_base_dma , TOE_INTR_DESC_NUM * sizeof(TOE_QHDR_T),(dma_addr_t *)&toe->intr_buf_base_dma);
++
++	if(!FLAG_SWITCH)
++	{
++    	if (tp->thr_pid >= 0)
++    	{
++		    tp->time_to_die = 1;
++    		wmb();
++    		ret = kill_proc (tp->thr_pid, SIGTERM, 1);
++    		if (ret)
++    		{
++    			printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
++    			return ret;
++    		}
++//    		wait_for_completion (&tp->thr_exited);
++    	}
++    }
++
++    return (0);
++}
++
++/*----------------------------------------------------------------------
++* toe_gmac_fill_free_q
++* allocate buffers for free queue.
++*----------------------------------------------------------------------*/
++static inline void toe_gmac_fill_free_q(void)
++{
++	struct sk_buff	*skb;
++	volatile DMA_RWPTR_T	fq_rwptr;
++	volatile GMAC_RXDESC_T	*fq_desc;
++	unsigned long	flags;
++	// unsigned short max_cnt=TOE_SW_FREEQ_DESC_NUM>>1;
++
++	fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++	spin_lock_irqsave(&gmac_fq_lock, flags);
++	//while ((max_cnt--) && (unsigned short)RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr,
++	//				TOE_SW_FREEQ_DESC_NUM) != fq_rwptr.bits.rptr) {
++	while ((unsigned short)RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr,
++					TOE_SW_FREEQ_DESC_NUM) != fq_rwptr.bits.rptr) {
++		if ((skb = dev_alloc_skb(SW_RX_BUF_SIZE)) == NULL) {
++			printk("%s::skb allocation fail!\n", __func__);
++			//while(1);
++			break;
++		}
++		REG32(skb->data) = (unsigned int)skb;
++		skb_reserve(skb, SKB_RESERVE_BYTES);
++		// fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++		fq_rwptr.bits.wptr = RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr,
++			TOE_SW_FREEQ_DESC_NUM);
++		fq_desc = (GMAC_RXDESC_T*)toe_private_data.swfq_desc_base+fq_rwptr.bits.wptr;
++		fq_desc->word2.buf_adr = (unsigned int)__pa(skb->data);
++		SET_WPTR(TOE_GLOBAL_BASE+GLOBAL_SWFQ_RWPTR_REG, fq_rwptr.bits.wptr);
++		toe_private_data.fq_rx_rwptr.bits32 = fq_rwptr.bits32;
++	}
++	spin_unlock_irqrestore(&gmac_fq_lock, flags);
++}
++// EXPORT_SYMBOL(toe_gmac_fill_free_q);
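++
++/* Sketch of how an RX handler is assumed to recover the skb from a free-queue
++ * buffer: the skb pointer was stored in the first word of the buffer before
++ * skb_reserve(), so it sits SKB_RESERVE_BYTES in front of the address written
++ * to buf_adr above (illustration only, not part of the original driver):
++ */
++#if 0
++	skb = (struct sk_buff *)REG32((unsigned int)__va(fq_desc->word2.buf_adr) - SKB_RESERVE_BYTES);
++#endif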
++
++/*----------------------------------------------------------------------
++* toe_gmac_interrupt
++*----------------------------------------------------------------------*/
++static irqreturn_t toe_gmac_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
++{
++	struct net_device   *dev = (struct net_device *)dev_instance;
++	TOE_INFO_T			*toe;
++	GMAC_INFO_T 		*tp = (GMAC_INFO_T *)dev->priv;
++	unsigned int		status0;
++	unsigned int		status1;
++	unsigned int		status2;
++	unsigned int		status3;
++	unsigned int		status4;
++
++//	struct net_device_stats *isPtr = (struct net_device_stats *)&tp->ifStatics;
++	toe = (TOE_INFO_T *)&toe_private_data;
++//	handle NAPI
++#ifdef CONFIG_SL_NAPI
++if (storlink_ctl.pauseoff == 1)
++{
++/* disable GMAC interrupt */
++    //toe_gmac_disable_interrupt(tp->irq);
++
++//	isPtr->interrupts++;
++	/* read Interrupt status */
++	status0 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_0_REG);
++	status1 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_1_REG);
++	status2 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_2_REG);
++	status3 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_3_REG);
++	status4 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++	// prompt warning if status bit ON but not enabled
++#if 0
++	if (status0 & ~tp->intr0_enabled)
++		printk("Intr 0 Status error. status = 0x%X, enable = 0x%X\n",
++				status0, tp->intr0_enabled);
++	if (status1 & ~tp->intr1_enabled)
++		printk("Intr 1 Status error. status = 0x%X, enable = 0x%X\n",
++				status1, tp->intr1_enabled);
++	if (status2 & ~tp->intr2_enabled)
++		printk("Intr 2 Status error. status = 0x%X, enable = 0x%X\n",
++				status2, tp->intr2_enabled);
++	if (status3 & ~tp->intr3_enabled)
++		printk("Intr 3 Status error. status = 0x%X, enable = 0x%X\n",
++				status3, tp->intr3_enabled);
++	if (status4 & ~tp->intr4_enabled)
++		printk("Intr 4 Status error. status = 0x%X, enable = 0x%X\n",
++				status4, tp->intr4_enabled);
++#endif
++
++	if (status0)
++		writel(status0 & tp->intr0_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_0_REG);
++	if (status1)
++		writel(status1 & tp->intr1_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_1_REG);
++	if (status2)
++		writel(status2 & tp->intr2_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_2_REG);
++	if (status3)
++		writel(status3 & tp->intr3_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_3_REG);
++	if (status4)
++		writel(status4 & tp->intr4_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_4_REG);
++#if 0
++	/* handle freeq interrupt first */
++	if (status4 & tp->intr4_enabled) {
++		if ((status4 & SWFQ_EMPTY_INT_BIT) && (tp->intr4_enabled & SWFQ_EMPTY_INT_BIT))
++		{
++			// unsigned long data = REG32(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++			//gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_4_REG,
++			//	tp->intr4_enabled & ~SWFQ_EMPTY_INT_BIT, SWFQ_EMPTY_INT_BIT);
++
++			if (toe->gmac[0].dev && netif_running(toe->gmac[0].dev))
++				toe_gmac_handle_default_rxq(toe->gmac[0].dev,&toe->gmac[0]);
++			if (toe->gmac[1].dev && netif_running(toe->gmac[1].dev))
++				toe_gmac_handle_default_rxq(toe->gmac[1].dev,&toe->gmac[1]);
++			printk("\nfreeq int\n");
++			toe_gmac_fill_free_q();
++			tp->sw_fq_empty_cnt++;
++
++		}
++	}
++#endif
++	// Interrupt Status 1
++	if (status1 & tp->intr1_enabled)
++	{
++		#define G1_INTR0_BITS	(GMAC1_HWTQ13_EOF_INT_BIT | GMAC1_HWTQ12_EOF_INT_BIT | GMAC1_HWTQ11_EOF_INT_BIT | GMAC1_HWTQ10_EOF_INT_BIT)
++		#define G0_INTR0_BITS	(GMAC0_HWTQ03_EOF_INT_BIT | GMAC0_HWTQ02_EOF_INT_BIT | GMAC0_HWTQ01_EOF_INT_BIT | GMAC0_HWTQ00_EOF_INT_BIT)
++		// Handle GMAC 0/1 HW Tx queue 0-3 EOF events: only count them here.
++		// TOE, classification, and default queue interrupts are handled by the ISR
++		// because they must pass packets to the upper layer
++		if (tp->port_id == 0)
++		{
++			if (netif_running(dev) && (status1 & G0_INTR0_BITS) && (tp->intr1_enabled & G0_INTR0_BITS))
++			{
++				if (status1 & GMAC0_HWTQ03_EOF_INT_BIT)
++					tp->hwtxq[3].eof_cnt++;
++				if (status1 & GMAC0_HWTQ02_EOF_INT_BIT)
++					tp->hwtxq[2].eof_cnt++;
++				if (status1 & GMAC0_HWTQ01_EOF_INT_BIT)
++					tp->hwtxq[1].eof_cnt++;
++				if (status1 & GMAC0_HWTQ00_EOF_INT_BIT)
++					tp->hwtxq[0].eof_cnt++;
++			}
++				if (netif_running(dev) && (status1 & DEFAULT_Q0_INT_BIT) && (tp->intr1_enabled & DEFAULT_Q0_INT_BIT))
++				{
++					if (likely(netif_rx_schedule_prep(dev)))
++        			{
++        				unsigned int data32;
++        				// disable GMAC-0 rx interrupt
++        				// class-Q & TOE-Q support to be implemented in the future
++        				//data32 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++        				//data32 &= ~DEFAULT_Q0_INT_BIT;
++						//writel(data32, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++						//printk("\%s: DEFAULT_Q0_INT_BIT===================>>>>>>>>>>>>\n",__func__);
++						writel(0x0, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_ENABLE_1_REG);
++						//tp->total_q_cnt_napi=0;
++						//rx_time = jiffies;
++						//rx_old_bytes = isPtr->rx_bytes;
++            			__netif_rx_schedule(dev);
++        			}
++			}
++		}
++		else if (tp->port_id == 1)
++		{
++			if (netif_running(dev) && (status1 & G1_INTR0_BITS) && (tp->intr1_enabled & G1_INTR0_BITS))
++			{
++				if (status1 & GMAC1_HWTQ13_EOF_INT_BIT)
++					tp->hwtxq[3].eof_cnt++;
++				if (status1 & GMAC1_HWTQ12_EOF_INT_BIT)
++					tp->hwtxq[2].eof_cnt++;
++				if (status1 & GMAC1_HWTQ11_EOF_INT_BIT)
++					tp->hwtxq[1].eof_cnt++;
++				if (status1 & GMAC1_HWTQ10_EOF_INT_BIT)
++					tp->hwtxq[0].eof_cnt++;
++			}
++
++			if (netif_running(dev) && (status1 & DEFAULT_Q1_INT_BIT) && (tp->intr1_enabled & DEFAULT_Q1_INT_BIT))
++			{
++				if (likely(netif_rx_schedule_prep(dev)))
++        		{
++        			unsigned int data32;
++         			// disable GMAC-0 rx interrupt
++        			// class-Q & TOE-Q are implemented in future
++        			//data32 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++        			//data32 &= ~DEFAULT_Q1_INT_BIT;
++					//writel(data32, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++					//printk("\%s: 1111111111--->DEFAULT_Q1_INT_BIT===================>>>>>>>>>>>>\n",__func__);
++					writel(0x0, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_ENABLE_1_REG);
++					//tp->total_q_cnt_napi=0;
++					//rx_time = jiffies;
++					//rx_old_bytes = isPtr->rx_bytes;
++           			__netif_rx_schedule(dev);
++        		}
++			}
++		}
++	}
++
++	// Interrupt Status 0
++	if (status0 & tp->intr0_enabled)
++	{
++		#define ERR_INTR_BITS	(GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT |	\
++								 GMAC1_TXDERR_INT_BIT | GMAC1_TXPERR_INT_BIT |	\
++								 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT |	\
++								 GMAC1_RXDERR_INT_BIT | GMAC1_RXPERR_INT_BIT)
++
++		if (status0 &  ERR_INTR_BITS)
++		{
++			if ((status0 & GMAC0_TXDERR_INT_BIT) && (tp->intr0_enabled & GMAC0_TXDERR_INT_BIT))
++			{
++				tp->txDerr_cnt[0]++;
++				printk("GMAC0 TX AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC0_TXPERR_INT_BIT) && (tp->intr0_enabled & GMAC0_TXPERR_INT_BIT))
++			{
++				tp->txPerr_cnt[0]++;
++				printk("GMAC0 Tx Descriptor Protocol Error!\n");
++			}
++			if ((status0 & GMAC1_TXDERR_INT_BIT) && (tp->intr0_enabled & GMAC1_TXDERR_INT_BIT))
++			{
++				tp->txDerr_cnt[1]++;
++				printk("GMAC1 Tx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC1_TXPERR_INT_BIT) && (tp->intr0_enabled & GMAC1_TXPERR_INT_BIT))
++			{
++				tp->txPerr_cnt[1]++;
++				printk("GMAC1 Tx Descriptor Protocol Error!\n");
++			}
++
++			if ((status0 & GMAC0_RXDERR_INT_BIT) && (tp->intr0_enabled & GMAC0_RXDERR_INT_BIT))
++			{
++				tp->RxDerr_cnt[0]++;
++				printk("GMAC0 Rx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC0_RXPERR_INT_BIT) && (tp->intr0_enabled & GMAC0_RXPERR_INT_BIT))
++			{
++				tp->RxPerr_cnt[0]++;
++				printk("GMAC0 Rx Descriptor Protocol Error!\n");
++			}
++			if ((status0 & GMAC1_RXDERR_INT_BIT) && (tp->intr0_enabled & GMAC1_RXDERR_INT_BIT))
++			{
++				tp->RxDerr_cnt[1]++;
++				printk("GMAC1 Rx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC1_RXPERR_INT_BIT) && (tp->intr0_enabled & GMAC1_RXPERR_INT_BIT))
++			{
++				tp->RxPerr_cnt[1]++;
++				printk("GMAC1 Rx Descriptor Protocol Error!\n");
++			}
++		}
++
++#ifndef	GMAX_TX_INTR_DISABLED
++		if (tp->port_id == 1 &&	netif_running(dev) &&
++			(((status0 & GMAC1_SWTQ10_FIN_INT_BIT) && (tp->intr0_enabled & GMAC1_SWTQ10_FIN_INT_BIT))
++			||
++			((status0 & GMAC1_SWTQ10_EOF_INT_BIT) && (tp->intr0_enabled & GMAC1_SWTQ10_EOF_INT_BIT))))
++		{
++			toe_gmac_tx_complete(&toe_private_data.gmac[1], 0, dev, 1);
++		}
++
++		if (tp->port_id == 0 &&	netif_running(dev) &&
++			(((status0 & GMAC0_SWTQ00_FIN_INT_BIT) && (tp->intr0_enabled & GMAC0_SWTQ00_FIN_INT_BIT))
++			||
++			((status0 & GMAC0_SWTQ00_EOF_INT_BIT) && (tp->intr0_enabled & GMAC0_SWTQ00_EOF_INT_BIT))))
++		{
++			toe_gmac_tx_complete(&toe_private_data.gmac[0], 0, dev, 1);
++		}
++#endif
++	}
++	// Interrupt Status 4
++	if (status4 & tp->intr4_enabled)
++	{
++		#define G1_INTR4_BITS		(0xff000000)
++		#define G0_INTR4_BITS		(0x00ff0000)
++
++		if (tp->port_id == 0)
++		{
++			if ((status4 & G0_INTR4_BITS) && (tp->intr4_enabled & G0_INTR4_BITS))
++			{
++				if (status4 & GMAC0_RESERVED_INT_BIT)
++					printk("GMAC0_RESERVED_INT_BIT is ON\n");
++				if (status4 & GMAC0_MIB_INT_BIT)
++					tp->mib_full_cnt++;
++				if (status4 & GMAC0_RX_PAUSE_ON_INT_BIT)
++					tp->rx_pause_on_cnt++;
++				if (status4 & GMAC0_TX_PAUSE_ON_INT_BIT)
++					tp->tx_pause_on_cnt++;
++				if (status4 & GMAC0_RX_PAUSE_OFF_INT_BIT)
++					tp->rx_pause_off_cnt++;
++				if (status4 & GMAC0_TX_PAUSE_OFF_INT_BIT)
++					tp->rx_pause_off_cnt++;
++				if (status4 & GMAC0_RX_OVERRUN_INT_BIT)
++					tp->rx_overrun_cnt++;
++				if (status4 & GMAC0_STATUS_CHANGE_INT_BIT)
++					tp->status_changed_cnt++;
++			}
++		}
++		else if (tp->port_id == 1)
++		{
++			if ((status4 & G1_INTR4_BITS) && (tp->intr4_enabled & G1_INTR4_BITS))
++			{
++				if (status4 & GMAC1_RESERVED_INT_BIT)
++					printk("GMAC1_RESERVED_INT_BIT is ON\n");
++				if (status4 & GMAC1_MIB_INT_BIT)
++					tp->mib_full_cnt++;
++				if (status4 & GMAC1_RX_PAUSE_ON_INT_BIT)
++				{
++					printk("GMAC1 Rx pause on\n");
++					tp->rx_pause_on_cnt++;
++				}
++				if (status4 & GMAC1_TX_PAUSE_ON_INT_BIT)
++				{
++					printk("GMAC1 Tx pause on\n");
++					tp->tx_pause_on_cnt++;
++				}
++				if (status4 & GMAC1_RX_PAUSE_OFF_INT_BIT)
++				{
++					printk("GMAC1 Rx pause off\n");
++					tp->rx_pause_off_cnt++;
++				}
++				if (status4 & GMAC1_TX_PAUSE_OFF_INT_BIT)
++				{
++					printk("GMAC1 Tx pause off\n");
++					tp->rx_pause_off_cnt++;
++				}
++				if (status4 & GMAC1_RX_OVERRUN_INT_BIT)
++				{
++					//printk("Gmac Rx Overrun \n");
++					tp->rx_overrun_cnt++;
++				}
++				if (status4 & GMAC1_STATUS_CHANGE_INT_BIT)
++					tp->status_changed_cnt++;
++			}
++		}
++	}
++
++	//toe_gmac_enable_interrupt(tp->irq);
++#ifdef IxscriptMate_1518
++	if (storlink_ctl.pauseoff == 1)
++	{
++		GMAC_CONFIG0_T config0;
++		config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 0;
++		writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 0;
++		writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++	}
++#endif
++//	 enable_irq(gmac_irq[dev_index]);
++	//printk("gmac_interrupt complete!\n\n");
++//	return IRQ_RETVAL(handled);
++	return	IRQ_RETVAL(1);
++}
++else
++{
++#endif	//endif NAPI
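++	/*
++	 * In this path the interrupt handler does all the work itself: it
++	 * acknowledges the status registers, drains the default RX queue and TX
++	 * completions in interrupt context, and re-enables the GMAC interrupt
++	 * before returning.
++	 */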
++
++
++	/* disable GMAC interrupt */
++    toe_gmac_disable_interrupt(tp->irq);
++
++//	isPtr->interrupts++;
++	/* read Interrupt status */
++	status0 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_0_REG);
++	status1 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_1_REG);
++	status2 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_2_REG);
++	status3 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_3_REG);
++	status4 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++	// prompt warning if status bit ON but not enabled
++#if 0
++	if (status0 & ~tp->intr0_enabled)
++		printk("Intr 0 Status error. status = 0x%X, enable = 0x%X\n",
++				status0, tp->intr0_enabled);
++	if (status1 & ~tp->intr1_enabled)
++		printk("Intr 1 Status error. status = 0x%X, enable = 0x%X\n",
++				status1, tp->intr1_enabled);
++	if (status2 & ~tp->intr2_enabled)
++		printk("Intr 2 Status error. status = 0x%X, enable = 0x%X\n",
++				status2, tp->intr2_enabled);
++	if (status3 & ~tp->intr3_enabled)
++		printk("Intr 3 Status error. status = 0x%X, enable = 0x%X\n",
++				status3, tp->intr3_enabled);
++	if (status4 & ~tp->intr4_enabled)
++		printk("Intr 4 Status error. status = 0x%X, enable = 0x%X\n",
++				status4, tp->intr4_enabled);
++#endif
++#define	INTERRUPT_SELECT			1
++	if (status0)
++		writel(status0 & tp->intr0_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_0_REG);
++	if (status1)
++		writel(status1 & tp->intr1_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_1_REG);
++	if (status2)
++		writel(status2 & tp->intr2_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_2_REG);
++	if (status3)
++		writel(status3 & tp->intr3_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_3_REG);
++	if (status4)
++		writel(status4 & tp->intr4_enabled, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_STATUS_4_REG);
++
++	/* handle freeq interrupt first */
++	if (status4 & tp->intr4_enabled) {
++		if ((status4 & SWFQ_EMPTY_INT_BIT) && (tp->intr4_enabled & SWFQ_EMPTY_INT_BIT))
++		{
++			// unsigned long data = REG32(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++			//gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_4_REG,
++			//	tp->intr4_enabled & ~SWFQ_EMPTY_INT_BIT, SWFQ_EMPTY_INT_BIT);
++
++			//gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG,
++			//	SWFQ_EMPTY_INT_BIT, SWFQ_EMPTY_INT_BIT);
++			if (toe->gmac[0].dev && netif_running(toe->gmac[0].dev))
++				toe_gmac_handle_default_rxq(toe->gmac[0].dev,&toe->gmac[0]);
++			if (toe->gmac[1].dev && netif_running(toe->gmac[1].dev))
++				toe_gmac_handle_default_rxq(toe->gmac[1].dev,&toe->gmac[1]);
++			printk("\nfreeq int\n");
++			toe_gmac_fill_free_q();
++			tp->sw_fq_empty_cnt++;
++
++			gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG, status4,
++				SWFQ_EMPTY_INT_BIT);
++		}
++	}
++
++	// Interrupt Status 1
++	if (status1 & tp->intr1_enabled)
++	{
++		#define G1_INTR0_BITS	(GMAC1_HWTQ13_EOF_INT_BIT | GMAC1_HWTQ12_EOF_INT_BIT | GMAC1_HWTQ11_EOF_INT_BIT | GMAC1_HWTQ10_EOF_INT_BIT)
++		#define G0_INTR0_BITS	(GMAC0_HWTQ03_EOF_INT_BIT | GMAC0_HWTQ02_EOF_INT_BIT | GMAC0_HWTQ01_EOF_INT_BIT | GMAC0_HWTQ00_EOF_INT_BIT)
++		// Handle GMAC 0/1 HW TX queue 0-3 EOF events: only count them here.
++		// TOE, classification and default queue interrupts are handled by the ISR
++		// because they must pass packets up to the stack.
++		if (tp->port_id == 0)
++		{
++#ifndef	INTERRUPT_SELECT
++			if (netif_running(dev) && (status1 & G0_INTR0_BITS) && (tp->intr1_enabled & G0_INTR0_BITS))
++			{
++				if (status1 & GMAC0_HWTQ03_EOF_INT_BIT)
++					tp->hwtxq[3].eof_cnt++;
++				if (status1 & GMAC0_HWTQ02_EOF_INT_BIT)
++					tp->hwtxq[2].eof_cnt++;
++				if (status1 & GMAC0_HWTQ01_EOF_INT_BIT)
++					tp->hwtxq[1].eof_cnt++;
++				if (status1 & GMAC0_HWTQ00_EOF_INT_BIT)
++					tp->hwtxq[0].eof_cnt++;
++#endif	//INTERRUPT_SELECT
++#ifndef	INTERRUPT_SELECT
++			}
++#endif	//INTERRUPT_SELECT
++			if (netif_running(dev) && (status1 & DEFAULT_Q0_INT_BIT) && (tp->intr1_enabled & DEFAULT_Q0_INT_BIT))
++			{
++				tp->default_q_intr_cnt++;
++				toe_gmac_handle_default_rxq(dev, tp);
++			}
++#ifdef CONFIG_SL351x_RXTOE
++			if (netif_running(dev) && (status1 & TOE_IQ_ALL_BITS) &&
++			    (tp->intr1_enabled & TOE_IQ_ALL_BITS)) {
++				//printk("status %x, bits %x, slct %x\n", status1, TOE_IQ_ALL_BITS, tp->intr1_selected);
++				toe_gmac_handle_toeq(dev, tp, status1);
++				//toe_gmac_handle_toeq(dev, toe, tp, status1);
++			}
++#endif
++		}
++		else if (tp->port_id == 1)
++		{
++#ifndef	INTERRUPT_SELECT
++			if (netif_running(dev) && (status1 & G1_INTR0_BITS) && (tp->intr1_enabled & G1_INTR0_BITS))
++			{
++				if (status1 & GMAC1_HWTQ13_EOF_INT_BIT)
++					tp->hwtxq[3].eof_cnt++;
++				if (status1 & GMAC1_HWTQ12_EOF_INT_BIT)
++					tp->hwtxq[2].eof_cnt++;
++				if (status1 & GMAC1_HWTQ11_EOF_INT_BIT)
++					tp->hwtxq[1].eof_cnt++;
++				if (status1 & GMAC1_HWTQ10_EOF_INT_BIT)
++					tp->hwtxq[0].eof_cnt++;
++#endif	//INTERRUPT_SELECT
++#ifndef	INTERRUPT_SELECT
++			}
++#endif	//INTERRUPT_SELECT
++			if (netif_running(dev) && (status1 & DEFAULT_Q1_INT_BIT) && (tp->intr1_enabled & DEFAULT_Q1_INT_BIT))
++			{
++				tp->default_q_intr_cnt++;
++				toe_gmac_handle_default_rxq(dev, tp);
++			}
++#ifdef CONFIG_SL351x_RXTOE
++			if (netif_running(dev) && (status1 & TOE_IQ_ALL_BITS) &&
++			    (tp->intr1_enabled & TOE_IQ_ALL_BITS)) {
++				//printk("status %x, bits %x, slct %x\n", status1, TOE_IQ_ALL_BITS, tp->intr1_selected);
++				toe_gmac_handle_toeq(dev, tp, status1);
++				//toe_gmac_handle_toeq(dev, toe, tp, status1);
++			}
++#endif
++		}
++	}
++
++
++	// Interrupt Status 0
++	if (status0 & tp->intr0_enabled)
++	{
++
++		#define ERR_INTR_BITS	(GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT |	\
++								 GMAC1_TXDERR_INT_BIT | GMAC1_TXPERR_INT_BIT |	\
++								 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT |	\
++								 GMAC1_RXDERR_INT_BIT | GMAC1_RXPERR_INT_BIT)
++#ifndef	INTERRUPT_SELECT
++		if (status0 &  ERR_INTR_BITS)
++		{
++			if ((status0 & GMAC0_TXDERR_INT_BIT) && (tp->intr0_enabled & GMAC0_TXDERR_INT_BIT))
++			{
++				tp->txDerr_cnt[0]++;
++				printk("GMAC0 TX AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC0_TXPERR_INT_BIT) && (tp->intr0_enabled & GMAC0_TXPERR_INT_BIT))
++			{
++				tp->txPerr_cnt[0]++;
++				printk("GMAC0 Tx Descriptor Protocol Error!\n");
++			}
++			if ((status0 & GMAC1_TXDERR_INT_BIT) && (tp->intr0_enabled & GMAC1_TXDERR_INT_BIT))
++			{
++				tp->txDerr_cnt[1]++;
++				printk("GMAC1 Tx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC1_TXPERR_INT_BIT) && (tp->intr0_enabled & GMAC1_TXPERR_INT_BIT))
++			{
++				tp->txPerr_cnt[1]++;
++				printk("GMAC1 Tx Descriptor Protocol Error!\n");
++			}
++
++			if ((status0 & GMAC0_RXDERR_INT_BIT) && (tp->intr0_enabled & GMAC0_RXDERR_INT_BIT))
++			{
++				tp->RxDerr_cnt[0]++;
++				printk("GMAC0 Rx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC0_RXPERR_INT_BIT) && (tp->intr0_enabled & GMAC0_RXPERR_INT_BIT))
++			{
++				tp->RxPerr_cnt[0]++;
++				printk("GMAC0 Rx Descriptor Protocol Error!\n");
++			}
++			if ((status0 & GMAC1_RXDERR_INT_BIT) && (tp->intr0_enabled & GMAC1_RXDERR_INT_BIT))
++			{
++				tp->RxDerr_cnt[1]++;
++				printk("GMAC1 Rx AHB Bus Error!\n");
++			}
++			if ((status0 & GMAC1_RXPERR_INT_BIT) && (tp->intr0_enabled & GMAC1_RXPERR_INT_BIT))
++			{
++				tp->RxPerr_cnt[1]++;
++				printk("GMAC1 Rx Descriptor Protocol Error!\n");
++			}
++		}
++#endif	//INTERRUPT_SELECT
++#ifndef	GMAX_TX_INTR_DISABLED
++		if (tp->port_id == 1 &&	netif_running(dev) &&
++			(((status0 & GMAC1_SWTQ10_FIN_INT_BIT) && (tp->intr0_enabled & GMAC1_SWTQ10_FIN_INT_BIT))
++			||
++			((status0 & GMAC1_SWTQ10_EOF_INT_BIT) && (tp->intr0_enabled & GMAC1_SWTQ10_EOF_INT_BIT))))
++		{
++			toe_gmac_tx_complete(&toe_private_data.gmac[1], 0, dev, 1);
++		}
++
++		if (tp->port_id == 0 &&	netif_running(dev) &&
++			(((status0 & GMAC0_SWTQ00_FIN_INT_BIT) && (tp->intr0_enabled & GMAC0_SWTQ00_FIN_INT_BIT))
++			||
++			((status0 & GMAC0_SWTQ00_EOF_INT_BIT) && (tp->intr0_enabled & GMAC0_SWTQ00_EOF_INT_BIT))))
++		{
++			toe_gmac_tx_complete(&toe_private_data.gmac[0], 0, dev, 1);
++		}
++#endif
++		// clear enabled status bits
++	}
++	// Interrupt Status 4
++#ifndef	INTERRUPT_SELECT
++	if (status4 & tp->intr4_enabled)
++	{
++		#define G1_INTR4_BITS		(0xff000000)
++		#define G0_INTR4_BITS		(0x00ff0000)
++
++		if (tp->port_id == 0)
++		{
++			if ((status4 & G0_INTR4_BITS) && (tp->intr4_enabled & G0_INTR4_BITS))
++			{
++				if (status4 & GMAC0_RESERVED_INT_BIT)
++					printk("GMAC0_RESERVED_INT_BIT is ON\n");
++				if (status4 & GMAC0_MIB_INT_BIT)
++					tp->mib_full_cnt++;
++				if (status4 & GMAC0_RX_PAUSE_ON_INT_BIT)
++					tp->rx_pause_on_cnt++;
++				if (status4 & GMAC0_TX_PAUSE_ON_INT_BIT)
++					tp->tx_pause_on_cnt++;
++				if (status4 & GMAC0_RX_PAUSE_OFF_INT_BIT)
++					tp->rx_pause_off_cnt++;
++				if (status4 & GMAC0_TX_PAUSE_OFF_INT_BIT)
++					tp->rx_pause_off_cnt++;
++				if (status4 & GMAC0_RX_OVERRUN_INT_BIT)
++					tp->rx_overrun_cnt++;
++				if (status4 & GMAC0_STATUS_CHANGE_INT_BIT)
++					tp->status_changed_cnt++;
++			}
++		}
++		else if (tp->port_id == 1)
++		{
++			if ((status4 & G1_INTR4_BITS) && (tp->intr4_enabled & G1_INTR4_BITS))
++			{
++				if (status4 & GMAC1_RESERVED_INT_BIT)
++					printk("GMAC1_RESERVED_INT_BIT is ON\n");
++				if (status4 & GMAC1_MIB_INT_BIT)
++					tp->mib_full_cnt++;
++				if (status4 & GMAC1_RX_PAUSE_ON_INT_BIT)
++				{
++					//printk("Gmac pause on\n");
++					tp->rx_pause_on_cnt++;
++				}
++				if (status4 & GMAC1_TX_PAUSE_ON_INT_BIT)
++				{
++					//printk("Gmac pause on\n");
++					tp->tx_pause_on_cnt++;
++				}
++				if (status4 & GMAC1_RX_PAUSE_OFF_INT_BIT)
++				{
++					//printk("Gmac pause off\n");
++					tp->rx_pause_off_cnt++;
++				}
++				if (status4 & GMAC1_TX_PAUSE_OFF_INT_BIT)
++				{
++					//printk("Gmac pause off\n");
++					tp->rx_pause_off_cnt++;
++				}
++				if (status4 & GMAC1_RX_OVERRUN_INT_BIT)
++				{
++					//printk("Gmac Rx Overrun \n");
++					tp->rx_overrun_cnt++;
++				}
++				if (status4 & GMAC1_STATUS_CHANGE_INT_BIT)
++					tp->status_changed_cnt++;
++			}
++		}
++#if 0
++		if ((status4 & SWFQ_EMPTY_INT_BIT) && (tp->intr4_enabled & SWFQ_EMPTY_INT_BIT))
++		{
++			// unsigned long data = REG32(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++//			mac_stop_rxdma(tp->sc);
++			gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_ENABLE_4_REG,
++				tp->intr4_enabled & ~SWFQ_EMPTY_INT_BIT, SWFQ_EMPTY_INT_BIT);
++
++			gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG,
++				SWFQ_EMPTY_INT_BIT, SWFQ_EMPTY_INT_BIT);
++			toe_gmac_fill_free_q();
++			tp->sw_fq_empty_cnt++;
++
++			gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG, status4,
++				SWFQ_EMPTY_INT_BIT);
++//#if 0
++/*			if (netif_running(dev))
++				toe_gmac_handle_default_rxq(dev, tp);
++			printk("SWFQ_EMPTY_INT_BIT is ON!\n");	// should not be happened */
++//#endif
++		}
++#endif
++	}
++#endif	//INTERRUPT_SELECT
++	toe_gmac_enable_interrupt(tp->irq);
++// Re-enable the GMAC RX function when running RFC 2544.
++#ifdef IxscriptMate_1518
++	if (storlink_ctl.pauseoff == 1)
++	{
++		GMAC_CONFIG0_T config0;
++		config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 0;
++		writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 0;
++		writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++	}
++#endif
++	//printk("gmac_interrupt complete!\n\n");
++//	return IRQ_RETVAL(handled);
++	return	IRQ_RETVAL(1);
++#ifdef CONFIG_SL_NAPI
++}
++#endif
++}
++
++/*----------------------------------------------------------------------
++*	toe_gmac_handle_default_rxq
++*	(1) Get rx Buffer for default Rx queue
++*	(2) notify or call upper-routine to handle it
++*	(3) get a new buffer and insert it into SW free queue
++*	(4) Note: The SW free queue Read-Write Pointer should be locked when accessing
++*----------------------------------------------------------------------*/
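++/*
++ * Queue pointer convention: the hardware advances the write pointer (wptr) as
++ * frames arrive; this routine advances the read pointer (rptr) with
++ * RWPTR_ADVANCE_ONE()/SET_RPTR() once a frame has been consumed, so the queue
++ * holds work whenever rptr != wptr.
++ */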
++//static inline void toe_gmac_handle_default_rxq(struct net_device *dev, GMAC_INFO_T *tp)
++static void toe_gmac_handle_default_rxq(struct net_device *dev, GMAC_INFO_T *tp)
++{
++	TOE_INFO_T			*toe;
++    GMAC_RXDESC_T   	*curr_desc;
++	struct sk_buff 		*skb;
++    DMA_RWPTR_T			rwptr;
++	unsigned int 		pkt_size;
++	int					max_cnt;
++	unsigned int        desc_count;
++	unsigned int        good_frame, chksum_status, rx_status;
++	struct net_device_stats *isPtr = (struct net_device_stats *)&tp->ifStatics;
++
++// When running the Ixia RFC 2544 test with 1518-byte packets, disable the GMAC RX function immediately after an interrupt comes in.
++#ifdef IxscriptMate_1518
++	if (storlink_ctl.pauseoff == 1)
++	{
++		GMAC_CONFIG0_T config0;
++		config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 1;
++		writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++		config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++		config0.bits.dis_rx = 1;
++		writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++	}
++#endif
++	rwptr.bits32 = readl(&tp->default_qhdr->word1);
++#if 0
++	if (rwptr.bits.rptr != tp->rx_rwptr.bits.rptr)
++	{
++		mac_stop_txdma((struct net_device *)tp->dev);
++		printk("Default Queue HW RD ptr (0x%x) != SW RD Ptr (0x%x)\n",
++				rwptr.bits32, tp->rx_rwptr.bits.rptr);
++		while(1);
++	}
++#endif
++	toe = (TOE_INFO_T *)&toe_private_data;
++	max_cnt = DEFAULT_RXQ_MAX_CNT;
++	while ((--max_cnt) && rwptr.bits.rptr != rwptr.bits.wptr)
++//	while (rwptr.bits.rptr != rwptr.bits.wptr)
++	{
++// If the packet size is not 1518 bytes for RFC 2544, re-enable the GMAC RX function; other packet sizes are covered by the RX workaround.
++#ifdef IxscriptMate_1518
++    	if (storlink_ctl.pauseoff == 1)
++		{
++			if (pkt_size != 1514)
++			{
++						GMAC_CONFIG0_T config0;
++						config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++						config0.bits.dis_rx = 0;
++						writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++						config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++						config0.bits.dis_rx = 0;
++						writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++			}
++		}
++#endif
++    	curr_desc = (GMAC_RXDESC_T *)tp->default_desc_base + rwptr.bits.rptr;
++//		consistent_sync(curr_desc, sizeof(GMAC_RXDESC_T), PCI_DMA_FROMDEVICE);
++		tp->default_q_cnt++;
++    	tp->rx_curr_desc = (unsigned int)curr_desc;
++    	rx_status = curr_desc->word0.bits.status;
++    	chksum_status = curr_desc->word0.bits.chksum_status;
++    	tp->rx_status_cnt[rx_status]++;
++    	tp->rx_chksum_cnt[chksum_status]++;
++        pkt_size = curr_desc->word1.bits.byte_count;  /*total byte count in a frame*/
++		desc_count = curr_desc->word0.bits.desc_count; /* get descriptor count per frame */
++		good_frame=1;
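++		/*
++		 * Drop the frame (and free its skb) if the descriptor reports an AHB or
++		 * protocol error, the frame is shorter than 60 bytes, the checksum
++		 * status flags an error, or the RX status field is non-zero.
++		 */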
++		if ((curr_desc->word0.bits32 & (GMAC_RXDESC_0_T_derr | GMAC_RXDESC_0_T_perr))
++			|| (pkt_size < 60)
++		    || (chksum_status & 0x4)
++			|| rx_status)
++		{
++			good_frame = 0;
++			if (curr_desc->word0.bits32 & GMAC_RXDESC_0_T_derr)
++				printk("%s::derr (GMAC-%d)!!!\n", __func__, tp->port_id);
++			if (curr_desc->word0.bits32 & GMAC_RXDESC_0_T_perr)
++				printk("%s::perr (GMAC-%d)!!!\n", __func__, tp->port_id);
++			if (rx_status)
++			{
++				if (rx_status == 4 || rx_status == 7)
++					isPtr->rx_crc_errors++;
++//				printk("%s::Status=%d (GMAC-%d)!!!\n", __func__, rx_status, tp->port_id);
++			}
++#ifdef SL351x_GMAC_WORKAROUND
++			else if (pkt_size < 60)
++			{
++				if (tp->short_frames_cnt < GMAC_SHORT_FRAME_THRESHOLD)
++					tp->short_frames_cnt++;
++				if (tp->short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++				{
++					GMAC_CONFIG0_T config0;
++					config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++					config0.bits.dis_rx = 1;
++					writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++					config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++					config0.bits.dis_rx = 1;
++					writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++				}
++			}
++#endif
++//			if (chksum_status)
++//				printk("%s::Checksum Status=%d (GMAC-%d)!!!\n", __func__, chksum_status, tp->port_id);
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			dev_kfree_skb_irq(skb);
++		}
++		if (good_frame)
++		{
++			if (curr_desc->word0.bits.drop)
++				printk("%s::Drop (GMAC-%d)!!!\n", __func__, tp->port_id);
++//			if (chksum_status)
++//				printk("%s::Checksum Status=%d (GMAC-%d)!!!\n", __func__, chksum_status, tp->port_id);
++
++	    	/* get frame information from the first descriptor of the frame */
++#ifdef SL351x_GMAC_WORKAROUND
++			if (tp->short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++			{
++				GMAC_CONFIG0_T config0;
++				config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++				config0.bits.dis_rx = 0;
++				writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++				config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++				config0.bits.dis_rx = 0;
++				writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++			}
++			tp->short_frames_cnt = 0;
++#endif
++			isPtr->rx_packets++;
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr - SKB_RESERVE_BYTES)));
++			if (!skb)
++			{
++				printk("Fatal Error!!skb==NULL\n");
++				goto next_rx;
++			}
++			tp->curr_rx_skb = skb;
++			// consistent_sync((void *)__va(curr_desc->word2.buf_adr), pkt_size, PCI_DMA_FROMDEVICE);
++
++	//		curr_desc->word2.buf_adr = 0;
++
++			skb_reserve (skb, RX_INSERT_BYTES);	/* 16 byte align the IP fields. */
++			skb_put(skb, pkt_size);
++			skb->dev = dev;
++			if (chksum_status == RX_CHKSUM_IP_UDP_TCP_OK)
++			{
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++#ifdef CONFIG_SL351x_NAT
++				if (nat_cfg.enabled && curr_desc->word3.bits.l3_offset && curr_desc->word3.bits.l4_offset)
++				{
++					struct iphdr	*ip_hdr;
++					ip_hdr = (struct iphdr *)&(skb->data[curr_desc->word3.bits.l3_offset]);
++					sl351x_nat_input(skb,
++									tp->port_id,
++									(void *)curr_desc->word3.bits.l3_offset,
++								  	(void *)curr_desc->word3.bits.l4_offset);
++				}
++#endif
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++#if 0
++#ifdef CONFIG_SL351x_RXTOE
++				if (storlink_ctl.rx_max_pktsize) {
++					struct iphdr	*ip_hdr;
++					struct tcphdr	*tcp_hdr;
++					int ip_hdrlen;
++
++ 					ip_hdr = (struct iphdr*)&(skb->data[0]);
++					if ((skb->protocol == __constant_htons(ETH_P_IP)) &&
++					   ((ip_hdr->protocol & 0x00ff) == IPPROTO_TCP)) {
++						ip_hdrlen = ip_hdr->ihl << 2;
++						tcp_hdr = (struct tcphdr*)&(skb->data[ip_hdrlen]);
++						if (tcp_hdr->syn) {
++							struct toe_conn* connection = init_toeq(ip_hdr->version,
++									ip_hdr, tcp_hdr, toe, &(skb->data[0]) - 14);
++							TCP_SKB_CB(skb)->connection = connection;
++							//	hash_dump_entry(TCP_SKB_CB(skb)->connection->hash_entry_index);
++							//		printk("%s::skb data %x, conn %x, mode %x\n",
++							//			__func__, skb->data, connection, connection->mode);
++						}
++					}
++				}
++#endif
++#endif
++			}
++			else if (chksum_status == RX_CHKSUM_IP_OK_ONLY)
++			{
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++#ifdef CONFIG_SL351x_NAT
++				if (nat_cfg.enabled && curr_desc->word3.bits.l3_offset && curr_desc->word3.bits.l4_offset)
++				{
++					struct iphdr		*ip_hdr;
++					//struct tcphdr		*tcp_hdr;
++					ip_hdr = (struct iphdr *)&(skb->data[curr_desc->word3.bits.l3_offset]);
++					//tcp_hdr = (struct tcphdr *)&(skb->data[curr_desc->word3.bits.l4_offset]);
++					if (ip_hdr->protocol == IPPROTO_UDP)
++					{
++						sl351x_nat_input(skb,
++										tp->port_id,
++										(void *)curr_desc->word3.bits.l3_offset,
++								  		(void *)curr_desc->word3.bits.l4_offset);
++					}
++					else if (ip_hdr->protocol == IPPROTO_GRE)
++					{
++						sl351x_nat_input(skb,
++									tp->port_id,
++									(void *)curr_desc->word3.bits.l3_offset,
++								  	(void *)curr_desc->word3.bits.l4_offset);
++					}
++				}
++#endif
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++			}
++			else
++			{
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++			}
++
++			netif_rx(skb);  /* socket rx */
++			dev->last_rx = jiffies;
++
++			isPtr->rx_bytes += pkt_size;
++
++        }
++
++next_rx:
++		// advance one for Rx default Q 0/1
++		rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, tp->default_desc_num);
++		SET_RPTR(&tp->default_qhdr->word1, rwptr.bits.rptr);
++     	tp->rx_rwptr.bits32 = rwptr.bits32;
++
++		toe_gmac_fill_free_q();
++	}
++}
++
++/*----------------------------------------------------------------------
++* gmac_get_phy_vendor
++*----------------------------------------------------------------------*/
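++/*
++ * MII registers 2 and 3 hold the standard PHY identifier (OUI plus model and
++ * revision); they are combined into one 32-bit value and printed as the
++ * vendor ID during PHY setup.
++ */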
++static unsigned int gmac_get_phy_vendor(int phy_addr)
++{
++    unsigned int	reg_val;
++    reg_val=(mii_read(phy_addr,0x02) << 16) + mii_read(phy_addr,0x03);
++    return reg_val;
++}
++
++/*----------------------------------------------------------------------
++* gmac_set_phy_status
++*----------------------------------------------------------------------*/
++void gmac_set_phy_status(struct net_device *dev)
++{
++	GMAC_INFO_T *tp = dev->priv;
++	GMAC_STATUS_T   status;
++	unsigned int    reg_val, ability,wan_port_id;
++	unsigned int    i = 0;
++
++#ifdef VITESSE_G5SWITCH
++	if((tp->port_id == GMAC_PORT1)&&(Giga_switch==1)){
++#if 0
++		rcv_mask = SPI_read(2,0,0x10);			// Receive mask
++		rcv_mask |= 0x4F;
++		for(i=0;i<4;i++){
++			reg_val = BIT(26)|(i<<21)|(10<<16);
++			SPI_write(3,0,1,reg_val);
++			msleep(10);
++			reg_val = SPI_read(3,0,2);
++			if(reg_val & 0x0c00){
++				printk("Port%d:Giga mode\n",i);
++				SPI_write(1,i,0x00,0x300701B1);
++				SPI_write(1,i,0x00,0x10070181);
++				switch_pre_link[i]=LINK_UP;
++				switch_pre_speed[i]=GMAC_SPEED_1000;
++			}
++			else{
++				reg_val = BIT(26)|(i<<21)|(5<<16);
++				SPI_write(3,0,1,reg_val);
++				msleep(10);
++				ability = (reg_val = SPI_read(3,0,2)&0x5e0) >>5;
++				if ((ability & 0x0C)) /* 100M full duplex */
++				{
++					SPI_write(1,i,0x00,0x30050472);
++					SPI_write(1,i,0x00,0x10050442);
++					printk("Port%d:100M\n",i);
++					switch_pre_link[i]=LINK_UP;
++				switch_pre_speed[i]=GMAC_SPEED_100;
++				}
++				else if((ability & 0x03)) /* 10M full duplex */
++				{
++					SPI_write(1,i,0x00,0x30050473);
++					SPI_write(1,i,0x00,0x10050443);
++					printk("Port%d:10M\n",i);
++					switch_pre_link[i]=LINK_UP;
++					switch_pre_speed[i]=GMAC_SPEED_10;
++				}
++				else{
++					SPI_write(1,i,0x00,BIT(16));			// disable RX
++					SPI_write(5,0,0x0E,BIT(i));			// discard packet
++					while((SPI_read(5,0,0x0C)&BIT(i))==0)		// wait to be empty
++						msleep(1);
++
++					SPI_write(1,i,0x00,0x20000030);			// PORT_RST
++					switch_pre_link[i]=LINK_DOWN;
++					switch_pre_speed[i]=GMAC_SPEED_10;
++					rcv_mask &= ~BIT(i);
++					SPI_write(2,0,0x10,rcv_mask);			// Disable Receive
++				}
++			}
++		}
++#endif
++		gmac_get_switch_status(dev);
++		gmac_write_reg(tp->base_addr, GMAC_STATUS, 0x7d, 0x0000007f);
++//		SPI_write(2,0,0x10,rcv_mask);			// Enable Receive
++		return ;
++	}
++#endif
++
++	reg_val = gmac_get_phy_vendor(tp->phy_addr);
++	printk("GMAC-%d Addr %d Vendor ID: 0x%08x\n", tp->port_id, tp->phy_addr, reg_val);
++
++	switch (tp->phy_mode)
++	{
++		case GMAC_PHY_GMII:
++		mii_write(tp->phy_addr,0x04,0x05e1); /* advertise 10/100 full and half duplex, pause capable */
++		#ifdef CONFIG_SL3516_ASIC
++		mii_write(tp->phy_addr,0x09,0x0300); /* advertise 1000M full/half duplex */
++		#else
++		mii_write(tp->phy_addr,0x09,0x0000); /* do not advertise 1000M full/half duplex */
++		#endif
++		break;
++		case GMAC_PHY_RGMII_100:
++		mii_write(tp->phy_addr,0x04,0x05e1); /* advertise 10/100 full and half duplex, pause capable */
++		mii_write(tp->phy_addr,0x09,0x0000); /* do not advertise 1000M */
++		break;
++		case GMAC_PHY_RGMII_1000:
++		mii_write(tp->phy_addr,0x04,0x05e1); /* advertise 10/100 full and half duplex, pause capable */
++		#ifdef CONFIG_SL3516_ASIC
++		mii_write(tp->phy_addr,0x09,0x0300); /* advertise 1000M full/half duplex */
++		#else
++		mii_write(tp->phy_addr,0x09,0x0000); /* do not advertise 1000M full/half duplex */
++		#endif
++		break;
++		case GMAC_PHY_MII:
++		default:
++		mii_write(tp->phy_addr,0x04,0x05e1); /* advertise 10/100 full and half duplex, pause capable */
++		mii_write(tp->phy_addr,0x09,0x0000); /* do not advertise 1000M */
++		break;
++	}
++
++	mii_write(tp->phy_addr,0x18,0x0041);	// PHY activity LED
++	if (tp->auto_nego_cfg)
++	{
++		reg_val = 0x1200 | (1 << 15);
++		mii_write(tp->phy_addr,0x00,reg_val); /* Enable and Restart Auto-Negotiation */
++		mdelay(500);
++		reg_val &= ~(1 << 15);
++		mii_write(tp->phy_addr, 0x00, reg_val);
++	}
++	else
++	{
++		reg_val = 0;
++		reg_val |= (tp->full_duplex_cfg) ? (1 << 8) : 0;
++		reg_val |= (tp->speed_cfg == GMAC_SPEED_1000) ? (1 << 6) : 0;
++		reg_val |= (tp->speed_cfg == GMAC_SPEED_100) ? (1 << 13) : 0;
++		mii_write(tp->phy_addr, 0x00, reg_val);
++		mdelay(100);
++
++		reg_val |= (1 << 15);	// Reset PHY;
++		mii_write(tp->phy_addr, 0x00, reg_val);
++	}
++
++	status.bits32 = 0;
++	/* set PHY operation mode */
++	status.bits.mii_rmii = tp->phy_mode;
++	status.bits.reserved = 1;
++	mdelay(100);
++	while (((reg_val=mii_read(tp->phy_addr,0x01)) & 0x00000004)!=0x04)
++	{
++		msleep(100);
++		i++;
++		if (i > 30)
++		break;
++	}
++	if (i>30)
++	{
++		tp->pre_phy_status = LINK_DOWN;
++		status.bits.link = LINK_DOWN;
++		//		clear_bit(__LINK_STATE_START, &dev->state);
++		printk("Link Down (0x%04x) ", reg_val);
++		if(Giga_switch == 1)
++		{
++				wan_port_id = 1;
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ wan_port_id] = 0;
++#endif
++		}
++		else
++		{
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ tp->port_id] = 0;
++#endif
++		}
++	}
++	else
++	{
++		tp->pre_phy_status = LINK_UP;
++		status.bits.link = LINK_UP;
++		//		set_bit(__LINK_STATE_START, &dev->state);
++		printk("Link Up (0x%04x) ",reg_val);
++		if(Giga_switch == 1)
++		{
++				wan_port_id = 1;
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ wan_port_id] = 1;
++#endif
++		}
++		else
++		{
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ tp->port_id] = 1;
++#endif
++		}
++	}
++	//    value = mii_read(PHY_ADDR,0x05);
++
++	ability = (mii_read(tp->phy_addr,0x05) & 0x05E0) >> 5;
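++	/*
++	 * After masking with 0x05E0 and shifting right by 5, bit 0 = 10M/half,
++	 * bit 1 = 10M/full, bit 2 = 100M/half, bit 3 = 100M/full and bit 5 = pause
++	 * capability, which is what the tests below decode.
++	 */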
++
++	//#ifdef CONFIG_SL3516_ASIC
++	reg_val = mii_read(tp->phy_addr,10);
++	printk("MII REG 10 = 0x%x\n",reg_val);
++
++	if ((reg_val & 0x0800) == 0x0800)
++	{
++		status.bits.duplex = 1;
++		status.bits.speed = 2;
++		if (status.bits.mii_rmii == GMAC_PHY_RGMII_100)
++		status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
++
++		printk(" 1000M/Full \n");
++	}
++	else if ((reg_val & 0x0400) == 0x0400)
++	{
++		status.bits.duplex = 0;
++		status.bits.speed = 2;
++		if (status.bits.mii_rmii == GMAC_PHY_RGMII_100)
++		status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
++
++		printk(" 1000M/Half \n");
++	}
++	//#endif
++	else
++	{
++		#ifdef CONFIG_SL3516_ASIC
++		if (status.bits.mii_rmii == GMAC_PHY_RGMII_1000)
++		status.bits.mii_rmii = GMAC_PHY_RGMII_100;
++		#endif
++		printk("MII REG 5 (bit 5:15) = 0x%x\n", ability);
++		if ((ability & 0x08)==0x08) /* 100M full duplex */
++		{
++			status.bits.duplex = 1;
++			status.bits.speed = 1;
++			printk(" 100M/Full\n");
++
++		}
++		else if ((ability & 0x04)==0x04) /* 100M half duplex */
++		{
++			status.bits.duplex = 0;
++			status.bits.speed = 1;
++			printk(" 100M/Half\n");
++
++		}
++		else if ((ability & 0x02)==0x02) /* 10M full duplex */
++		{
++			status.bits.duplex = 1;
++			status.bits.speed = 0;
++			printk(" 10M/Full\n");
++
++		}
++		else if ((ability & 0x01)==0x01) /* 10M half duplex */
++		{
++			status.bits.duplex = 0;
++			status.bits.speed = 0;
++			printk(" 10M/Half\n");
++
++		}
++	}
++	if ((ability & 0x20)==0x20)
++	{
++		tp->flow_control_enable = 1;
++		printk("Flow Control Enable.\n");
++	}
++	else
++	{
++		tp->flow_control_enable = 0;
++		printk("Flow Control Disable.\n");
++	}
++	tp->full_duplex_status = status.bits.duplex;
++	tp->speed_status = status.bits.speed;
++	if (!tp->auto_nego_cfg)
++	{
++		status.bits.duplex = tp->full_duplex_cfg;
++		status.bits.speed = tp->speed_cfg;
++	}
++	toe_gmac_disable_tx_rx(dev);
++	mdelay(10);
++	gmac_write_reg(tp->base_addr, GMAC_STATUS, status.bits32, 0x0000007f);
++	toe_gmac_enable_tx_rx(dev);
++}
++
++/*----------------------------------------------------------------------
++* gmac_phy_thread
++*----------------------------------------------------------------------*/
++static int gmac_phy_thread (void *data)
++{
++	struct net_device   *dev = data;
++	GMAC_INFO_T *tp = dev->priv;
++	unsigned long       timeout;
++
++    daemonize("%s", dev->name);
++	allow_signal(SIGTERM);
++//	reparent_to_init();
++//	spin_lock_irq(&current->sigmask_lock);
++//	sigemptyset(&current->blocked);
++//	recalc_sigpending(current);
++//	spin_unlock_irq(&current->sigmask_lock);
++//	strncpy (current->comm, dev->name, sizeof(current->comm) - 1);
++//	current->comm[sizeof(current->comm) - 1] = '\0';
++
++	while (1)
++	{
++	    timeout = next_tick;
++		do
++		{
++			timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
++		} while (!signal_pending (current) && (timeout > 0));
++
++		if (signal_pending (current))
++		{
++//			spin_lock_irq(&current->sigmask_lock);
++			flush_signals(current);
++//			spin_unlock_irq(&current->sigmask_lock);
++		}
++
++		if (tp->time_to_die)
++			break;
++
++		// printk("%s : Polling MAC %d PHY Status...\n",__func__, tp->port_id);
++		rtnl_lock ();
++		if (tp->auto_nego_cfg){
++#ifdef VITESSE_G5SWITCH
++        		if((tp->port_id == GMAC_PORT1)&&(Giga_switch==1))
++	        		gmac_get_switch_status(dev);
++        		else
++#endif
++        			gmac_get_phy_status(dev); //temp remove
++        	}
++		rtnl_unlock ();
++	}
++	complete_and_exit (&tp->thr_exited, 0);
++}
++
++/*----------------------------------------------------------------------
++* gmac_get_switch_status
++*----------------------------------------------------------------------*/
++#ifdef VITESSE_G5SWITCH
++void gmac_get_switch_status(struct net_device *dev)
++{
++	GMAC_INFO_T *tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++	unsigned int	switch_port_id;
++	int get_link=0;
++
++	get_link = Get_Set_port_status();
++	if(get_link){				// link
++		if(ever_dwon){
++			ever_dwon = 0;
++			toe_gmac_enable_tx_rx(dev);
++			netif_wake_queue(dev);
++			set_bit(__LINK_STATE_START, &dev->state);
++		}
++	}
++	else{					// all down
++		//printk("All link down\n");
++		ever_dwon=1;
++		netif_stop_queue(dev);
++		toe_gmac_disable_tx_rx(dev);
++		clear_bit(__LINK_STATE_START, &dev->state);
++	}
++
++	if ( tp->port_id == 1 )
++		switch_port_id = 0;
++#ifdef CONFIG_SL351x_SYSCTL
++	if (get_link)
++	{
++		storlink_ctl.link[switch_port_id] = 1;
++	}
++	else
++	{
++		storlink_ctl.link[switch_port_id] = 0;
++	}
++	if (storlink_ctl.pauseoff == 1)
++		{
++			if (tp->flow_control_enable == 1)
++			{
++				config0.bits32 = 0;
++				config0_mask.bits32 = 0;
++				config0.bits.tx_fc_en = 0; /* disable tx flow control */
++				config0.bits.rx_fc_en = 0; /* disable rx flow control */
++				config0_mask.bits.tx_fc_en = 1;
++				config0_mask.bits.rx_fc_en = 1;
++				gmac_write_reg(tp->base_addr, GMAC_CONFIG0,config0.bits32,config0_mask.bits32);
++				printk("Disable SWITCH Flow Control...\n");
++			}
++				tp->flow_control_enable = 0;
++		}
++		else
++#endif
++		{
++			if (tp->flow_control_enable == 0)
++			{
++				config0.bits32 = 0;
++				config0_mask.bits32 = 0;
++				config0.bits.tx_fc_en = 1; /* enable tx flow control */
++				config0.bits.rx_fc_en = 1; /* enable rx flow control */
++				config0_mask.bits.tx_fc_en = 1;
++				config0_mask.bits.rx_fc_en = 1;
++				gmac_write_reg(tp->base_addr, GMAC_CONFIG0,config0.bits32,config0_mask.bits32);
++				printk("Enable SWITCH Flow Control...\n");
++			}
++			tp->flow_control_enable = 1;
++		}
++	return ;
++
++}
++#endif
++
++/*----------------------------------------------------------------------
++* gmac_get_phy_status
++*----------------------------------------------------------------------*/
++void gmac_get_phy_status(struct net_device *dev)
++{
++	GMAC_INFO_T *tp = dev->priv;
++	GMAC_CONFIG0_T	config0,config0_mask;
++	GMAC_STATUS_T   status, old_status;
++	unsigned int    reg_val,ability,wan_port_id;
++
++	old_status.bits32 = status.bits32 = gmac_read_reg(tp->base_addr, GMAC_STATUS);
++
++
++	/* read PHY status register */
++	reg_val = mii_read(tp->phy_addr,0x01);
++	if ((reg_val & 0x0024) == 0x0024) /* link is established and auto_negotiate process completed */
++	{
++		ability = (mii_read(tp->phy_addr,0x05) & 0x05E0) >> 5;
++		/* read PHY Auto-Negotiation Link Partner Ability Register */
++		#ifdef CONFIG_SL3516_ASIC
++		reg_val = mii_read(tp->phy_addr,10);
++		if ((reg_val & 0x0800) == 0x0800)
++		{
++			status.bits.duplex = 1;
++			status.bits.speed = 2;
++			if (status.bits.mii_rmii == GMAC_PHY_RGMII_100)
++			status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
++		}
++		else if ((reg_val & 0x0400) == 0x0400)
++		{
++			status.bits.duplex = 0;
++			status.bits.speed = 2;
++			if (status.bits.mii_rmii == GMAC_PHY_RGMII_100)
++			status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
++		}
++		else
++		#endif
++		{
++			#ifdef CONFIG_SL3516_ASIC
++			if (status.bits.mii_rmii == GMAC_PHY_RGMII_1000)
++			status.bits.mii_rmii = GMAC_PHY_RGMII_100;
++			#endif
++			if ((ability & 0x08)==0x08) /* 100M full duplex */
++			{
++				status.bits.duplex = 1;
++				status.bits.speed = 1;
++			}
++			else if ((ability & 0x04)==0x04) /* 100M half duplex */
++			{
++				status.bits.duplex = 0;
++				status.bits.speed = 1;
++			}
++			else if ((ability & 0x02)==0x02) /* 10M full duplex */
++			{
++				status.bits.duplex = 1;
++				status.bits.speed = 0;
++			}
++			else if ((ability & 0x01)==0x01) /* 10M half duplex */
++			{
++				status.bits.duplex = 0;
++				status.bits.speed = 0;
++			}
++		}
++		status.bits.link = LINK_UP; /* link up */
++		if(Giga_switch==1)
++		{
++				wan_port_id = 1;
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ wan_port_id] = 1;
++		}
++		else
++		{
++				storlink_ctl.link[ tp->port_id] = 1;
++#endif
++		}
++		if ((ability & 0x20)==0x20)
++		{
++			if (tp->flow_control_enable == 0)
++			{
++				config0.bits32 = 0;
++				config0_mask.bits32 = 0;
++				config0.bits.tx_fc_en = 1; /* enable tx flow control */
++				config0.bits.rx_fc_en = 1; /* enable rx flow control */
++				config0_mask.bits.tx_fc_en = 1;
++				config0_mask.bits.rx_fc_en = 1;
++				gmac_write_reg(tp->base_addr, GMAC_CONFIG0,config0.bits32,config0_mask.bits32);
++				printk("GMAC-%d Flow Control Enable.\n", tp->port_id);
++			}
++			tp->flow_control_enable = 1;
++		}
++		else
++		{
++			if (tp->flow_control_enable == 1)
++			{
++				config0.bits32 = 0;
++				config0_mask.bits32 = 0;
++				config0.bits.tx_fc_en = 0; /* disable tx flow control */
++				config0.bits.rx_fc_en = 0; /* disable rx flow control */
++				config0_mask.bits.tx_fc_en = 1;
++				config0_mask.bits.rx_fc_en = 1;
++				gmac_write_reg(tp->base_addr, GMAC_CONFIG0,config0.bits32,config0_mask.bits32);
++				printk("GMAC-%d Flow Control Disable.\n", tp->port_id);
++			}
++			tp->flow_control_enable = 0;
++		}
++
++		if (tp->pre_phy_status == LINK_DOWN)
++		{
++			printk("GMAC-%d LINK_UP......\n",tp->port_id);
++			tp->pre_phy_status = LINK_UP;
++		}
++	}
++	else
++	{
++		status.bits.link = LINK_DOWN; /* link down */
++		if(Giga_switch == 1)
++		{
++				wan_port_id = 1;
++#ifdef CONFIG_SL351x_SYSCTL
++				storlink_ctl.link[ wan_port_id] = 0;
++		}
++		else
++		{
++				storlink_ctl.link[ tp->port_id] = 0;
++#endif
++		}
++		if (tp->pre_phy_status == LINK_UP)
++		{
++			printk("GMAC-%d LINK_Down......\n",tp->port_id);
++			tp->pre_phy_status = LINK_DOWN;
++		}
++	}
++
++	tp->full_duplex_status = status.bits.duplex;
++	tp->speed_status = status.bits.speed;
++	if (!tp->auto_nego_cfg)
++	{
++		status.bits.duplex = tp->full_duplex_cfg;
++		status.bits.speed = tp->speed_cfg;
++	}
++
++	if (old_status.bits32 != status.bits32)
++	{
++		netif_stop_queue(dev);
++		toe_gmac_disable_tx_rx(dev);
++		clear_bit(__LINK_STATE_START, &dev->state);
++		printk("GMAC-%d Change Status Bits 0x%x-->0x%x\n",tp->port_id, old_status.bits32, status.bits32);
++		mdelay(10); // let GMAC consume packet
++		gmac_write_reg(tp->base_addr, GMAC_STATUS, status.bits32, 0x0000007f);
++		if (status.bits.link == LINK_UP)
++		{
++			toe_gmac_enable_tx_rx(dev);
++			netif_wake_queue(dev);
++			set_bit(__LINK_STATE_START, &dev->state);
++		}
++	}
++}
++
++/***************************************/
++/* define GPIO module base address     */
++/***************************************/
++#define GPIO_BASE_ADDR  (IO_ADDRESS(SL2312_GPIO_BASE))
++#define GPIO_BASE_ADDR1  (IO_ADDRESS(SL2312_GPIO_BASE1))
++
++/* define GPIO pin for MDC/MDIO */
++#ifdef CONFIG_SL3516_ASIC
++#define H_MDC_PIN           22
++#define H_MDIO_PIN          21
++#define G_MDC_PIN           22
++#define G_MDIO_PIN          21
++#else
++#define H_MDC_PIN           3
++#define H_MDIO_PIN          2
++#define G_MDC_PIN           0
++#define G_MDIO_PIN          1
++#endif
++
++//#define GPIO_MDC             0x80000000
++//#define GPIO_MDIO            0x00400000
++
++static unsigned int GPIO_MDC = 0;
++static unsigned int GPIO_MDIO = 0;
++static unsigned int GPIO_MDC_PIN = 0;
++static unsigned int GPIO_MDIO_PIN = 0;
++
++// For PHY test definition!!
++#define LPC_EECK		0x02
++#define LPC_EDIO		0x04
++#define LPC_GPIO_SET		3
++#define LPC_BASE_ADDR		IO_ADDRESS(IT8712_IO_BASE)
++#define inb_gpio(x)		inb(LPC_BASE_ADDR + IT8712_GPIO_BASE + x)
++#define outb_gpio(x, y)		outb(y, LPC_BASE_ADDR + IT8712_GPIO_BASE + x)
++
++enum GPIO_REG
++{
++    GPIO_DATA_OUT   = 0x00,
++    GPIO_DATA_IN    = 0x04,
++    GPIO_PIN_DIR    = 0x08,
++    GPIO_BY_PASS    = 0x0c,
++    GPIO_DATA_SET   = 0x10,
++    GPIO_DATA_CLEAR = 0x14,
++};
++/***********************/
++/*    MDC : GPIO[31]   */
++/*    MDIO: GPIO[22]   */
++/***********************/
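++/*
++ * The bit-bang helpers below drive the PHY management interface through the
++ * GPIO block: GPIO_PIN_DIR selects the pin direction, GPIO_DATA_SET drives a
++ * pin high, GPIO_DATA_CLEAR drives it low and GPIO_DATA_IN samples MDIO.
++ */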
++
++/***************************************************
++* All the commands should have the frame structure:
++*
++*    <PRE><ST><OP><PHYAD><REGAD><TA><DATA><IDLE>
++****************************************************/
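++
++/*
++ * For reference, the clause-22 management frame produced by mii_read() and
++ * mii_write() below is, bit by bit:
++ *   PRE   : 32 one bits                 (mii_pre_st)
++ *   ST    : 0,1
++ *   OP    : 1,0 = read / 0,1 = write
++ *   PHYAD : 5 bits, MSB first
++ *   REGAD : 5 bits, MSB first
++ *   TA    : turnaround (released on read, 1,0 on write)
++ *   DATA  : 16 bits, MSB first, followed by two idle clocks
++ */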
++
++/*****************************************************************
++* Inject a bit to NWay register through CSR9_MDC,MDIO
++*******************************************************************/
++void mii_serial_write(char bit_MDO) // write data into mii PHY
++{
++#ifdef CONFIG_SL2312_LPC_IT8712
++	unsigned char iomode,status;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode |= (LPC_EECK|LPC_EDIO) ;				// Set EECK,EDIO,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	if(bit_MDO)
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status |= LPC_EDIO ;		//EDIO high
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++	else
++	{
++		status = inb_gpio( LPC_GPIO_SET);
++		status &= ~(LPC_EDIO) ;		//EDIO low
++		outb_gpio(LPC_GPIO_SET, status);
++	}
++
++	status |= LPC_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(LPC_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++#else
++    unsigned int addr;
++    unsigned int value;
++
++    addr = GPIO_BASE_ADDR + GPIO_PIN_DIR;
++    value = readl(addr) | GPIO_MDC | GPIO_MDIO; /* set MDC/MDIO Pin to output */
++    writel(value,addr);
++    if(bit_MDO)
++    {
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDIO,addr); /* set MDIO to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDC,addr); /* set MDC to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDC,addr); /* set MDC to 0 */
++    }
++    else
++    {
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDIO,addr); /* set MDIO to 0 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_SET);
++        writel(GPIO_MDC,addr); /* set MDC to 1 */
++        addr = (GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++        writel(GPIO_MDC,addr); /* set MDC to 0 */
++    }
++
++#endif
++}
++
++/**********************************************************************
++* read a bit from NWay register through CSR9_MDC,MDIO
++***********************************************************************/
++unsigned int mii_serial_read(void) // read data from mii PHY
++{
++#ifdef CONFIG_SL2312_LPC_IT8712
++  	unsigned char iomode,status;
++	unsigned int value ;
++
++	iomode = LPCGetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET);
++	iomode &= ~(LPC_EDIO) ;		// Set EDIO input
++	iomode |= (LPC_EECK) ;		// Set EECK,EECS output
++	LPCSetConfig(LDN_GPIO, 0xc8 + LPC_GPIO_SET, iomode);
++
++	status = inb_gpio( LPC_GPIO_SET);
++	status |= LPC_EECK ;		//EECK high
++	outb_gpio(LPC_GPIO_SET, status);
++
++	status &= ~(LPC_EECK) ;		//EECK low
++	outb_gpio(LPC_GPIO_SET, status);
++
++	value = inb_gpio( LPC_GPIO_SET);
++
++	value = value>>2 ;
++	value &= 0x01;
++
++	return value ;
++
++#else
++    unsigned int *addr;
++    unsigned int value;
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_PIN_DIR);
++    value = readl(addr) & ~GPIO_MDIO; //0xffbfffff;   /* set MDC to output and MDIO to input */
++    writel(value,addr);
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_SET);
++    writel(GPIO_MDC,addr); /* set MDC to 1 */
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_CLEAR);
++    writel(GPIO_MDC,addr); /* set MDC to 0 */
++
++    addr = (unsigned int *)(GPIO_BASE_ADDR + GPIO_DATA_IN);
++    value = readl(addr);
++    value = (value & (1<<GPIO_MDIO_PIN)) >> GPIO_MDIO_PIN;
++    return(value);
++
++#endif
++}
++
++/***************************************
++* preamble + ST
++***************************************/
++void mii_pre_st(void)
++{
++    unsigned char i;
++
++    for(i=0;i<32;i++) // PREAMBLE
++        mii_serial_write(1);
++    mii_serial_write(0); // ST
++    mii_serial_write(1);
++}
++
++
++/******************************************
++* Read MII register
++* phyad -> physical address
++* regad -> register address
++***************************************** */
++unsigned int mii_read(unsigned char phyad,unsigned char regad)
++{
++    unsigned int i,value;
++    unsigned int bit;
++
++    if (phyad == GPHY_ADDR)
++    {
++        GPIO_MDC_PIN = G_MDC_PIN;   /* assigned MDC pin for giga PHY */
++        GPIO_MDIO_PIN = G_MDIO_PIN; /* assigned MDIO pin for giga PHY */
++    }
++    else
++    {
++        GPIO_MDC_PIN = H_MDC_PIN;   /* assigned MDC pin for 10/100 PHY */
++        GPIO_MDIO_PIN = H_MDIO_PIN; /* assigned MDIO pin for 10/100 PHY */
++    }
++    GPIO_MDC = (1<<GPIO_MDC_PIN);
++    GPIO_MDIO = (1<<GPIO_MDIO_PIN);
++
++    mii_pre_st(); // PRE + ST
++    mii_serial_write(1); // OP: read = 10
++    mii_serial_write(0);
++
++    for (i=0;i<5;i++) { // PHYAD
++        bit= ((phyad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    for (i=0;i<5;i++) { // REGAD
++        bit= ((regad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    mii_serial_read(); // TA_Z
++//    if((bit=mii_serial_read()) !=0 ) // TA_0
++//    {
++//        return(0);
++//    }
++    value=0;
++    for (i=0;i<16;i++) { // READ DATA
++        bit=mii_serial_read();
++        value += (bit<<(15-i)) ;
++    }
++
++    mii_serial_write(0); // dummy clock
++    mii_serial_write(0); // dummy clock
++
++	//printk("%s: phy_addr=0x%x reg_addr=0x%x value=0x%x \n",__func__,phyad,regad,value);
++    return(value);
++}
++
++/******************************************
++* Write MII register
++* phyad -> physical address
++* regad -> register address
++* value -> value to be write
++***************************************** */
++void mii_write(unsigned char phyad,unsigned char regad,unsigned int value)
++{
++    unsigned int i;
++    char bit;
++
++	printk("%s: phy_addr=0x%x reg_addr=0x%x value=0x%x \n",__func__,phyad,regad,value);
++    if (phyad == GPHY_ADDR)
++    {
++        GPIO_MDC_PIN = G_MDC_PIN;   /* assigned MDC pin for giga PHY */
++        GPIO_MDIO_PIN = G_MDIO_PIN; /* assigned MDIO pin for giga PHY */
++    }
++    else
++    {
++        GPIO_MDC_PIN = H_MDC_PIN;   /* assigned MDC pin for 10/100 PHY */
++        GPIO_MDIO_PIN = H_MDIO_PIN; /* assigned MDIO pin for 10/100 PHY */
++    }
++    GPIO_MDC = (1<<GPIO_MDC_PIN);
++    GPIO_MDIO = (1<<GPIO_MDIO_PIN);
++
++    mii_pre_st(); // PRE + ST
++    mii_serial_write(0); // OP: write = 01
++    mii_serial_write(1);
++
++    for (i=0;i<5;i++) { // PHYAD
++        bit= ((phyad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++
++    for (i=0;i<5;i++) { // REGAD
++        bit= ((regad>>(4-i)) & 0x01) ? 1 :0 ;
++        mii_serial_write(bit);
++    }
++    mii_serial_write(1); // TA_1
++    mii_serial_write(0); // TA_0
++
++    for (i=0;i<16;i++) { // OUT DATA
++        bit= ((value>>(15-i)) & 0x01) ? 1 : 0 ;
++        mii_serial_write(bit);
++    }
++    mii_serial_write(0); // dummy clock
++    mii_serial_write(0); // dummy clock
++}
++
++/*----------------------------------------------------------------------
++* gmac_set_rx_mode
++*----------------------------------------------------------------------*/
++static void gmac_set_rx_mode(struct net_device *dev)
++{
++    GMAC_RX_FLTR_T      filter;
++	unsigned int        mc_filter[2];	/* Multicast hash filter */
++    int                 bit_nr;
++	unsigned int        i;
++	GMAC_INFO_T 		*tp = dev->priv;
++
++//    printk("%s : dev->flags = %x \n",__func__,dev->flags);
++//    dev->flags |= IFF_ALLMULTI;  /* temp */
++    filter.bits32 = 0;
++    filter.bits.error = 0;
++	if (dev->flags & IFF_PROMISC)
++	{
++	    filter.bits.error = 1;
++        filter.bits.promiscuous = 1;
++        filter.bits.broadcast = 1;
++        filter.bits.multicast = 1;
++        filter.bits.unicast = 1;
++		mc_filter[1] = mc_filter[0] = 0xffffffff;
++	}
++	else if (dev->flags & IFF_ALLMULTI)
++	{
++//        filter.bits.promiscuous = 1;
++        filter.bits.broadcast = 1;
++        filter.bits.multicast = 1;
++        filter.bits.unicast = 1;
++		mc_filter[1] = mc_filter[0] = 0xffffffff;
++	}
++	else
++	{
++		struct dev_mc_list *mclist;
++
++//        filter.bits.promiscuous = 1;
++        filter.bits.broadcast = 1;
++        filter.bits.multicast = 1;
++        filter.bits.unicast = 1;
++		mc_filter[1] = mc_filter[0] = 0;
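++		/*
++		 * Imperfect hash filtering: the low 6 bits of the CRC-32 of each
++		 * multicast address pick one of 64 hash bits, split across the two
++		 * 32-bit GMAC_MCAST_FIL0/GMAC_MCAST_FIL1 registers programmed below.
++		 */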
++		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;i++, mclist = mclist->next)
++		{
++            bit_nr = ether_crc(ETH_ALEN,mclist->dmi_addr) & 0x0000003f;
++            if (bit_nr < 32)
++            {
++                mc_filter[0] = mc_filter[0] | (1<<bit_nr);
++            }
++            else
++            {
++                mc_filter[1] = mc_filter[1] | (1<<(bit_nr-32));
++            }
++		}
++	}
++    gmac_write_reg(tp->base_addr,GMAC_RX_FLTR,filter.bits32,0xffffffff);  // check base address!!!
++    gmac_write_reg(tp->base_addr,GMAC_MCAST_FIL0,mc_filter[0],0xffffffff);
++    gmac_write_reg(tp->base_addr,GMAC_MCAST_FIL1,mc_filter[1],0xffffffff);
++    return;
++}
++
++#ifdef CONFIG_SL_NAPI
++/*----------------------------------------------------------------------
++* gmac_rx_poll
++*----------------------------------------------------------------------*/
++static int gmac_rx_poll(struct net_device *dev, int *budget)
++{
++	TOE_INFO_T			*toe;
++    GMAC_RXDESC_T   	*curr_desc;
++	struct sk_buff 		*skb;
++    DMA_RWPTR_T			rwptr;
++	unsigned int 		pkt_size;
++	unsigned int        desc_count;
++	unsigned int        good_frame, chksum_status, rx_status;
++	int                 rx_pkts_num = 0;
++	int                 quota = min(dev->quota, *budget);
++	GMAC_INFO_T			*tp = (GMAC_INFO_T *)dev->priv;
++	unsigned int		status4;
++	volatile DMA_RWPTR_T	fq_rwptr;
++	int					max_cnt = TOE_SW_FREEQ_DESC_NUM;//TOE_SW_FREEQ_DESC_NUM = 64
++	//unsigned long		rx_old_bytes;
++	struct net_device_stats *isPtr = (struct net_device_stats *)&tp->ifStatics;
++	//unsigned long long	rx_time;
++
++
++
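++	/*
++	 * NAPI poll handler: the interrupt handler masked interrupt-enable
++	 * register 1 and called __netif_rx_schedule(); up to min(dev->quota,
++	 * *budget) frames are then pulled from the default RX queue here.
++	 */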
++#if 1
++	if (do_again)
++	{
++			toe_gmac_fill_free_q();
++			status4 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++			fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++			//printk("\n%s:: do_again toe_gmac_fill_free_q =======>status4=0x%x =====fq_rwptr =0x%8x======>JKJKJKJKJKJKJKJKJ \n", __func__,status4,fq_rwptr.bits32);
++			if (fq_rwptr.bits.wptr != fq_rwptr.bits.rptr)
++			{
++						//status4 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++						do_again =0;
++						//netif_rx_complete(dev);
++						gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG, status4,	0x1);
++						fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++						rwptr.bits32 = readl(&tp->default_qhdr->word1);
++			}
++			else
++				return 1;
++	}
++#endif
++	rwptr.bits32 = readl(&tp->default_qhdr->word1);
++#if 0
++	if (rwptr.bits.rptr != tp->rx_rwptr.bits.rptr)
++	{
++		mac_stop_txdma((struct net_device *)tp->dev);
++		printk("Default Queue HW RD ptr (0x%x) != SW RD Ptr (0x%x)\n",
++				rwptr.bits32, tp->rx_rwptr.bits.rptr);
++		while(1);
++	}
++#endif
++	toe = (TOE_INFO_T *)&toe_private_data;
++
++	fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++	//printk("%s:---Before-------------->Default Queue HW RW ptr (0x%8x),   fq_rwptr =0x%8x \n",__func__,rwptr.bits32,fq_rwptr.bits32 );
++	//printk("%s:---Before while   rx_pkts_num=%d------rx_finished_idx=0x%x------->Default_Q [rwptr.bits.rptr(SW)=0x%x,   rwptr.bits.wptr(HW) = 0x%x ]---->Free_Q(SW_HW) = 0x%8x \n",__func__,rx_pkts_num,rx_finished_idx,rwptr.bits.rptr,rwptr.bits.wptr,fq_rwptr.bits32 );
++//	while ((--max_cnt) && (rwptr.bits.rptr != rwptr.bits.wptr) && (rx_pkts_num < quota))
++
++	while ((rwptr.bits.rptr != rwptr.bits.wptr) && (rx_pkts_num < quota))
++	{
++
++    	curr_desc = (GMAC_RXDESC_T *)tp->default_desc_base + rwptr.bits.rptr;
++		tp->default_q_cnt++;
++    	tp->rx_curr_desc = (unsigned int)curr_desc;
++    	rx_status = curr_desc->word0.bits.status;
++    	chksum_status = curr_desc->word0.bits.chksum_status;
++    	tp->rx_status_cnt[rx_status]++;
++    	tp->rx_chksum_cnt[chksum_status]++;
++        pkt_size = curr_desc->word1.bits.byte_count;  /*total byte count in a frame*/
++		desc_count = curr_desc->word0.bits.desc_count; /* get descriptor count per frame */
++		good_frame=1;
++		if ((curr_desc->word0.bits32 & (GMAC_RXDESC_0_T_derr | GMAC_RXDESC_0_T_perr))
++			|| (pkt_size < 60)
++		    || (chksum_status & 0x4)
++		    || rx_status )
++//			|| rx_status || (rwptr.bits.rptr > rwptr.bits.wptr ))
++		{
++			good_frame = 0;
++			if (curr_desc->word0.bits32 & GMAC_RXDESC_0_T_derr)
++				printk("%s::derr (GMAC-%d)!!!\n", __func__, tp->port_id);
++			if (curr_desc->word0.bits32 & GMAC_RXDESC_0_T_perr)
++				printk("%s::perr (GMAC-%d)!!!\n", __func__, tp->port_id);
++			if (rx_status)
++			{
++				if (rx_status == 4 || rx_status == 7)
++					isPtr->rx_crc_errors++;
++//				printk("%s::Status=%d (GMAC-%d)!!!\n", __func__, rx_status, tp->port_id);
++			}
++#ifdef SL351x_GMAC_WORKAROUND
++			else if (pkt_size < 60)
++			{
++				if (tp->short_frames_cnt < GMAC_SHORT_FRAME_THRESHOLD)
++					tp->short_frames_cnt++;
++				if (tp->short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++				{
++					GMAC_CONFIG0_T config0;
++					config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++					config0.bits.dis_rx = 1;
++					writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++					config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++					config0.bits.dis_rx = 1;
++					writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++				}
++			}
++#endif
++//			if (chksum_status)
++//				printk("%s::Checksum Status=%d (GMAC-%d)!!!\n", __func__, chksum_status, tp->port_id);
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			dev_kfree_skb_irq(skb);
++		}
++		if (good_frame)
++		{
++			if (curr_desc->word0.bits.drop)
++				printk("%s::Drop (GMAC-%d)!!!\n", __func__, tp->port_id);
++//			if (chksum_status)
++//				printk("%s::Checksum Status=%d (GMAC-%d)!!!\n", __func__, chksum_status, tp->port_id);
++
++#ifdef SL351x_GMAC_WORKAROUND
++			if (tp->short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++			{
++				GMAC_CONFIG0_T config0;
++				config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++				config0.bits.dis_rx = 0;
++				writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++				config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++				config0.bits.dis_rx = 0;
++				writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++			}
++			tp->short_frames_cnt = 0;
++#endif
++	    	/* get frame information from the first descriptor of the frame */
++			isPtr->rx_packets++;
++			//consistent_sync((void *)__va(curr_desc->word2.buf_adr), pkt_size, PCI_DMA_FROMDEVICE);
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			tp->curr_rx_skb = skb;
++	//		curr_desc->word2.buf_adr = 0;
++
++		    //skb_reserve (skb, SKB_RESERVE_BYTES);
++			skb_reserve (skb, RX_INSERT_BYTES);	/* 2 byte align the IP fields. */
++			//if ((skb->tail+pkt_size) > skb->end )
++			//printk("%s::------------->Here skb->len=%d,pkt_size= %d,skb->head=0x%x,skb->tail= 0x%x, skb->end= 0x%x\n", __func__, skb->len, pkt_size,skb->head,skb->tail,skb->end);
++			skb_put(skb, pkt_size);
++
++
++			skb->dev = dev;
++			if (chksum_status == RX_CHKSUM_IP_UDP_TCP_OK)
++			{
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++#ifdef CONFIG_SL351x_NAT
++				if (nat_cfg.enabled && curr_desc->word3.bits.l3_offset && curr_desc->word3.bits.l4_offset)
++				{
++					struct iphdr	*ip_hdr;
++					ip_hdr = (struct iphdr *)&(skb->data[curr_desc->word3.bits.l3_offset]);
++					sl351x_nat_input(skb,
++									tp->port_id,
++									(void *)curr_desc->word3.bits.l3_offset,
++								  	(void *)curr_desc->word3.bits.l4_offset);
++				}
++#endif
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++#if 0
++#ifdef CONFIG_SL351x_RXTOE
++				if (storlink_ctl.rx_max_pktsize) {
++					struct iphdr	*ip_hdr;
++					struct tcphdr	*tcp_hdr;
++					int ip_hdrlen;
++
++ 					ip_hdr = (struct iphdr*)&(skb->data[0]);
++					if ((skb->protocol == __constant_htons(ETH_P_IP)) &&
++					   ((ip_hdr->protocol & 0x00ff) == IPPROTO_TCP)) {
++						ip_hdrlen = ip_hdr->ihl << 2;
++						tcp_hdr = (struct tcphdr*)&(skb->data[ip_hdrlen]);
++						if (tcp_hdr->syn) {
++							struct toe_conn* connection = init_toeq(ip_hdr->version,
++									ip_hdr, tcp_hdr, toe, &(skb->data[0]) - 14);
++							TCP_SKB_CB(skb)->connection = connection;
++							//	hash_dump_entry(TCP_SKB_CB(skb)->connection->hash_entry_index);
++							//		printk("%s::skb data %x, conn %x, mode %x\n",
++							//			__func__, skb->data, connection, connection->mode);
++						}
++					}
++				}
++#endif
++#endif
++			}
++			else if (chksum_status == RX_CHKSUM_IP_OK_ONLY)
++			{
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++#ifdef CONFIG_SL351x_NAT
++				if (nat_cfg.enabled && curr_desc->word3.bits.l3_offset && curr_desc->word3.bits.l4_offset)
++				{
++					struct iphdr	*ip_hdr;
++					ip_hdr = (struct iphdr *)&(skb->data[curr_desc->word3.bits.l3_offset]);
++					if (ip_hdr->protocol == IPPROTO_UDP)
++					{
++						sl351x_nat_input(skb,
++										tp->port_id,
++										(void *)curr_desc->word3.bits.l3_offset,
++								  		(void *)curr_desc->word3.bits.l4_offset);
++					}
++					else if (ip_hdr->protocol == IPPROTO_GRE)
++					{
++						sl351x_nat_input(skb,
++									tp->port_id,
++									(void *)curr_desc->word3.bits.l3_offset,
++								  	(void *)curr_desc->word3.bits.l4_offset);
++					}
++				}
++#endif
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++			}
++			else
++			{
++				skb->protocol = eth_type_trans(skb,dev); /* set skb protocol */
++			}
++			//netif_rx(skb);  /* socket rx */
++			netif_receive_skb(skb); //For NAPI
++			dev->last_rx = jiffies;
++
++			isPtr->rx_bytes += pkt_size;
++			//printk("------------------->isPtr->rx_bytes = %d\n",isPtr->rx_bytes);
++
++
++        }
++		// advance one for Rx default Q 0/1
++		rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, tp->default_desc_num);
++		SET_RPTR(&tp->default_qhdr->word1, rwptr.bits.rptr);
++     	tp->rx_rwptr.bits32 = rwptr.bits32;
++		rx_pkts_num++;
++		//rwptr.bits32 = readl(&tp->default_qhdr->word1);//try read default_qhdr again
++		//fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++		//printk("%s:---Loop  -------->rx_pkts_num=%d------------>Default Queue HW RW ptr = (0x%8x),   fq_rwptr =0x%8x \n",__func__,rx_pkts_num,rwptr.bits32,fq_rwptr.bits32 );
++#if 0
++		if ((status4 & 0x1) == 0)
++		{
++			//if (!((dev->last_rx <= (rx_time + 2)) &&  (isPtr->rx_bytes > (rx_old_bytes + 1000000 ))))
++			if (tp->total_q_cnt_napi < 1024)
++			{
++				tp->total_q_cnt_napi++;
++				toe_gmac_fill_free_q();  //for iperf test disable
++			}
++			//else
++				//printk("%s:---isPtr->rx_bytes =%u , rx_old_bytes =%u\n",__func__,isPtr->rx_bytes,rx_old_bytes );
++
++		}
++#endif
++		//rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, tp->default_desc_num);
++		//printk("%s:---Loop  -------->rx_pkts_num=%d----rwptr.bits.rptr=0x%x-------->Default Queue HW RW ptr = (0x%8x),   fq_rwptr =0x%8x \n",__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits32,fq_rwptr.bits32 );
++		//printk("%s:---Loop  rx_pkts_num=%d------rwptr.bits.rptr=0x%x------->Default_Q [rwptr.bits.rptr(SW)=0x%x,   rwptr.bits.wptr(HW) = 0x%x ]---->Free_Q(SW_HW) = 0x%8x \n",__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.rptr,rwptr.bits.wptr,fq_rwptr.bits32 );
++	}
++	// advance one for Rx default Q 0/1
++
++		//rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, tp->default_desc_num);
++		//SET_RPTR(&tp->default_qhdr->word1, rwptr.bits.rptr);
++     	//tp->rx_rwptr.bits32 = rwptr.bits32;
++     	//rwptr.bits.rptr = rwptr.bits.rptr;
++
++	dev->quota -= rx_pkts_num;
++	*budget -= rx_pkts_num;
++
++	status4 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);//try read SWFQ empty again
++	//fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++	rwptr.bits32 = readl(&tp->default_qhdr->word1); //try read default_qhdr again
++	//printk("%s:---After    rx_pkts_num=%d------rwptr.bits.rptr=0x%x------->Default_Q [rwptr.bits.rptr(SW)=0x%x,   rwptr.bits.wptr(HW) = 0x%x ]---->Free_Q(SW_HW) = 0x%8x \n",__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.rptr,rwptr.bits.wptr,fq_rwptr.bits32 );
++//	if (rwptr.bits.rptr > rwptr.bits.wptr )
++//			{
++				//toe_gmac_disable_rx(dev);
++				//wait_event_interruptible_timeout(freeq_wait,
++					//(rx_pkts_num == 100), CMTP_INTEROP_TIMEOUT);
++				//printk("\n%s:: return 22222=======> rx_pkts_num =%d,   rwptr.bits.rptr=%d,   rwptr.bits.wptr = %d ====---------=======>JKJKJKJKJK\n",
++					//__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.wptr);
++//				return 1;
++//			}
++
++	if (rwptr.bits.rptr == rwptr.bits.wptr)
++	{
++		unsigned int data32;
++			//printk("%s:---[rwptr.bits.rptr == rwptr.bits.wptr]   rx_pkts_num=%d------rwptr.bits.rptr=0x%x------->Default_Q [rwptr.bits.rptr(SW)=0x%x,   rwptr.bits.wptr(HW) = 0x%x ]---->Free_Q(SW_HW) = 0x%8x \n",__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.rptr,rwptr.bits.wptr,fq_rwptr.bits32 );
++
++	    /* Receive descriptor is empty now */
++#if 1
++     if (status4 & 0x1)
++   			{
++   				do_again =1;
++   				//writel(0x40400000, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_ENABLE_4_REG); //disable SWFQ empty interrupt
++   				//toe_gmac_disable_interrupt(tp->irq);
++   				tp->sw_fq_empty_cnt++;
++   				//toe_gmac_disable_rx(dev);
++   				writel(0x07960202, TOE_GMAC0_BASE+GMAC_CONFIG0);
++				writel(0x07960202, TOE_GMAC1_BASE+GMAC_CONFIG0);
++   				//printk("\n%s ::  freeq int-----tp->sw_fq_empty_cnt  =%d---------====================----------------->\n",__func__,tp->sw_fq_empty_cnt);
++   				//while ((fq_rwptr.bits.wptr >= (fq_rwptr.bits.rptr+256)) || (fq_rwptr.bits.wptr <= (fq_rwptr.bits.rptr+256)))
++   				//{
++   					//gmac_write_reg(TOE_GLOBAL_BASE, GLOBAL_INTERRUPT_STATUS_4_REG, status4,
++					//0x1);
++				//printk("\n%s::fq_rwptr.wrptr = %x =======> ===========>here \n", __func__,fq_rwptr.bits32);
++				//if ((status4 & 0x1) == 0)
++					//break;
++				 return 1;
++				//}
++
++			}
++#endif
++        //toe_gmac_fill_free_q();
++        netif_rx_complete(dev);
++        // enable GMAC-0 rx interrupt
++        // class-Q & TOE-Q will be implemented in the future
++        //data32 = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++        //if (tp->port_id == 0)
++        	//data32 |= DEFAULT_Q0_INT_BIT;
++        //else
++        	//data32 |= DEFAULT_Q1_INT_BIT;
++        //writel(data32, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_ENABLE_1_REG);
++		writel(0x3, TOE_GLOBAL_BASE+GLOBAL_INTERRUPT_ENABLE_1_REG);
++		//printk("\n%s::netif_rx_complete-->  rx_pkts_num =%d,   rwptr.bits.rptr=0x%x,   rwptr.bits.wptr = 0x%x ====---------=======>JKJKJKJKJK\n",
++		//__func__,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.wptr);
++        writel(0x07960200, TOE_GMAC0_BASE+GMAC_CONFIG0);
++		writel(0x07960200, TOE_GMAC1_BASE+GMAC_CONFIG0);
++        return 0;
++    }
++    else
++    {
++        //printk("\n%s:: return 1 -->status4= 0x%x,rx_pkts_num =%d,   rwptr.bits.rptr=0x%x,   rwptr.bits.wptr = 0x%x  ======> \n", __func__,status4,rx_pkts_num,rwptr.bits.rptr,rwptr.bits.wptr);
++        return 1;
++    }
++}
++#endif
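For readers following the NAPI poll routine above, here is a minimal, self-contained sketch of the read/write-pointer ring discipline it relies on: hardware advances the write pointer as frames arrive, software consumes descriptors until its read pointer catches up, then publishes the read pointer and re-arms the interrupt. ring_advance() and ring_consume() are illustrative helpers written for this note, not functions from the driver; ring_advance() mirrors what RWPTR_ADVANCE_ONE() is assumed to do.

static inline unsigned int ring_advance(unsigned int idx, unsigned int num)
{
	return (idx + 1 == num) ? 0 : idx + 1;		/* wrap at the ring size */
}

static int ring_consume(unsigned int *rptr, unsigned int wptr,
			unsigned int num, int budget)
{
	int done = 0;

	while (*rptr != wptr && done < budget) {	/* ring is empty when rptr == wptr */
		/* process the descriptor at index *rptr here */
		*rptr = ring_advance(*rptr, num);
		done++;
	}
	return done;	/* caller re-enables the Rx interrupt only when done < budget */
}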
++
++/*----------------------------------------------------------------------
++* gmac_tx_timeout
++*----------------------------------------------------------------------*/
++void gmac_tx_timeout(struct net_device *dev)
++{
++	GMAC_INFO_T				*tp = (GMAC_INFO_T *)dev->priv;
++
++#ifdef CONFIG_SL351x_SYSCTL
++	if (tp->operation && storlink_ctl.link[tp->port_id])
++#else
++	if (tp->operation)
++#endif
++	{
++		netif_wake_queue(dev);
++	}
++}
++
++
++
++/*----------------------------------------------------------------------
++* mac_set_rule_reg
++*----------------------------------------------------------------------*/
++int mac_set_rule_reg(int mac, int rule, int enabled, u32 reg0, u32 reg1, u32 reg2)
++{
++	int		total_key_dwords;
++
++	total_key_dwords = 1;
++
++	if (reg0 & MR_L2_BIT)
++	{
++		if (reg0 & MR_DA_BIT) total_key_dwords += 2;
++		if (reg0 & MR_SA_BIT) total_key_dwords += 2;
++		if ((reg0 & MR_DA_BIT) && ( reg0 & MR_SA_BIT)) total_key_dwords--;
++		if (reg0 & (MR_PPPOE_BIT | MR_VLAN_BIT)) total_key_dwords++;
++	}
++	if (reg0 & MR_L3_BIT)
++	{
++		if (reg0 & (MR_IP_HDR_LEN_BIT | MR_TOS_TRAFFIC_BIT | MR_SPR_BITS))
++			total_key_dwords++;
++		if (reg0 & MR_FLOW_LABLE_BIT) total_key_dwords++;
++		if ((reg0 & MR_IP_VER_BIT) == 0) // IPv4
++		{
++			if (reg1 & 0xff000000) total_key_dwords += 1;
++			if (reg1 & 0x00ff0000) total_key_dwords += 1;
++		}
++		else
++		{
++			if (reg1 & 0xff000000) total_key_dwords += 4;
++			if (reg1 & 0x00ff0000) total_key_dwords += 4;
++		}
++	}
++	if (reg0 & MR_L4_BIT)
++	{
++		if (reg1 & 0x0000f000) total_key_dwords += 1;
++		if (reg1 & 0x00000f00) total_key_dwords += 1;
++		if (reg1 & 0x000000f0) total_key_dwords += 1;
++		if (reg1 & 0x0000000f) total_key_dwords += 1;
++		if (reg2 & 0xf0000000) total_key_dwords += 1;
++		if (reg2 & 0x0f000000) total_key_dwords += 1;
++	}
++	if (reg0 & MR_L7_BIT)
++	{
++		if (reg2 & 0x00f00000) total_key_dwords += 1;
++		if (reg2 & 0x000f0000) total_key_dwords += 1;
++		if (reg2 & 0x0000f000) total_key_dwords += 1;
++		if (reg2 & 0x00000f00) total_key_dwords += 1;
++		if (reg2 & 0x000000f0) total_key_dwords += 1;
++		if (reg2 & 0x0000000f) total_key_dwords += 1;
++	}
++
++	if (total_key_dwords > HASH_MAX_KEY_DWORD)
++		return -1;
++
++	if (total_key_dwords == 0 && enabled)
++		return -2;
++
++	mac_set_rule_enable_bit(mac, rule, 0);
++	if (enabled)
++	{
++		mac_set_MRxCRx(mac, rule, 0, reg0);
++		mac_set_MRxCRx(mac, rule, 1, reg1);
++		mac_set_MRxCRx(mac, rule, 2, reg2);
++		mac_set_rule_action(mac, rule, total_key_dwords);
++		mac_set_rule_enable_bit(mac, rule, enabled);
++	}
++	else
++	{
++		mac_set_rule_action(mac, rule, 0);
++	}
++	return total_key_dwords;
++}
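A short worked example of the key-size arithmetic in mac_set_rule_reg(), using only the bit tests visible in the function itself; the concrete hardware meaning of each reg1 field is not restated here.

/* Illustrative only: reg0 selects MR_L3_BIT and MR_L4_BIT, the IP version
 * bit is clear (IPv4), and reg1 = 0xff00f000, i.e. one IPv4 address field
 * (0xff000000) and one L4 field (0x0000f000) are selected.
 *
 *   total_key_dwords = 1      base value
 *                    + 1      reg1 & 0xff000000, IPv4 branch
 *                    + 1      reg1 & 0x0000f000, L4 branch
 *                    = 3
 *
 * With the IP version bit set (IPv6) the same 0xff000000 selection would
 * add 4 dwords instead of 1, giving 6; long IPv6 rules therefore grow
 * quickly and can exceed HASH_MAX_KEY_DWORD, making the function return -1.
 */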
++
++/*----------------------------------------------------------------------
++* mac_get_rule_enable_bit
++*----------------------------------------------------------------------*/
++int mac_get_rule_enable_bit(int mac, int rule)
++{
++	switch (rule)
++	{
++		case 0: return ((mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) >> 15) & 1);
++		case 1: return ((mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) >> 31) & 1);
++		case 2: return ((mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) >> 15) & 1);
++		case 3: return ((mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) >> 31) & 1);
++		default: return 0;
++	}
++}
++
++/*----------------------------------------------------------------------
++* mac_set_rule_enable_bit
++*----------------------------------------------------------------------*/
++void mac_set_rule_enable_bit(int mac, int rule, int data)
++{
++	u32 reg;
++
++	if (data & ~1)
++		return;
++
++	switch (rule)
++	{
++		case 0:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) & ~(1<<15)) | (data << 15);
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG0, reg);
++			break;
++		case 1:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) & ~(1<<31)) | (data << 31);
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG0, reg);
++			break;
++		case 2:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) & ~(1<<15)) | (data << 15);
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG1, reg);
++			break;
++		case 3:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) & ~(1<<31)) | (data << 31);
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG1, reg);
++	}
++}
++
++/*----------------------------------------------------------------------
++* mac_set_rule_action
++*----------------------------------------------------------------------*/
++int mac_set_rule_action(int mac, int rule, int data)
++{
++	u32 reg;
++
++	if (data > 32)
++		return -1;
++
++	if (data)
++		data = (data << 6) | (data + HASH_ACTION_DWORDS);
++	switch (rule)
++	{
++		case 0:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) & ~(0x7ff));
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG0, reg | data);
++			break;
++		case 1:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG0) & ~(0x7ff<<16));
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG0, reg | (data << 16));
++			break;
++		case 2:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) & ~(0x7ff));
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG1,  reg | data);
++			break;
++		case 3:
++			reg = (mac_read_dma_reg(mac, GMAC_HASH_ENGINE_REG1) & ~(0x7ff<<16));
++			mac_write_dma_reg(mac, GMAC_HASH_ENGINE_REG1, reg | (data << 16));
++			break;
++		default:
++			return -1;
++	}
++
++	return 0;
++}
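The packed action field written by mac_set_rule_action() is easiest to see with numbers. A hedged example follows; HASH_ACTION_DWORDS is assumed to be 3 purely for the arithmetic (the real value comes from the hash definitions in the driver headers).

/* Illustrative encoding, assuming HASH_ACTION_DWORDS == 3:
 *
 *   data (matched key dwords)      = 3
 *   encoded = (3 << 6) | (3 + 3)   = 0xC0 | 0x06 = 0xC6
 *
 * The upper bits of the 11-bit field carry the key length and the lower
 * bits carry the key-plus-action length; the value is merged into
 * GMAC_HASH_ENGINE_REG0/1 after the old field is masked out with ~0x7ff.
 */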
++/*----------------------------------------------------------------------
++* mac_get_MRxCRx
++*----------------------------------------------------------------------*/
++int mac_get_MRxCRx(int mac, int rule, int ctrlreg)
++{
++	int reg;
++
++	switch (rule)
++	{
++		case 0: reg = GMAC_MR0CR0 + ctrlreg * 4; break;
++		case 1: reg = GMAC_MR1CR0 + ctrlreg * 4; break;
++		case 2: reg = GMAC_MR2CR0 + ctrlreg * 4; break;
++		case 3: reg = GMAC_MR3CR0 + ctrlreg * 4; break;
++		default: return 0;
++	}
++	return mac_read_dma_reg(mac, reg);
++}
++
++/*----------------------------------------------------------------------
++* mac_set_MRxCRx
++*----------------------------------------------------------------------*/
++void mac_set_MRxCRx(int mac, int rule, int ctrlreg, u32 data)
++{
++	int reg;
++
++	switch (rule)
++	{
++		case 0: reg = GMAC_MR0CR0 + ctrlreg * 4; break;
++		case 1: reg = GMAC_MR1CR0 + ctrlreg * 4; break;
++		case 2: reg = GMAC_MR2CR0 + ctrlreg * 4; break;
++		case 3: reg = GMAC_MR3CR0 + ctrlreg * 4; break;
++		default: return;
++	}
++	mac_write_dma_reg(mac, reg, data);
++}
++
++/*----------------------------------------------------------------------
++* mac_set_rule_priority
++*----------------------------------------------------------------------*/
++void mac_set_rule_priority(int mac, int p0, int p1, int p2, int p3)
++{
++	int 			i;
++	GMAC_MRxCR0_T	reg[4];
++
++	for (i=0; i<4; i++)
++		reg[i].bits32 = mac_get_MRxCRx(mac, i, 0);
++
++	reg[0].bits.priority = p0;
++	reg[1].bits.priority = p1;
++	reg[2].bits.priority = p2;
++	reg[3].bits.priority = p3;
++
++	for (i=0; i<4; i++)
++		mac_set_MRxCRx(mac, i, 0, reg[i].bits32);
++}
++
++/*----------------------------------------------------------------------
++* gmac_netdev_ioctl
++*----------------------------------------------------------------------*/
++static int gmac_netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++	int 				rc = 0;
++    unsigned char		*hwa = rq->ifr_ifru.ifru_hwaddr.sa_data;
++
++#ifdef br_if_ioctl
++    struct 				ethtool_cmd ecmd; 	//br_if.c will call this ioctl
++	GMAC_INFO_T 		*tp = dev->priv;
++#endif
++
++#ifdef 	CONFIG_SL351x_NAT
++	if (cmd == SIOCDEVPRIVATE)
++		return sl351x_nat_ioctl(dev, rq, cmd);
++#endif
++
++	switch (cmd) {
++	case SIOCETHTOOL:
++#ifdef br_if_ioctl  	//br_if.c will call this ioctl
++		if (!netif_running(dev))
++		{
++			printk("Before changing the H/W address, please bring the device down.\n");
++			return -EINVAL;
++		}
++		memset((void *) &ecmd, 0, sizeof (ecmd));
++		ecmd.supported =
++			SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
++			SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
++			SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
++		ecmd.port = PORT_TP;
++		ecmd.transceiver = XCVR_EXTERNAL;
++		ecmd.phy_address = tp->phy_addr;
++		switch (tp->speed_status)
++		{
++		case GMAC_SPEED_10: ecmd.speed = SPEED_10; break;
++		case GMAC_SPEED_100: ecmd.speed = SPEED_100; break;
++		case GMAC_SPEED_1000: ecmd.speed = SPEED_1000; break;
++		default: ecmd.speed = SPEED_10; break;
++		}
++		ecmd.duplex = tp->full_duplex_status ? DUPLEX_FULL : DUPLEX_HALF;
++		ecmd.advertising = ADVERTISED_TP;
++		ecmd.advertising |= ADVERTISED_Autoneg;
++		ecmd.autoneg = AUTONEG_ENABLE;
++		if (copy_to_user(rq->ifr_data, &ecmd, sizeof (ecmd)))
++			return -EFAULT;
++#endif
++
++        break;
++
++    case SIOCSIFHWADDR:
++		if (!netif_running(dev))
++		{
++			printk("Before changing the H/W address, please bring the device down.\n");
++			return -EINVAL;
++		}
++        gmac_set_mac_address(dev,hwa);
++        break;
++
++	case SIOCGMIIPHY:	/* Get the address of the PHY in use. */
++        break;
++
++	case SIOCGMIIREG:	/* Read the specified MII register. */
++		break;
++
++	case SIOCSMIIREG:	/* Write the specified MII register */
++		break;
++
++	default:
++		rc = -EOPNOTSUPP;
++		break;
++	}
++
++	return rc;
++}
++
++#ifdef SL351x_GMAC_WORKAROUND
++
++#define GMAC_TX_STATE_OFFSET	0x60
++#define GMAC_RX_STATE_OFFSET	0x64
++#define GMAC_POLL_HANGED_NUM	200
++#define GMAC_RX_HANGED_STATE	0x4b2000
++#define GMAC_RX_HANGED_MASK		0xdff000
++#define GMAC_TX_HANGED_STATE	0x34012
++#define GMAC_TX_HANGED_MASK		0xfffff
++#define TOE_GLOBAL_REG_SIZE		(0x78/sizeof(u32))
++#define TOE_DMA_REG_SIZE		(0xd0/sizeof(u32))
++#define TOE_GMAC_REG_SIZE		(0x30/sizeof(u32))
++#define GMAC0_RX_HANG_BIT		(1 << 0)
++#define GMAC0_TX_HANG_BIT		(1 << 1)
++#define GMAC1_RX_HANG_BIT		(1 << 2)
++#define GMAC1_TX_HANG_BIT		(1 << 3)
++
++int		gmac_in_do_workaround;
++#if 0
++int		debug_cnt, poll_max_cnt;
++#endif
++u32		gmac_workaround_cnt[4];
++u32		toe_global_reg[TOE_GLOBAL_REG_SIZE];
++u32		toe_dma_reg[GMAC_NUM][TOE_DMA_REG_SIZE];
++u32		toe_gmac_reg[GMAC_NUM][TOE_GMAC_REG_SIZE];
++u32		gmac_short_frame_workaround_cnt[2];
++
++static void sl351x_gmac_release_buffers(void);
++static void sl351x_gmac_release_swtx_q(void);
++static void sl351x_gmac_release_rx_q(void);
++#ifdef _TOEQ_CLASSQ_READY_
++static void sl351x_gmac_release_class_q(void);
++static void sl351x_gmac_release_toe_q(void);
++static void sl351x_gmac_release_intr_q(void);
++#endif
++static void sl351x_gmac_release_sw_free_q(void);
++static void sl351x_gmac_release_hw_free_q(void);
++#ifdef CONFIG_SL351x_NAT
++static int get_free_desc_cnt(unsigned long rwptr, int total);
++static void sl351x_gmac_release_hwtx_q(void);
++u32     sl351x_nat_workaround_cnt;
++#endif
++void sl351x_gmac_save_reg(void);
++void sl351x_gmac_restore_reg(void);
++
++
++/*----------------------------------------------------------------------
++* 	sl351x_poll_gmac_hanged_status
++* 	- Called by timer routine, period 10ms
++*	- If (state != 0 && state == prev state && )
++*----------------------------------------------------------------------*/
++void sl351x_poll_gmac_hanged_status(u32 data)
++{
++	int 			i;
++	u32 			state;
++	TOE_INFO_T		*toe;
++	GMAC_INFO_T		*tp;
++	u32				hanged_state;
++	// int				old_operation[GMAC_NUM];
++#ifdef CONFIG_SL351x_NAT
++	u32				hw_free_cnt;
++#endif
++
++	if (gmac_in_do_workaround)
++		return;
++
++	gmac_in_do_workaround = 1;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	hanged_state = 0;
++
++#ifdef SL351x_TEST_WORKAROUND
++	if (toe->gmac[0].operation || toe->gmac[1].operation)
++	{
++		debug_cnt++;
++		if (debug_cnt == (30 * HZ))
++		{
++			debug_cnt = 0;
++			hanged_state = GMAC0_RX_HANG_BIT;
++			goto do_workaround;
++		}
++	}
++#endif
++	if (toe->gmac[0].operation)
++		hanged_state |= GMAC0_RX_HANG_BIT | GMAC0_TX_HANG_BIT;
++
++#if (GMAC_NUM > 1)
++	if (toe->gmac[1].operation)
++		hanged_state |= GMAC1_RX_HANG_BIT | GMAC1_TX_HANG_BIT;
++#endif
++
++	for (i=0; i<GMAC_POLL_HANGED_NUM; i++)
++	{
++		if (hanged_state & GMAC0_RX_HANG_BIT)
++		{
++			state = readl(TOE_GMAC0_BASE + GMAC_RX_STATE_OFFSET) & GMAC_RX_HANGED_MASK;
++			if (state != GMAC_RX_HANGED_STATE)
++				hanged_state &= ~GMAC0_RX_HANG_BIT;
++		}
++		if (hanged_state & GMAC0_TX_HANG_BIT)
++		{
++			state = readl(TOE_GMAC0_BASE + GMAC_TX_STATE_OFFSET) & GMAC_TX_HANGED_MASK;
++			if (state != GMAC_TX_HANGED_STATE)
++				hanged_state &= ~GMAC0_TX_HANG_BIT;
++		}
++#if (GMAC_NUM > 1)
++		if (hanged_state & GMAC1_RX_HANG_BIT)
++		{
++			state = readl(TOE_GMAC1_BASE + GMAC_RX_STATE_OFFSET) & GMAC_RX_HANGED_MASK;
++			if (state != GMAC_RX_HANGED_STATE)
++				hanged_state &= ~GMAC1_RX_HANG_BIT;
++		}
++		if (hanged_state & GMAC1_TX_HANG_BIT)
++		{
++			state = readl(TOE_GMAC1_BASE + GMAC_TX_STATE_OFFSET) & GMAC_TX_HANGED_MASK;
++			if (state != GMAC_TX_HANGED_STATE)
++				hanged_state &= ~GMAC1_TX_HANG_BIT;
++		}
++#endif
++		if (!hanged_state)
++		{
++#if 0
++			if (i < poll_max_cnt)
++				poll_max_cnt = i;
++#endif
++			if (toe->gmac[0].short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++			{
++				gmac_short_frame_workaround_cnt[0]++;
++				toe->gmac[0].short_frames_cnt = 0;
++				goto do_workaround;
++			}
++#if (GMAC_NUM > 1)
++			if (toe->gmac[1].short_frames_cnt >= GMAC_SHORT_FRAME_THRESHOLD)
++			{
++				gmac_short_frame_workaround_cnt[1]++;
++				toe->gmac[1].short_frames_cnt = 0;
++				goto do_workaround;
++			}
++#endif
++
++#ifdef CONFIG_SL351x_NAT
++			hw_free_cnt = readl(TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++			hw_free_cnt = get_free_desc_cnt(hw_free_cnt, TOE_HW_FREEQ_DESC_NUM);
++#ifdef NAT_WORKAROUND_BY_RESET_GMAC
++			if (readl(TOE_GLOBAL_BASE + 0x4084) && (hw_free_cnt <= PAUSE_SET_HW_FREEQ))
++			{
++				sl351x_nat_workaround_cnt++;
++				goto do_workaround;
++			}
++#else
++			if (readl(TOE_GLOBAL_BASE + 0x4084) && (hw_free_cnt <= (PAUSE_SET_HW_FREEQ*2)))
++			{
++				sl351x_nat_workaround_cnt++;
++				sl351x_nat_workaround_handler();
++			}
++#endif
++#endif
++			gmac_in_do_workaround = 0;
++			add_timer(&gmac_workround_timer_obj);
++			return;
++		}
++	}
++
++do_workaround:
++
++	gmac_initialized = 0;
++	if (hanged_state)
++	{
++		if (hanged_state & GMAC0_RX_HANG_BIT) gmac_workaround_cnt[0]++;
++		if (hanged_state & GMAC0_TX_HANG_BIT) gmac_workaround_cnt[1]++;
++		if (hanged_state & GMAC1_RX_HANG_BIT) gmac_workaround_cnt[2]++;
++		if (hanged_state & GMAC1_TX_HANG_BIT) gmac_workaround_cnt[3]++;
++	}
++
++	for (i=0; i<GMAC_NUM; i++)
++	{
++		tp = (GMAC_INFO_T *)&toe->gmac[i];
++		// old_operation[i] = tp->operation;
++		if (tp->operation)
++		{
++			netif_stop_queue(tp->dev);
++			clear_bit(__LINK_STATE_START, &tp->dev->state);
++			toe_gmac_disable_interrupt(tp->irq);
++			toe_gmac_disable_tx_rx(tp->dev);
++			toe_gmac_hw_stop(tp->dev);
++		}
++	}
++
++	// clear all status bits
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_0_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_1_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_2_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_3_REG);
++	writel(0xffffffff, TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_4_REG);
++
++#if 0
++	if ((hanged_state & GMAC0_RX_HANG_BIT) &&
++		(readl(TOE_GMAC0_DMA_BASE + 0xdc) & 0xf0))
++	{
++		struct sk_buff *skb;
++		unsigned int buf;
++		buf = readl(TOE_GMAC0_DMA_BASE + 0x68) & ~3;
++#ifdef CONFIG_SL351x_NAT
++		if (buf < toe->hwfq_buf_base_dma || buf > toe->hwfq_buf_end_dma)
++#endif
++		{
++			skb = (struct sk_buff *)(REG32(buf - SKB_RESERVE_BYTES));
++			printk("GMAC-0 free a loss SKB 0x%x\n", (u32)skb);
++			dev_kfree_skb(skb);
++		}
++	}
++	if ((hanged_state & GMAC1_RX_HANG_BIT)  &&
++		(readl(TOE_GMAC1_DMA_BASE + 0xdc) & 0xf0))
++	{
++		struct sk_buff *skb;
++		unsigned int buf;
++		buf = readl(TOE_GMAC1_DMA_BASE + 0x68) & ~3;
++#ifdef CONFIG_SL351x_NAT
++		if (buf < toe->hwfq_buf_base_dma || buf > toe->hwfq_buf_end_dma)
++#endif
++		{
++			skb = (struct sk_buff *)(REG32(buf - SKB_RESERVE_BYTES));
++			printk("GMAC-1 free a loss SKB 0x%x\n", (u32)skb);
++			dev_kfree_skb(skb);
++		}
++	}
++#endif
++
++	sl351x_gmac_release_buffers();
++	sl351x_gmac_save_reg();
++	toe_gmac_sw_reset();
++	sl351x_gmac_restore_reg();
++
++	if (toe->gmac[0].default_qhdr->word1.bits32)
++	{
++		// printk("===> toe->gmac[0].default_qhdr->word1 = 0x%x\n", toe->gmac[0].default_qhdr->word1);
++		sl351x_gmac_release_rx_q();
++		writel(0, &toe->gmac[0].default_qhdr->word1);
++	}
++	if (toe->gmac[1].default_qhdr->word1.bits32)
++	{
++		// printk("===> toe->gmac[1].default_qhdr->word1 = 0x%x\n", toe->gmac[1].default_qhdr->word1);
++		sl351x_gmac_release_rx_q();
++		writel(0, &toe->gmac[1].default_qhdr->word1);
++	}
++
++	gmac_initialized = 1;
++
++#ifdef 	CONFIG_SL351x_NAT
++	writel(0, TOE_GLOBAL_BASE + 0x4084);
++#endif
++
++	for (i=0; i<GMAC_NUM; i++)
++	{
++		tp = (GMAC_INFO_T *)&toe->gmac[i];
++ 		if (tp->operation)
++ 		{
++			toe_gmac_enable_interrupt(tp->irq);
++			toe_gmac_hw_start(tp->dev);
++			toe_gmac_enable_tx_rx(tp->dev);
++			netif_wake_queue(tp->dev);
++			set_bit(__LINK_STATE_START, &tp->dev->state);
++		}
++	}
++
++	gmac_in_do_workaround = 0;
++	add_timer(&gmac_workround_timer_obj);
++}
++
++/*----------------------------------------------------------------------
++*	get_free_desc_cnt
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static int get_free_desc_cnt(unsigned long rwptr, int total)
++{
++	unsigned short wptr = rwptr & 0xffff;
++	unsigned short rptr = rwptr >> 16;
++
++	if (wptr >= rptr)
++		return (total - wptr + rptr);
++	else
++		return (rptr - wptr);
++}
++#endif
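get_free_desc_cnt() packs both pointers into one 32-bit register (write pointer in the low half, read pointer in the high half, as the shifts above show). Two worked examples with made-up numbers:

/* total = 256 descriptors
 *
 *   rwptr = (10 << 16) | 250   ->  rptr = 10,  wptr = 250
 *       wptr >= rptr, so free = 256 - 250 + 10 = 16
 *
 *   rwptr = (250 << 16) | 10   ->  rptr = 250, wptr = 10
 *       wptr <  rptr, so free = 250 - 10 = 240
 */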
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_buffers
++*----------------------------------------------------------------------*/
++static void sl351x_gmac_release_buffers(void)
++{
++	// Free buffers & Descriptors in all SW Tx Queues
++	sl351x_gmac_release_swtx_q();
++
++	// Free buffers in Default Rx Queues
++	sl351x_gmac_release_rx_q();
++
++#ifdef _TOEQ_CLASSQ_READY_
++	// Free buffers in Classification Queues
++	sl351x_gmac_release_class_q();
++
++	// Free buffers in TOE Queues
++	sl351x_gmac_release_toe_q();
++
++	// Free buffers in Interrupt Queues
++	sl351x_gmac_release_intr_q();
++#endif
++
++	// Free buffers & descriptors in SW free queue
++	sl351x_gmac_release_sw_free_q();
++
++	// Free buffers & descriptors in HW free queue
++	sl351x_gmac_release_hw_free_q();
++
++#ifdef CONFIG_SL351x_NAT
++	// Free buffers & descriptors in HW free queue
++	sl351x_gmac_release_hwtx_q();
++#endif
++}
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_swtx_q
++*----------------------------------------------------------------------*/
++static void sl351x_gmac_release_swtx_q(void)
++{
++	int				i, j;
++	GMAC_TXDESC_T	*curr_desc;
++	unsigned int	desc_count;
++	TOE_INFO_T		*toe;
++	GMAC_INFO_T		*tp;
++	GMAC_SWTXQ_T	*swtxq;
++	DMA_RWPTR_T		rwptr;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	tp = (GMAC_INFO_T *)&toe->gmac[0];
++	for (i=0; i<GMAC_NUM; i++, tp++)
++	{
++		if (!tp->existed) continue;
++		swtxq = (GMAC_SWTXQ_T *)&tp->swtxq[0];
++		for (j=0; j<TOE_SW_TXQ_NUM; j++, swtxq++)
++		{
++			for (;;)
++			{
++				rwptr.bits32 = readl(swtxq->rwptr_reg);
++				if (rwptr.bits.rptr == swtxq->finished_idx)
++				break;
++				curr_desc = (GMAC_TXDESC_T *)swtxq->desc_base + swtxq->finished_idx;
++				// if (curr_desc->word0.bits.status_tx_ok)
++				{
++					desc_count = curr_desc->word0.bits.desc_count;
++					while (--desc_count)
++					{
++						curr_desc->word0.bits.status_tx_ok = 0;
++						swtxq->finished_idx = RWPTR_ADVANCE_ONE(swtxq->finished_idx, swtxq->total_desc_num);
++						curr_desc = (GMAC_TXDESC_T *)swtxq->desc_base + swtxq->finished_idx;
++					}
++
++					curr_desc->word0.bits.status_tx_ok = 0;
++					if (swtxq->tx_skb[swtxq->finished_idx])
++					{
++						dev_kfree_skb_irq(swtxq->tx_skb[swtxq->finished_idx]);
++						swtxq->tx_skb[swtxq->finished_idx] = NULL;
++					}
++				}
++				swtxq->finished_idx = RWPTR_ADVANCE_ONE(swtxq->finished_idx, swtxq->total_desc_num);
++			}
++			writel(0, swtxq->rwptr_reg);
++			swtxq->finished_idx = 0;
++		}
++	}
++
++}
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_rx_q
++*----------------------------------------------------------------------*/
++static void sl351x_gmac_release_rx_q(void)
++{
++	int				i;
++	TOE_INFO_T		*toe;
++	GMAC_INFO_T		*tp;
++	DMA_RWPTR_T		rwptr;
++	volatile GMAC_RXDESC_T	*curr_desc;
++	struct sk_buff			*skb;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	tp = (GMAC_INFO_T *)&toe->gmac[0];
++	for (i=0; i<GMAC_NUM; i++, tp++)
++	{
++		if (!tp->existed) continue;
++		rwptr.bits32 = readl(&tp->default_qhdr->word1);
++		while (rwptr.bits.rptr != rwptr.bits.wptr)
++		{
++			curr_desc = (GMAC_RXDESC_T *)tp->default_desc_base + rwptr.bits.rptr;
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			dev_kfree_skb_irq(skb);
++			rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, tp->default_desc_num);
++			SET_RPTR(&tp->default_qhdr->word1, rwptr.bits.rptr);
++			rwptr.bits32 = readl(&tp->default_qhdr->word1);
++		}  // while
++		writel(0, &tp->default_qhdr->word1);
++		tp->rx_rwptr.bits32 = 0;
++	} // for
++
++}
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_class_q
++*----------------------------------------------------------------------*/
++#ifdef _TOEQ_CLASSQ_READY_
++static void sl351x_gmac_release_class_q(void)
++{
++	int				i;
++	TOE_INFO_T		*toe;
++	CLASSQ_INFO_T	*classq;
++	DMA_RWPTR_T		rwptr;
++	volatile GMAC_RXDESC_T	*curr_desc;
++	struct sk_buff			*skb;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	classq = (CLASSQ_INFO_T *)&toe->classq[0];
++	for (i=0; i<TOE_CLASS_QUEUE_NUM; i++, classq++)
++	{
++		rwptr.bits32 = readl(&classq->qhdr->word1);
++		while (rwptr.bits.rptr != rwptr.bits.wptr)
++		{
++			curr_desc = (GMAC_RXDESC_T *)classq->desc_base + rwptr.bits.rptr;
++			skb = (struct sk_buff *)(REG32(__va(curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			dev_kfree_skb_irq(skb);
++			rwptr.bits.rptr = RWPTR_ADVANCE_ONE(rwptr.bits.rptr, classq->desc_num);
++			SET_RPTR(&classq->qhdr->word1, rwptr.bits.rptr);
++			rwptr.bits32 = readl(&classq->qhdr->word1);
++		}  // while
++		writel(0, &classq->qhdr->word1);
++		classq->rwptr.bits32 = 0;
++	} // for
++
++}
++#endif
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_toe_q
++*----------------------------------------------------------------------*/
++#ifdef _TOEQ_CLASSQ_READY_
++static void sl351x_gmac_release_toe_q(void)
++{
++	int				i;
++	TOE_INFO_T		*toe;
++	TOEQ_INFO_T		*toeq_info;
++	TOE_QHDR_T		*toe_qhdr;
++	DMA_RWPTR_T		rwptr;
++	volatile GMAC_RXDESC_T	*curr_desc;
++	unsigned int	rptr, wptr;
++	GMAC_RXDESC_T	*toe_curr_desc;
++	struct sk_buff			*skb;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	toe_qhdr = (TOE_QHDR_T *)TOE_TOE_QUE_HDR_BASE;
++	for (i=0; i<TOE_TOE_QUEUE_NUM; i++, toe_qhdr++)
++	{
++		toeq_info = (TOEQ_INFO_T *)&toe->toeq[i];
++		wptr = toe_qhdr->word1.bits.wptr;
++		rptr = toe_qhdr->word1.bits.rptr;
++		while (rptr != wptr)
++		{
++			toe_curr_desc = (GMAC_RXDESC_T *)toeq_info->desc_base + rptr;
++			skb = (struct sk_buff *)(REG32(__va(toe_curr_desc->word2.buf_adr) - SKB_RESERVE_BYTES));
++			dev_kfree_skb_irq(skb);
++			rptr = RWPTR_ADVANCE_ONE(rptr, toeq_info->desc_num);
++			SET_RPTR(&toe_qhdr->word1.bits32, rptr);
++			wptr = toe_qhdr->word1.bits.wptr;
++			rptr = toe_qhdr->word1.bits.rptr;
++		}
++		toe_qhdr->word1.bits32 = 0;
++		toeq_info->rwptr.bits32 = 0;
++	}
++}
++#endif
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_intr_q
++*----------------------------------------------------------------------*/
++#ifdef _TOEQ_CLASSQ_READY_
++static void sl351x_gmac_release_intr_q(void)
++{
++}
++#endif
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_sw_free_q
++*----------------------------------------------------------------------*/
++static void sl351x_gmac_release_sw_free_q(void)
++{
++	TOE_INFO_T				*toe;
++	volatile DMA_RWPTR_T	fq_rwptr;
++	volatile GMAC_RXDESC_T	*fq_desc;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++
++	while ((unsigned short)RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM) != fq_rwptr.bits.rptr)
++	{
++		struct sk_buff *skb;
++		if ((skb = dev_alloc_skb(SW_RX_BUF_SIZE))==NULL)  /* allocate socket buffer */
++		{
++			printk("%s::skb buffer allocation failed!\n", __func__); while(1);
++		}
++		// *(unsigned int *)(skb->data) = (unsigned int)skb;
++		REG32(skb->data) = (unsigned long)skb;
++		skb_reserve(skb, SKB_RESERVE_BYTES);
++
++		fq_rwptr.bits.wptr = RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM);
++		fq_desc = (volatile GMAC_RXDESC_T *)toe->swfq_desc_base + fq_rwptr.bits.wptr;
++		fq_desc->word2.buf_adr = (unsigned int)__pa(skb->data);
++		SET_WPTR(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG, fq_rwptr.bits.wptr);
++		fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++	}
++
++	toe->fq_rx_rwptr.bits.wptr = TOE_SW_FREEQ_DESC_NUM - 1;
++	toe->fq_rx_rwptr.bits.rptr = 0;
++	writel(toe->fq_rx_rwptr.bits32, TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++
++}
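The refill loop above, like the Rx paths earlier in the file, relies on a small bookkeeping trick: the skb pointer is stored at the head of its own data buffer before the head is reserved, so the skb can later be recovered from the DMA buffer address alone. A sketch of that convention follows, assuming the usual kernel skb headers and the driver's SKB_RESERVE_BYTES macro; stash_skb() and recover_skb() are illustrative names, not driver functions.

static inline void stash_skb(struct sk_buff *skb)
{
	/* equivalent to REG32(skb->data) = (unsigned long)skb above */
	*(unsigned int *)skb->data = (unsigned int)skb;
	skb_reserve(skb, SKB_RESERVE_BYTES);	/* hide the stashed pointer from the data area */
}

static inline struct sk_buff *recover_skb(unsigned int buf_adr)
{
	/* buf_adr is the physical address that was written into word2.buf_adr */
	return (struct sk_buff *)(*(unsigned int *)(__va(buf_adr) - SKB_RESERVE_BYTES));
}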
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_hw_free_q
++*----------------------------------------------------------------------*/
++static void sl351x_gmac_release_hw_free_q(void)
++{
++	DMA_RWPTR_T			rwptr_reg;
++
++#ifdef CONFIG_SL351x_NAT
++	int					i;
++	TOE_INFO_T			*toe;
++	GMAC_RXDESC_T		*desc_ptr;
++	unsigned int		buf_ptr;
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	desc_ptr = (GMAC_RXDESC_T *)toe->hwfq_desc_base;
++	buf_ptr = (unsigned int)toe->hwfq_buf_base_dma;
++	for (i=0; i<TOE_HW_FREEQ_DESC_NUM; i++)
++	{
++		desc_ptr->word0.bits.buffer_size = HW_RX_BUF_SIZE;
++		desc_ptr->word1.bits.sw_id = i;
++		desc_ptr->word2.buf_adr = (unsigned int)buf_ptr;
++   		desc_ptr++;
++   		buf_ptr += HW_RX_BUF_SIZE;
++	}
++#endif
++	rwptr_reg.bits.wptr = TOE_HW_FREEQ_DESC_NUM - 1;
++	rwptr_reg.bits.rptr = 0;
++	writel(rwptr_reg.bits32, TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++}
++
++/*----------------------------------------------------------------------
++* 	sl351x_gmac_release_hwtx_q
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static void sl351x_gmac_release_hwtx_q(void)
++{
++	int				i;
++	unsigned int	rwptr_addr;
++
++	rwptr_addr = TOE_GMAC0_DMA_BASE + GMAC_HW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_HW_TXQ_NUM; i++)
++	{
++		writel(0, rwptr_addr);
++		rwptr_addr += 4;
++	}
++	rwptr_addr = TOE_GMAC1_DMA_BASE + GMAC_HW_TX_QUEUE0_PTR_REG;
++	for (i=0; i<TOE_HW_TXQ_NUM; i++)
++	{
++		writel(0, rwptr_addr);
++		rwptr_addr += 4;
++	}
++}
++
++/*----------------------------------------------------------------------
++* 	sl351x_nat_workaround_handler
++*----------------------------------------------------------------------*/
++#ifndef NAT_WORKAROUND_BY_RESET_GMAC
++void sl351x_nat_workaround_handler(void)
++{
++	int				i;
++	TOE_INFO_T		*toe;
++	GMAC_RXDESC_T	*desc_ptr;
++	unsigned int	buf_ptr;
++	DMA_RWPTR_T		rwptr;
++	GMAC_CONFIG0_T	config0;
++
++	// Disable Rx of GMAC-0 & 1
++	config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++	config0.bits.dis_rx = 1;
++	writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++	config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++	config0.bits.dis_rx = 1;
++	writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++
++	toe = (TOE_INFO_T *)&toe_private_data;
++	desc_ptr = (GMAC_RXDESC_T *)toe->hwfq_desc_base;
++	buf_ptr = (unsigned int)toe->hwfq_buf_base_dma;
++	for (i=0; i<TOE_HW_FREEQ_DESC_NUM; i++)
++	{
++		desc_ptr->word0.bits.buffer_size = HW_RX_BUF_SIZE;
++		desc_ptr->word1.bits.sw_id = i;
++		desc_ptr->word2.buf_adr = (unsigned int)buf_ptr;
++		desc_ptr++;
++		buf_ptr += HW_RX_BUF_SIZE;
++	}
++	rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++	rwptr.bits.wptr = RWPTR_RECEDE_ONE(rwptr.bits.rptr, TOE_HW_FREEQ_DESC_NUM);
++	writel(rwptr.bits32, TOE_GLOBAL_BASE + GLOBAL_HWFQ_RWPTR_REG);
++	writel(0, TOE_GLOBAL_BASE + 0x4084);
++
++	// Enable Rx of GMAC-0 & 1
++	config0.bits32 = readl(TOE_GMAC0_BASE+GMAC_CONFIG0);
++	config0.bits.dis_rx = 0;
++	writel(config0.bits32, TOE_GMAC0_BASE+GMAC_CONFIG0);
++	config0.bits32 = readl(TOE_GMAC1_BASE+GMAC_CONFIG0);
++	config0.bits.dis_rx = 0;
++	writel(config0.bits32, TOE_GMAC1_BASE+GMAC_CONFIG0);
++}
++#endif
++#endif // CONFIG_SL351x_NAT
++
++#endif // SL351x_GMAC_WORKAROUND
++
++/* get the mac addresses from flash
++ * can't do this in module_init because the mtd driver is initialized after the ethernet driver
++ */
++static __init int sl351x_mac_address_init(void)
++{
++	GMAC_INFO_T		*tp;
++	struct sockaddr sock;
++	int i;
++
++	/* get mac address from FLASH */
++	gmac_get_mac_address();
++
++	for (i = 0; i < GMAC_NUM; i++) {
++		tp = (GMAC_INFO_T *)&toe_private_data.gmac[i];
++		memcpy(&sock.sa_data[0],ð_mac[tp->port_id][0],6);
++		gmac_set_mac_address(tp->dev,(void *)&sock);
++	}
++
++        return 0;
++}
++late_initcall(sl351x_mac_address_init);
++
++
+Index: linux-2.6.23.16/drivers/net/sl351x_hash.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_hash.c	2008-03-15 16:59:32.361970401 +0200
+@@ -0,0 +1,713 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*--------------------------------------------------------------------------
++* Name			: sl351x_hash.c
++* Description	:
++*		Handle Storlink SL351x Hash Functions
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*	03/13/2006	Gary Chen	Create and implement
++*
++****************************************************************************/
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#define	 MIDWAY
++#define	 SL_LEPUS
++
++#include 
++#include 
++#include 
++
++#ifndef RXTOE_DEBUG
++#define RXTOE_DEBUG
++#endif
++#undef RXTOE_DEBUG
++
++/*----------------------------------------------------------------------
++* Definition
++*----------------------------------------------------------------------*/
++#define	hash_printf				printk
++
++#define HASH_TIMER_PERIOD		(30)	// seconds
++#define HASH_ILLEGAL_INDEX		0xffff
++
++/*----------------------------------------------------------------------
++* Variables
++*----------------------------------------------------------------------*/
++u32					hash_nat_owner_bits[HASH_TOTAL_ENTRIES/32];
++char 				hash_tables[HASH_TOTAL_ENTRIES][HASH_MAX_BYTES] __attribute__ ((aligned(16)));
++static struct timer_list hash_timer_obj;
++LIST_HEAD(hash_timeout_list);
++
++/*----------------------------------------------------------------------
++* Functions
++*----------------------------------------------------------------------*/
++void dm_long(u32 location, int length);
++static void hash_timer_func(u32 data);
++
++/*----------------------------------------------------------------------
++* hash_init
++*----------------------------------------------------------------------*/
++void sl351x_hash_init(void)
++{
++	int i;
++	volatile u32 *dp1, *dp2, dword;
++
++	dp1 = (volatile u32 *) TOE_V_BIT_BASE;
++	dp2 = (volatile u32 *) TOE_A_BIT_BASE;
++
++	for (i=0; iindex, 1);
++//	printk("Dump hash key!\n");
++//	dump_hash_key(entry);
++	return entry->index;
++}
++
++/*----------------------------------------------------------------------
++* hash_set_valid_flag
++*----------------------------------------------------------------------*/
++void hash_set_valid_flag(int index, int valid)
++{
++	register u32 reg32;
++
++	reg32 = TOE_V_BIT_BASE + (index/32) * 4;
++
++	if (valid)
++	{
++		writel(readl(reg32) | (1 << (index%32)), reg32);
++	}
++	else
++	{
++		writel(readl(reg32) & ~(1 << (index%32)), reg32);
++	}
++}
++
++/*----------------------------------------------------------------------
++* hash_set_nat_owner_flag
++*----------------------------------------------------------------------*/
++void hash_set_nat_owner_flag(int index, int valid)
++{
++	if (valid)
++	{
++		hash_nat_owner_bits[index/32] |= (1 << (index % 32));
++	}
++	else
++	{
++		hash_nat_owner_bits[index/32] &= ~(1 << (index % 32));
++	}
++}
++
++
++/*----------------------------------------------------------------------
++* hash_build_keys
++*----------------------------------------------------------------------*/
++int hash_build_keys(u32 *destp, HASH_ENTRY_T *entry)
++{
++	u32 	data;
++	unsigned char 	*cp;
++	int				i, j;
++	unsigned short 	index;
++	int 			total;
++
++	memset((void *)destp, 0, HASH_MAX_BYTES);
++	cp = (unsigned char *)destp;
++
++	if (entry->key_present.port || entry->key_present.Ethertype)
++	{
++		HASH_PUSH_WORD(cp, entry->key.Ethertype);		// word 0
++		HASH_PUSH_BYTE(cp, entry->key.port);			// Byte 2
++		HASH_PUSH_BYTE(cp, 0);							// Byte 3
++	}
++	else
++	{
++		HASH_PUSH_DWORD(cp, 0);
++	}
++
++	if (entry->key_present.da || entry->key_present.sa)
++	{
++		unsigned char mac[4];
++		if (entry->key_present.da)
++		{
++			for (i=0; i<4; i++)
++				HASH_PUSH_BYTE(cp, entry->key.da[i]);
++		}
++		mac[0] = (entry->key_present.da) ? entry->key.da[4] : 0;
++		mac[1] = (entry->key_present.da) ? entry->key.da[5] : 0;
++		mac[2] = (entry->key_present.sa) ? entry->key.sa[0] : 0;
++		mac[3] = (entry->key_present.sa) ? entry->key.sa[1] : 0;
++		data = mac[0] + (mac[1]<<8) + (mac[2]<<16) + (mac[3]<<24);
++		HASH_PUSH_DWORD(cp, data);
++		if (entry->key_present.sa)
++		{
++			for (i=2; i<6; i++)
++				HASH_PUSH_BYTE(cp, entry->key.sa[i]);
++		}
++	}
++
++	if (entry->key_present.pppoe_sid || entry->key_present.vlan_id)
++	{
++		HASH_PUSH_WORD(cp, entry->key.vlan_id);		// low word
++		HASH_PUSH_WORD(cp, entry->key.pppoe_sid);	// high word
++	}
++	if (entry->key_present.ipv4_hdrlen || entry->key_present.ip_tos || entry->key_present.ip_protocol)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.ip_protocol);		// Byte 0
++		HASH_PUSH_BYTE(cp, entry->key.ip_tos);			// Byte 1
++		HASH_PUSH_BYTE(cp, entry->key.ipv4_hdrlen);		// Byte 2
++		HASH_PUSH_BYTE(cp, 0);							// Byte 3
++	}
++
++	if (entry->key_present.ipv6_flow_label)
++	{
++		HASH_PUSH_DWORD(cp, entry->key.ipv6_flow_label);	// low word
++	}
++	if (entry->key_present.sip)
++	{
++		// input (entry->key.sip[i]) is network-oriented
++		// output (hash key) is host-oriented
++		for (i=3; i>=0; i--)
++			HASH_PUSH_BYTE(cp, entry->key.sip[i]);
++		if (entry->key.ipv6)
++		{
++			for (i=4; i<16; i+=4)
++			{
++				for (j=i+3; j>=i; j--)
++					HASH_PUSH_BYTE(cp, entry->key.sip[j]);
++			}
++		}
++	}
++	if (entry->key_present.dip)
++	{
++		// input (entry->key.dip[i]) is network-oriented
++		// output (hash key) is host-oriented
++		for (i=3; i>=0; i--)
++			HASH_PUSH_BYTE(cp, entry->key.dip[i]);
++		if (entry->key.ipv6)
++		{
++			for (i=4; i<16; i+=4)
++			{
++				for (j=i+3; j>=i; j--)
++					HASH_PUSH_BYTE(cp, entry->key.dip[j]);
++			}
++		}
++	}
++
++	if (entry->key_present.l4_bytes_0_3)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[0]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[1]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[2]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[3]);
++	}
++	if (entry->key_present.l4_bytes_4_7)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[4]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[5]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[6]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[7]);
++	}
++	if (entry->key_present.l4_bytes_8_11)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[8]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[9]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[10]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[11]);
++	}
++	if (entry->key_present.l4_bytes_12_15)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[12]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[13]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[14]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[15]);
++	}
++	if (entry->key_present.l4_bytes_16_19)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[16]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[17]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[18]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[19]);
++	}
++	if (entry->key_present.l4_bytes_20_23)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[20]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[21]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[22]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[23]);
++	}
++	if (entry->key_present.l7_bytes_0_3)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[0]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[1]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[2]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[3]);
++	}
++	if (entry->key_present.l7_bytes_4_7)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[4]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[5]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[6]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[7]);
++	}
++	if (entry->key_present.l7_bytes_8_11)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[8]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[9]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[10]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[11]);
++	}
++	if (entry->key_present.l7_bytes_12_15)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[12]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[13]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[14]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[15]);
++	}
++	if (entry->key_present.l7_bytes_16_19)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[16]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[17]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[18]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[19]);
++	}
++	if (entry->key_present.l7_bytes_20_23)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[20]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[21]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[22]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[23]);
++	}
++
++	// get hash index
++	total = (u32)((u32)cp - (u32)destp) / (sizeof(u32));
++
++	if (total > HASH_MAX_KEY_DWORD)
++	{
++		//hash_printf("Total key words (%d) is too large (> %d)!\n",
++		//				total, HASH_MAX_KEY_DWORD);
++		return -1;
++	}
++
++	if (entry->key_present.port || entry->key_present.Ethertype)
++		index = hash_gen_crc16((unsigned char *)destp, total * 4);
++	else
++	{
++		if (total == 1)
++		{
++			hash_printf("No key is assigned!\n");
++			return -1;
++		}
++
++		index = hash_gen_crc16((unsigned char *)(destp+1), (total-1) * 4);
++	}
++
++	entry->index = index & HASH_BITS_MASK;
++
++	//hash_printf("Total key words = %d, Hash Index= %d\n",
++	//				total, entry->index);
++
++	cp = (unsigned char *)destp;
++	cp+=3;
++	HASH_PUSH_BYTE(cp, entry->rule);	// rule
++
++	entry->total_dwords = total;
++
++	return total;
++}
++
++/*----------------------------------------------------------------------
++* hash_build_nat_keys
++*----------------------------------------------------------------------*/
++void hash_build_nat_keys(u32 *destp, HASH_ENTRY_T *entry)
++{
++	unsigned char 	*cp;
++	int				i;
++	unsigned short 	index;
++	int 			total;
++
++	memset((void *)destp, 0, HASH_MAX_BYTES);
++
++	cp = (unsigned char *)destp + 2;
++	HASH_PUSH_BYTE(cp, entry->key.port);
++	cp++;
++
++	if (entry->key_present.pppoe_sid || entry->key_present.vlan_id)
++	{
++		HASH_PUSH_WORD(cp, entry->key.vlan_id);		// low word
++		HASH_PUSH_WORD(cp, entry->key.pppoe_sid);	// high word
++	}
++
++	HASH_PUSH_BYTE(cp, entry->key.ip_protocol);
++	cp+=3;
++
++	// input (entry->key.sip[i]) is network-oriented
++	// output (hash key) is host-oriented
++	for (i=3; i>=0; i--)
++		HASH_PUSH_BYTE(cp, entry->key.sip[i]);
++
++	// input (entry->key.dip[i]) is network-oriented
++	// output (hash key) is host-oriented
++	for (i=3; i>=0; i--)
++		HASH_PUSH_BYTE(cp, entry->key.dip[i]);
++
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[0]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[1]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[2]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[3]);
++
++	// get hash index
++	total = (u32)((u32)cp - (u32)destp) / (sizeof(u32));
++
++	index = hash_gen_crc16((unsigned char *)destp, total * 4);
++	entry->index = index & ((1 << HASH_BITS) - 1);
++
++	cp = (unsigned char *)destp;
++	cp+=3;
++	HASH_PUSH_BYTE(cp, entry->rule);	// rule
++
++	entry->total_dwords = total;
++}
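The NAT key always hashes down to a table index by masking the CRC to HASH_BITS bits. A worked example follows; HASH_BITS is assumed to be 13 here only to make the arithmetic concrete, the real value comes from the hash header.

/* index calculation, assuming HASH_BITS == 13:
 *
 *   crc   = hash_gen_crc16(key, total * 4);             e.g. 0xBEEF
 *   index = crc & ((1 << 13) - 1) = 0xBEEF & 0x1FFF   = 0x1EEF
 *
 * so every key maps to one of 2^HASH_BITS slots in hash_tables[], and the
 * rule byte pushed into byte 3 of the first key word rides along with it.
 */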
++
++/*----------------------------------------------------------------------
++* hash_build_toe_keys
++*----------------------------------------------------------------------*/
++int hash_build_toe_keys(u32 *destp, HASH_ENTRY_T *entry)
++{
++	unsigned long	data;
++	unsigned char	*cp;
++	unsigned short	index;
++	int	i;
++	int total;
++	//printk("%s\n", __func__);
++	memset((void*)destp, 0, HASH_MAX_BYTES);
++	cp = (unsigned char*)destp;
++
++	if(entry->key_present.port || entry->key_present.Ethertype) {
++		data = (entry->key.port << 16) + entry->key.Ethertype;
++		HASH_PUSH_DWORD(cp, data);
++	} else
++		HASH_PUSH_DWORD(cp, 0);
++
++	if (entry->key_present.da || entry->key_present.sa) {
++		unsigned char	mac[4];
++		if (entry->key_present.da) {
++			data = (entry->key.da[0]) + (entry->key.da[1] << 8) +
++				   (entry->key.da[2] << 16) + (entry->key.da[3] <<24);
++			HASH_PUSH_DWORD(cp, data);
++		}
++		mac[0] = (entry->key_present.da) ? entry->key.da[4] : 0;
++		mac[1] = (entry->key_present.da) ? entry->key.da[5] : 0;
++		mac[2] = (entry->key_present.sa) ? entry->key.sa[0] : 0;
++		mac[3] = (entry->key_present.sa) ? entry->key.sa[1] : 0;
++		data = mac[0] + (mac[1]<<8) + (mac[2]<<16) + (mac[3]<<24);
++		HASH_PUSH_DWORD(cp, data);
++		if (entry->key_present.sa) {
++			data = (entry->key.sa[2]) + (entry->key.sa[3] << 8) +
++				   (entry->key.sa[4] << 16) + (entry->key.sa[5] <<24);
++			HASH_PUSH_DWORD(cp, data);
++		}
++	}
++
++	if (entry->key_present.ip_protocol) {
++		unsigned char ip_protocol;
++		ip_protocol = entry->key.ip_protocol;
++		data = ip_protocol;
++		HASH_PUSH_DWORD(cp, data);
++	}
++
++	if (entry->key_present.ipv6_flow_label) {
++		unsigned long flow_label;
++		flow_label  = entry->key.ipv6_flow_label;
++		data = flow_label & 0xfffff;
++		HASH_PUSH_DWORD(cp, data);
++	}
++
++	if (entry->key_present.sip)	{
++		{
++			data = IPIV(entry->key.sip[0], entry->key.sip[1],
++					entry->key.sip[2], entry->key.sip[3]);
++			HASH_PUSH_DWORD(cp, data);
++			if (entry->key.ipv6) {
++				for (i=4; i<16; i+=4) {
++					data = IPIV(entry->key.sip[i+0], entry->key.sip[i+1],
++							entry->key.sip[i+2], entry->key.sip[i+3]);
++					HASH_PUSH_DWORD(cp, data);
++				}
++			}
++		}
++	}
++
++	if (entry->key_present.dip)	{
++		{
++			data = IPIV(entry->key.dip[0], entry->key.dip[1],
++						entry->key.dip[2], entry->key.dip[3]);
++			HASH_PUSH_DWORD(cp, data);
++			if (entry->key.ipv6) {
++				for (i=4; i<16; i+=4) {
++					data = IPIV(entry->key.dip[i+0], entry->key.dip[i+1],
++								entry->key.dip[i+2], entry->key.dip[i+3]);
++					HASH_PUSH_DWORD(cp, data);
++				}
++			}
++		}
++	}
++	if (entry->key_present.l4_bytes_0_3)
++	{
++		unsigned char *datap;
++		datap = &entry->key.l4_bytes[0];
++		data = 	datap[0] + 	(datap[1] << 8) + (datap[2] << 16) + (datap[3] << 24);
++		HASH_PUSH_DWORD(cp, data);
++	}
++	if (entry->key_present.l7_bytes_0_3)
++	{
++		unsigned char *datap;
++		datap = &entry->key.l7_bytes[0];
++		data = 	datap[0] + 	(datap[1] << 8) + (datap[2] << 16) + (datap[3] << 24);
++		HASH_PUSH_DWORD(cp, data);
++	}
++	if (entry->key_present.l7_bytes_4_7)
++	{
++		unsigned char *datap;
++		datap = &entry->key.l7_bytes[4];
++		data = 	datap[0] + 	(datap[1] << 8) + (datap[2] << 16) + (datap[3] << 24);
++		HASH_PUSH_DWORD(cp, data);
++	}
++
++	total = (unsigned long)((unsigned long)cp - (unsigned long)destp) / (sizeof(u32));
++	if (total > HASH_MAX_KEY_DWORD) {
++		//printf("Total key words (%d) is too large (> %d)!\n",
++		//		total, HASH_MAX_KEY_DWORD);
++		return -1;
++	}
++	index = hash_gen_crc16((unsigned char*)(destp + 1), (total-1)*4);
++	entry->index = index & ((1 << HASH_BITS)-1);
++
++	cp = (unsigned char*) destp;
++	cp += 3;
++	HASH_PUSH_BYTE(cp, entry->rule);
++	entry->total_dwords = total;
++	return total;
++}
++
++/*----------------------------------------------------------------------
++* hash_add_toe_entry
++*----------------------------------------------------------------------*/
++int hash_add_toe_entry(HASH_ENTRY_T *entry)
++{
++	int	rc;
++	u32	key[HASH_MAX_DWORDS];
++
++	rc = hash_build_toe_keys((u32 *)&key, entry);
++	if (rc < 0)
++		return -1;
++	hash_write_entry(entry, (unsigned char*) &key[0]);
++	//hash_dump_entry(entry->index);
++//	hash_set_valid_flag(entry->index, 1);
++//	printk("Dump hash key!\n");
++//	dump_hash_key(entry);
++	return entry->index;
++}
++
++
++/*----------------------------------------------------------------------
++* hash_write_entry
++*----------------------------------------------------------------------*/
++int hash_write_entry(HASH_ENTRY_T *entry, unsigned char *key)
++{
++	int		i;
++	u32		*srcep, *destp, *destp2;
++
++	srcep = (u32 *)key;
++	destp2 = destp = (u32 *)&hash_tables[entry->index][0];
++
++	for (i=0; i<(entry->total_dwords); i++, srcep++, destp++)
++		*destp = *srcep;
++
++	srcep = (u32 *)&entry->action;
++	*destp++ = *srcep;
++
++	srcep = (u32 *)&entry->param;
++	for (i=0; i<(sizeof(ENTRY_PARAM_T)/sizeof(*destp)); i++, srcep++, destp++)
++		*destp = *srcep;
++
++	memset(destp, 0, (HASH_MAX_DWORDS-entry->total_dwords-HASH_ACTION_DWORDS) * sizeof(u32));
++
++	consistent_sync(destp2, (entry->total_dwords+HASH_ACTION_DWORDS) * 4, PCI_DMA_TODEVICE);
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* hash_timer_func
++*----------------------------------------------------------------------*/
++static void hash_timer_func(u32 data)
++{
++	int					i, j, idx;
++	volatile u32		*own_p, *valid_p;
++	u32					own_bits, a_bits;
++	int					period = HASH_TIMER_PERIOD;
++
++	valid_p = (volatile u32 *)TOE_V_BIT_BASE;
++	own_p = (volatile u32 *)hash_nat_owner_bits;
++	for (i=0, idx=0; i<(HASH_TOTAL_ENTRIES/32); i++, own_p++, valid_p++, idx+=32)
++	{
++		a_bits = readl(TOE_A_BIT_BASE + (i*4));
++		own_bits = *own_p;
++		if (own_bits)
++		{
++			for (j=0; own_bits && j<32; j++)
++			{
++				if (own_bits & 1)
++				{
++					short *counter_p, *interval_p;
++					NAT_HASH_ENTRY_T	*nat_entry;
++					GRE_HASH_ENTRY_T	*gre_entry;
++					nat_entry = (NAT_HASH_ENTRY_T *)hash_get_entry(idx+j);
++					gre_entry = (GRE_HASH_ENTRY_T *)nat_entry;
++					if (nat_entry->key.ip_protocol == IPPROTO_GRE)
++					{
++						counter_p = (short *)&gre_entry->tmo.counter;
++						interval_p = (short *)&gre_entry->tmo.interval;
++					}
++					else
++					{
++						counter_p = (short *)&nat_entry->tmo.counter;
++						interval_p = (short *)&nat_entry->tmo.interval;
++					}
++					if (a_bits & 1)
++					{
++						*counter_p = *interval_p;
++					}
++					else
++					{
++						*counter_p -= HASH_TIMER_PERIOD;
++						if (*counter_p <= 0)
++						{
++							*valid_p &= ~(1 << j);		// invalidate it
++							*own_p &= ~(1 << j);		// release ownership for NAT
++							*counter_p = 0;
++							// hash_printf("%lu %s: Clear hash index: %d\n", jiffies/HZ, __func__, i*32+j);
++						}
++						else if (period > *counter_p)
++						{
++							period = *counter_p;
++						}
++					}
++				}
++				a_bits >>= 1;
++				own_bits >>=1;
++			}
++		}
++	}
++
++	hash_timer_obj.expires = jiffies + (period * HZ);
++	add_timer((struct timer_list *)data);
++}
++
++/*----------------------------------------------------------------------
++* dm_long
++*----------------------------------------------------------------------*/
++void dm_long(u32 location, int length)
++{
++	u32		*start_p, *curr_p, *end_p;
++	u32		*datap, data;
++	int		i;
++
++	//if (length > 1024)
++	//	length = 1024;
++
++	start_p = (u32 *)location;
++	end_p = (u32 *)location + length;
++	curr_p = (u32 *)((u32)location & 0xfffffff0);
++	datap = (u32 *)location;
++	while (curr_p < end_p)
++	{
++		hash_printf("0x%08x: ",(u32)curr_p & 0xfffffff0);
++		for (i=0; i<4; i++)
++		{
++			if (curr_p < start_p || curr_p >= end_p)
++               hash_printf("         ");
++			else
++			{
++				data = *datap;
++				hash_printf("%08X ", data);
++			}
++			if (i==1)
++              hash_printf("- ");
++
++			curr_p++;
++			datap++;
++		}
++        hash_printf("\n");
++	}
++}
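++
++/*----------------------------------------------------------------------
++* Example use of dm_long() (illustrative only; the index and the length
++* are arbitrary): dump the first 8 words of one hash entry.
++*
++*	dm_long((u32)&hash_tables[5][0], 8);
++*----------------------------------------------------------------------*/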
++
++/*----------------------------------------------------------------------
++* hash_dump_entry
++*----------------------------------------------------------------------*/
++void hash_dump_entry(int index)
++{
++	hash_printf("Hash Index %d:\n", index);
++	dm_long((u32)&hash_tables[index][0], HASH_MAX_DWORDS);
++}
++
++
+Index: linux-2.6.23.16/drivers/net/sl351x_nat.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_nat.c	2008-03-15 16:59:39.862397640 +0200
+@@ -0,0 +1,1736 @@
++/****************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*----------------------------------------------------------------------------
++* Name			: sl351x_nat.c
++* Description	:
++*		Handle Storlink SL351x NAT Functions
++*
++*
++* Packet Flow:
++*
++*            (xmit)+<--- SW NAT -->+(xmit)
++*                  |       ^^      |
++*                  |       ||      |
++*                  |       ||      |
++*   Client <---> GMAC-x  HW-NAT  GMAC-y  <---> Server
++*
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*	03/13/2006	Gary Chen	Create and implement
++*
++*
++****************************************************************************/
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#define	 MIDWAY
++#define	 SL_LEPUS
++
++#include 
++#include 
++#include 
++#include 
++#ifdef CONFIG_NETFILTER
++#include 
++#include 
++#endif
++
++//#define NAT_DEBUG_MSG		1
++#define _NOT_CHECK_SIP_DIP
++//#define	SL351x_NAT_TEST_BY_SMARTBITS		1	// Initialize 32 hash entries and test by SmartBITS
++#define VITESSE_G5SWITCH	1
++
++#ifdef CONFIG_SL351x_NAT
++
++/*----------------------------------------------------------------------
++* Definition
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL3516_ASIC
++#define CONFIG_SL351x_NAT_TCP_UDP
++#define CONFIG_SL351x_NAT_GRE
++#define CONFIG_SL351x_TCP_UDP_RULE_ID	0
++#define CONFIG_SL351x_GRE_RULE_ID		1
++#else
++#define CONFIG_SL351x_NAT_TCP_UDP
++//#define CONFIG_SL351x_NAT_GRE
++#define CONFIG_SL351x_TCP_UDP_RULE_ID	0
++#define CONFIG_SL351x_GRE_RULE_ID		0
++#endif
++
++#define	nat_printf					printk
++#define NAT_FTP_CTRL_PORT 			(21)	// TCP
++#define NAT_H323_PORT				(1720)	// TCP
++#define NAT_T120_PORT				(1503)	// TCP
++#define NAT_PPTP_PORT				(1723)	// TCP
++#define NAT_TFTP_PORT 				(69)	// UDP
++#define NAT_DNS_PORT 				(53)	// UDP
++#define NAT_NTP_PORT				(123)	// UDP
++#define NAT_RAS_PORT				(1719)	// UDP
++#define NAT_BOOTP67_PORT			(67)	// UDP
++#define NAT_BOOTP68_PORT			(68)	// UDP
++
++#define NAT_TCP_PORT_MAX			64
++#define NAT_UDP_PORT_MAX			64
++
++#define GRE_PROTOCOL				(0x880b)
++#define GRE_PROTOCOL_SWAP			__constant_htons(0x880b)
++
++#ifdef VITESSE_G5SWITCH
++extern int Giga_switch;
++#endif
++
++typedef struct
++{
++	u16		flags_ver;
++	u16		protocol;
++	u16		payload_length;
++	u16		call_id;
++	u32		seq;
++	u32		ack;
++} GRE_PKTHDR_T;
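++
++/*----------------------------------------------------------------------
++* Illustrative sketch (ours): how the header above is used to recognise a
++* PPTP data packet.  Enhanced GRE carries protocol 0x880b and the Call ID
++* selects the tunnel; both fields stay in network byte order, which is why
++* the driver compares against GRE_PROTOCOL_SWAP.
++*----------------------------------------------------------------------*/
++#if 0
++static int is_pptp_gre(const GRE_PKTHDR_T *gre_hdr)
++{
++	return gre_hdr->protocol == GRE_PROTOCOL_SWAP;	/* __constant_htons(0x880b) */
++}
++#endif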
++
++/*----------------------------------------------------------------------
++* NAT Configuration
++*
++* Note: whenever the network settings change, the NAT configuration
++*       must be updated to match.
++*	cfg->lan_port	0 if GMAC-0, 1: if GMAC-1
++*	cfg->wan_port	0 if GMAC-0, 1: if GMAC-1
++*	cfg->lan_ipaddr, cfg->lan_gateway, cfg->lan_netmask
++*	cfg->wan_ipaddr, cfg->wan_gateway, cfg->wan_netmask
++*
++*----------------------------------------------------------------------*/
++NAT_CFG_T 		nat_cfg;
++static int		nat_initialized;
++u32 			nat_collision;
++
++#ifdef CONFIG_SL351x_NAT_TCP_UDP
++static u16		fixed_tcp_port_list[]={NAT_FTP_CTRL_PORT,
++							   			NAT_H323_PORT,
++							   			// NAT_T120_PORT,
++							   			NAT_PPTP_PORT,
++										0};
++static u16		fixed_udp_port_list[]={NAT_DNS_PORT,
++									  	NAT_NTP_PORT,
++									  	NAT_TFTP_PORT,
++										NAT_RAS_PORT,
++									  	NAT_BOOTP67_PORT,
++									  	NAT_BOOTP68_PORT,
++									   	0};
++#endif
++
++// #define _HAVE_DYNAMIC_PORT_LIST
++#ifdef _HAVE_DYNAMIC_PORT_LIST
++static u16		dynamic_tcp_port_list[NAT_TCP_PORT_MAX+1];
++static u16		dynamic_udp_port_list[NAT_UDP_PORT_MAX+1];
++#endif
++
++/*----------------------------------------------------------------------
++* Functions
++*----------------------------------------------------------------------*/
++int sl351x_nat_tcp_udp_output(struct sk_buff *skb, int port);
++int sl351x_nat_udp_output(struct sk_buff *skb, int port);
++int sl351x_nat_gre_output(struct sk_buff *skb, int port);
++
++extern int mac_set_rule_reg(int mac, int rule, int enabled, u32 reg0, u32 reg1, u32 reg2);
++extern void hash_dump_entry(int index);
++extern void mac_get_hw_tx_weight(struct net_device *dev, char *weight);
++extern void mac_set_hw_tx_weight(struct net_device *dev, char *weight);
++
++#ifdef SL351x_NAT_TEST_BY_SMARTBITS
++static void nat_init_test_entry(void);
++#endif
++/*----------------------------------------------------------------------
++* sl351x_nat_init
++*	initialize a NAT matching rule
++*	Called by SL351x Driver
++*		key		: port, protocol, Sip, Dip, Sport, Dport
++*		Action	: Source Q: HW Free Queue,
++*				  Dest Q: HW TxQ
++*				  Change DA
++*				  Change SA
++*                 Change Sip or Dip
++*    			  Change Sport or Dport
++*----------------------------------------------------------------------*/
++void sl351x_nat_init(void)
++{
++	int					rc;
++	GMAC_MRxCR0_T		mrxcr0;
++	GMAC_MRxCR1_T		mrxcr1;
++	GMAC_MRxCR2_T		mrxcr2;
++	NAT_CFG_T			*cfg;
++
++	if (nat_initialized)
++		return;
++
++	nat_initialized = 1;
++
++	if ((sizeof(NAT_HASH_ENTRY_T) > HASH_MAX_BYTES) ||
++		(sizeof(GRE_HASH_ENTRY_T) > HASH_MAX_BYTES))
++	{
++		nat_printf("NAT/GRE hash entry structure is larger than HASH_MAX_BYTES!\n");
++		while(1);
++	}
++
++	cfg = (NAT_CFG_T *)&nat_cfg;
++	memset((void *)cfg, 0, sizeof(NAT_CFG_T));
++#ifdef _HAVE_DYNAMIC_PORT_LIST
++	memset((void *)dynamic_tcp_port_list, 0, sizeof(dynamic_tcp_port_list));
++	memset((void *)dynamic_udp_port_list, 0, sizeof(dynamic_udp_port_list));
++#endif
++
++#ifdef VITESSE_G5SWITCH
++	if(Giga_switch)
++	{
++		cfg->enabled			= 1;
++		cfg->tcp_udp_rule_id 	= CONFIG_SL351x_TCP_UDP_RULE_ID;
++		cfg->gre_rule_id 		= CONFIG_SL351x_GRE_RULE_ID;
++		cfg->lan_port			= 1;
++		cfg->wan_port			= 0;
++		cfg->default_hw_txq 	= 3;
++		cfg->tcp_tmo_interval 	= 60;
++		cfg->udp_tmo_interval 	= 180;
++		cfg->gre_tmo_interval 	= 60;
++	}
++	else
++	{
++		cfg->enabled			= 1;
++		cfg->tcp_udp_rule_id 	= CONFIG_SL351x_TCP_UDP_RULE_ID;
++		cfg->gre_rule_id 		= CONFIG_SL351x_GRE_RULE_ID;
++		cfg->lan_port			= 0;
++		cfg->wan_port			= 1;
++		cfg->default_hw_txq 	= 3;
++		cfg->tcp_tmo_interval 	= 60;
++		cfg->udp_tmo_interval 	= 180;
++		cfg->gre_tmo_interval 	= 60;
++
++	}
++#endif
++
++#if 1	//	debug purpose
++	cfg->ipcfg[0].total				= 1;
++	cfg->ipcfg[0].entry[0].ipaddr	= IPIV(192,168,2,92);
++	cfg->ipcfg[0].entry[0].netmask	= IPIV(255,255,255,0);
++	cfg->ipcfg[1].total				= 1;
++	cfg->ipcfg[1].entry[0].ipaddr	= IPIV(192,168,1,200);
++	cfg->ipcfg[1].entry[0].netmask	= IPIV(255,255,255,0);
++#endif
++
++#if 1
++	cfg->xport.total = 0;
++#else
++	cfg->xport.total = 4;
++
++	// H.323/H.225 Call setup
++	cfg->xport.entry[0].protocol = IPPROTO_TCP;
++	cfg->xport.entry[0].sport_start = 0;
++	cfg->xport.entry[0].sport_end = 0;
++	cfg->xport.entry[0].dport_start = 1720;
++	cfg->xport.entry[0].dport_end = 1720;
++	cfg->xport.entry[1].protocol = IPPROTO_TCP;
++	cfg->xport.entry[1].sport_start = 1720;
++	cfg->xport.entry[1].sport_end = 1720;
++	cfg->xport.entry[1].dport_start = 0;
++	cfg->xport.entry[1].dport_end = 0;
++
++	// RAS Setup
++	cfg->xport.entry[2].protocol = IPPROTO_UDP;
++	cfg->xport.entry[2].sport_start = 0;
++	cfg->xport.entry[2].sport_end = 0;
++	cfg->xport.entry[2].dport_start = 1719;
++	cfg->xport.entry[2].dport_end = 1719;
++	cfg->xport.entry[3].protocol = IPPROTO_UDP;
++	cfg->xport.entry[3].sport_start = 1719;
++	cfg->xport.entry[3].sport_end = 1719;
++	cfg->xport.entry[3].dport_start = 0;
++	cfg->xport.entry[3].dport_end = 0;
++#endif
++
++#ifdef CONFIG_SL351x_NAT_TCP_UDP
++	mrxcr0.bits32 = 0;
++	mrxcr1.bits32 = 0;
++	mrxcr2.bits32 = 0;
++	mrxcr0.bits.port = 1;
++	mrxcr0.bits.l3 = 1;
++	mrxcr0.bits.l4 = 1;
++	mrxcr1.bits.sip = 1;
++	mrxcr1.bits.dip = 1;
++	mrxcr1.bits.l4_byte0_15 = 0x0f;	// Byte 0-3
++	mrxcr0.bits.sprx = 3;
++
++	rc = mac_set_rule_reg(cfg->lan_port, cfg->tcp_udp_rule_id, 1, mrxcr0.bits32, mrxcr1.bits32, mrxcr2.bits32);
++	if (rc < 0)
++	{
++		nat_printf("NAT Failed to set MAC-%d Rule %d!\n", cfg->lan_port, cfg->tcp_udp_rule_id);
++	}
++
++	if (cfg->lan_port != cfg->wan_port)
++	{
++		rc = mac_set_rule_reg(cfg->wan_port, cfg->tcp_udp_rule_id, 1, mrxcr0.bits32, mrxcr1.bits32, mrxcr2.bits32);
++		if (rc < 0)
++		{
++			nat_printf("NAT Failed to set MAC-%d Rule %d!\n", cfg->wan_port, cfg->tcp_udp_rule_id);
++		}
++	}
++#endif
++
++#ifdef CONFIG_SL351x_NAT_GRE
++	mrxcr0.bits32 = 0;
++	mrxcr1.bits32 = 0;
++	mrxcr2.bits32 = 0;
++	mrxcr0.bits.port = 1;
++	mrxcr0.bits.l3 = 1;
++	mrxcr0.bits.l4 = 1;
++	mrxcr1.bits.sip = 1;
++	mrxcr1.bits.dip = 1;
++	mrxcr1.bits.l4_byte0_15 = 0xcc;	// Byte 2, 3, 6, 7
++	mrxcr0.bits.sprx = 4;			// see GMAC driver about SPR
++
++	rc = mac_set_rule_reg(cfg->lan_port, cfg->gre_rule_id, 1, mrxcr0.bits32, mrxcr1.bits32, mrxcr2.bits32);
++	if (rc < 0)
++	{
++		nat_printf("NAT Failed to set MAC-%d Rule %d!\n", cfg->lan_port, cfg->gre_rule_id);
++	}
++
++	if (cfg->lan_port != cfg->wan_port)
++	{
++		rc = mac_set_rule_reg(cfg->wan_port, cfg->gre_rule_id, 1, mrxcr0.bits32, mrxcr1.bits32, mrxcr2.bits32);
++		if (rc < 0)
++		{
++			nat_printf("NAT Failed to set MAC-%d Rule %d!\n", cfg->wan_port, cfg->gre_rule_id);
++		}
++	}
++#endif
++
++#ifdef SL351x_NAT_TEST_BY_SMARTBITS
++	nat_init_test_entry();
++#endif
++}
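++
++/*----------------------------------------------------------------------
++* Note on the matching-rule masks programmed above (our summary, kept as a
++* comment): l4_byte0_15 selects which of the first 16 L4 bytes take part in
++* the hash key, one bit per byte.
++*	0x0f = (1<<0)|(1<<1)|(1<<2)|(1<<3)  -> bytes 0-3, the TCP/UDP source
++*	                                       and destination ports
++*	0xcc = (1<<2)|(1<<3)|(1<<6)|(1<<7)  -> bytes 2-3 and 6-7, the enhanced
++*	                                       GRE protocol field and Call ID
++*	                                       (see GRE_PKTHDR_T above)
++*----------------------------------------------------------------------*/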
++
++/*----------------------------------------------------------------------
++* nat_build_keys
++*	Note: To call this routine, the key->rule_id MUST be zero
++*----------------------------------------------------------------------*/
++static inline int nat_build_keys(NAT_KEY_T *key)
++{
++	return hash_gen_crc16((unsigned char *)key, NAT_KEY_SIZE) & HASH_BITS_MASK;
++}
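++
++/*----------------------------------------------------------------------
++* Illustrative use of nat_build_keys() (ours; the addresses and ports are
++* arbitrary).  The key is hashed with rule_id still zero, exactly as the
++* output path below does, and rule_id is only filled in before the hash
++* entry is written.  The wrapper function name is invented for this sketch.
++*----------------------------------------------------------------------*/
++#if 0
++static void nat_build_keys_example(void)
++{
++	NAT_KEY_T	key;
++	int			hash_index;
++
++	memset(&key, 0, sizeof(key));		/* unused fields (incl. rule_id) MUST be zero */
++	key.port_id     = nat_cfg.lan_port;
++	key.ip_protocol = IPPROTO_TCP;
++	key.sip         = IPIV(192,168,2,92);	/* host byte order, as in the driver */
++	key.dip         = IPIV(168,95,1,1);
++	key.sport       = htons(50);		/* ports stay in network byte order */
++	key.dport       = htons(80);
++	hash_index = nat_build_keys(&key);	/* CRC-16 of the key, masked to the table size */
++	key.rule_id = nat_cfg.tcp_udp_rule_id;	/* set only after hashing */
++}
++#endif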
++
++/*----------------------------------------------------------------------
++* gre_build_keys
++*	Note: To call this routine, the key->rule_id MUST be zero
++*----------------------------------------------------------------------*/
++static inline int gre_build_keys(GRE_KEY_T *key)
++{
++	return hash_gen_crc16((unsigned char *)key, GRE_KEY_SIZE) & HASH_BITS_MASK;
++}
++
++/*----------------------------------------------------------------------
++* nat_write_hash_entry
++*----------------------------------------------------------------------*/
++static inline int nat_write_hash_entry(int index, void *hash_entry)
++{
++	int		i;
++	u32		*srcep, *destp, *destp2;
++
++	srcep = (u32 *)hash_entry;
++	destp = destp2 = (u32 *)&hash_tables[index][0];
++
++	for (i=0; i<(NAT_HASH_ENTRY_SIZE/sizeof(u32)); i++)
++		*destp++ = *srcep++;
++
++	consistent_sync(destp2, NAT_HASH_ENTRY_SIZE, PCI_DMA_TODEVICE);
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* gre_write_hash_entry
++*----------------------------------------------------------------------*/
++static inline int gre_write_hash_entry(int index, void *hash_entry)
++{
++	int		i;
++	u32		*srcep, *destp, *destp2;
++
++	srcep = (u32 *)hash_entry;
++	destp = destp2 = (u32 *)&hash_tables[index][0];
++
++	for (i=0; i<(GRE_HASH_ENTRY_SIZE/sizeof(u32)); i++)
++		*destp++ = *srcep++;
++
++	consistent_sync(destp2, GRE_HASH_ENTRY_SIZE, PCI_DMA_TODEVICE);
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* sl351x_nat_find_ipcfg
++*	return NULL if not found
++*----------------------------------------------------------------------*/
++static NAT_IP_ENTRY_T *sl351x_nat_find_ipcfg(u32 ipaddr, int port)
++{
++	int				i;
++	NAT_IP_ENTRY_T	*ipcfg;
++
++	ipcfg = (NAT_IP_ENTRY_T *)&nat_cfg.ipcfg[port].entry[0];
++	for (i=0; i<nat_cfg.ipcfg[port].total; i++, ipcfg++)
++	{
++		if (ipaddr == ipcfg->ipaddr)
++		{
++			return ipcfg;
++		}
++	}
++	return NULL;
++}
++
++/*----------------------------------------------------------------------
++* sl351x_nat_assign_qid
++*----------------------------------------------------------------------*/
++static int sl351x_nat_assign_qid(u8 proto, u32 sip, u32 dip, u16 sport, u16 dport)
++{
++	int 				i, total, qid;
++	NAT_WRULE_ENTRY_T	*entry;
++
++	for (qid = 0; qid < CONFIG_NAT_TXQ_NUM; qid++)
++	{
++		total = nat_cfg.wrule[qid].total;
++		entry = (NAT_WRULE_ENTRY_T *)&nat_cfg.wrule[qid].entry[0];
++		for (i = 0; i < total; i++, entry++)
++		{
++			if (!entry->protocol || entry->protocol==proto)
++			{
++				//if (!entry->sip_start && !entry->dip_start && !entry->sport_start && !entry->dport_start)
++				//	continue; // UI take care
++				if (entry->sip_start && !((sip >= entry->sip_start) &&
++									   (sip <= entry->sip_end)))
++					continue;
++				if (entry->dip_start && !((dip >= entry->dip_start) &&
++									   (dip <= entry->dip_end)))
++					continue;
++				if (entry->sport_start && !((sport >= entry->sport_start) &&
++									   (sport <= entry->sport_end)))
++					continue;
++				if (entry->dport_start && !((dport >= entry->dport_start)
++					 			       && (dport <= entry->dport_end)))
++					continue;
++				return qid;
++			}
++		}
++	}
++	return nat_cfg.default_hw_txq;
++}
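++
++/*----------------------------------------------------------------------
++* Illustrative helper (ours): the range test used repeatedly above and in
++* the xport check further below.  A zero start value means "don't care";
++* otherwise the field must fall inside [start, end].
++*----------------------------------------------------------------------*/
++#if 0
++static inline int nat_range_match(u32 value, u32 start, u32 end)
++{
++	return (start == 0) || (value >= start && value <= end);
++}
++#endif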
++
++/*----------------------------------------------------------------------
++* sl351x_nat_input
++*	Handle NAT input frames
++*	Called by SL351x Driver - Handle Default Rx Queue
++*	Note: the caller must ensure that l3off and l4off are non-zero.
++*	SL351x NAT Frames should meet the following conditions:
++*	1. TCP or UDP frame
++*	2. Must not use special ALG ports whose TCP/UDP payload is rewritten
++*	3. LAN-IN Frames:
++*		Source IP is in the LAN subnet and Destination is not in the LAN subnet
++*	4. WAN-IN Frames:
++*		Destination IP is the WAN port IP
++*
++*	Examples of excluded ports:
++*	1. TCP/UDP payload is rewritten by an ALG
++*		(a) FTP control packets
++*		(b) VoIP packets
++*		(c) etc. (add in future)
++*	2. UDP ports with a packet rate too low to be worth offloading
++*		(a) TFTP  destination port 69
++*		(b) DNS   53
++*		(c) NTP   123
++*		(d) etc. (add in future)
++*----------------------------------------------------------------------*/
++void sl351x_nat_input(struct sk_buff *skb, int port, void *l3off, void *l4off)
++{
++	int 				i, found;
++	u32					sip, dip;
++	u16					sport, dport;
++	struct ethhdr		*ether_hdr;
++	struct iphdr		*ip_hdr;
++	struct tcphdr		*tcp_hdr;
++	struct pppoe_hdr	*pppoe_hdr;
++	NAT_CB_T			*nat_cb;
++	u8					proto, pppoe_frame=0;
++	NAT_CFG_T			*cfg;
++	u16					ppp_proto;
++	NAT_IP_ENTRY_T		*ipcfg;
++	NAT_XPORT_ENTRY_T	*xentry;
++	GRE_PKTHDR_T		*gre_hdr;
++#ifdef CONFIG_SL351x_NAT_TCP_UDP
++	u16 				*port_ptr;
++#endif
++
++	cfg = (NAT_CFG_T *)&nat_cfg;
++	if (!cfg->enabled || !cfg->ipcfg[port].total)
++		return;
++
++	ip_hdr = (struct iphdr *)&(skb->data[(u32)l3off]);
++	proto = ip_hdr->protocol;
++
++	tcp_hdr = (struct tcphdr *)&(skb->data[(u32)l4off]);
++	gre_hdr = (GRE_PKTHDR_T *)tcp_hdr;
++	sport = ntohs(tcp_hdr->source);
++	dport = ntohs(tcp_hdr->dest);
++
++	sip = ntohl(ip_hdr->saddr);
++	dip = ntohl(ip_hdr->daddr);
++
++	if (dip == IPIV(255,255,255,255))
++		return;
++
++	if (port == cfg->lan_port)
++	{
++		ipcfg = (NAT_IP_ENTRY_T *)&cfg->ipcfg[port].entry[0];
++		for (i=0, found=0; i<cfg->ipcfg[port].total; i++, ipcfg++)
++		{
++			u32 subnet = ipcfg->ipaddr & ipcfg->netmask;
++			if (((sip & ipcfg->netmask) == subnet) &&
++				((dip & ipcfg->netmask) != subnet))
++			{
++				found = 1;
++				break;
++			}
++		}
++		if (!found)
++			return;
++	}
++	else
++	{
++#ifndef _NOT_CHECK_SIP_DIP	// enable this check once the WAN IP address is known
++		if (!sl351x_nat_find_ipcfg(dip, port))
++		{
++			printk("WAN->LAN Incorrect Dip %d.%d.%d.%d\n", HIPQUAD(dip));
++			return;
++		}
++#endif
++		ether_hdr = (struct ethhdr *)skb->data;
++		pppoe_hdr = (struct pppoe_hdr *)(ether_hdr + 1);
++		ppp_proto = *(u16 *)&pppoe_hdr->tag[0];
++		if (ether_hdr->h_proto == __constant_htons(ETH_P_PPP_SES)	// 0x8864
++			&& ppp_proto == __constant_htons(PPP_IP) )				// 0x21
++		{
++			pppoe_frame = 1;
++		}
++	}
++
++#ifdef CONFIG_SL351x_NAT_TCP_UDP
++	if (proto == IPPROTO_TCP)
++	{
++#ifdef	NAT_DEBUG_MSG
++		nat_printf("From   GMAC-%d: 0x%-4X TCP %d.%d.%d.%d [%d] --> %d.%d.%d.%d [%d]",
++				port, ntohs(ip_hdr->id),
++				NIPQUAD(ip_hdr->saddr), sport,
++				NIPQUAD(ip_hdr->daddr), dport);
++		if (tcp_flag_word(tcp_hdr) & TCP_FLAG_SYN) nat_printf(" SYN");
++		if (tcp_flag_word(tcp_hdr) & TCP_FLAG_FIN) nat_printf(" FIN");
++		if (tcp_flag_word(tcp_hdr) & TCP_FLAG_RST) nat_printf(" RST");
++		if (tcp_flag_word(tcp_hdr) & TCP_FLAG_ACK) nat_printf(" ACK");
++		nat_printf("\n");
++#endif
++		// if (tcp_flag_word(tcp_hdr) & (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST))
++		if (tcp_flag_word(tcp_hdr) & (TCP_FLAG_SYN))
++		{
++			return;
++		}
++		port_ptr = fixed_tcp_port_list;
++		for (i=0; *port_ptr; i++, port_ptr++)
++		{
++			if (sport == *port_ptr || dport == *port_ptr)
++				return;
++		}
++#ifdef _HAVE_DYNAMIC_PORT_LIST
++		port_ptr = dynamic_tcp_port_list;
++		for (i=0; *port_ptr; i++, port_ptr++)
++		{
++			if (sport == *port_ptr || dport == *port_ptr)
++				return;
++		}
++#endif
++	}
++	else if (proto == IPPROTO_UDP)
++	{
++#ifdef	NAT_DEBUG_MSG
++		nat_printf("From   GMAC-%d: 0x%-4X UDP %d.%d.%d.%d [%d] --> %d.%d.%d.%d [%d]",
++				port, ntohs(ip_hdr->id),
++				NIPQUAD(ip_hdr->saddr), sport,
++				NIPQUAD(ip_hdr->daddr), dport);
++		nat_printf("\n");
++#endif
++		port_ptr = fixed_udp_port_list;
++		for (i=0; *port_ptr; i++, port_ptr++)
++		{
++			if (sport == *port_ptr || dport == *port_ptr)
++				return;
++		}
++#ifdef _HAVE_DYNAMIC_PORT_LIST
++		port_ptr = dynamic_udp_port_list;
++		for (i=0; *port_ptr; i++, port_ptr++)
++		{
++			if (sport == *port_ptr || dport == *port_ptr)
++				return;
++		}
++#endif
++	}
++	else
++#endif	// CONFIG_SL351x_NAT_TCP_UDP
++#ifdef CONFIG_SL351x_NAT_GRE
++	if (proto == IPPROTO_GRE)
++	{
++		if (gre_hdr->protocol != GRE_PROTOCOL_SWAP)
++			return;
++#ifdef	NAT_DEBUG_MSG
++		nat_printf("From   GMAC-%d: 0x%-4X GRE %d.%d.%d.%d [%d] --> %d.%d.%d.%d",
++				port, ntohs(ip_hdr->id),
++				NIPQUAD(ip_hdr->saddr), ntohs(gre_hdr->call_id),
++				NIPQUAD(ip_hdr->daddr));
++		nat_printf("\n");
++#endif
++	}
++	else
++#endif
++		return;
++
++
++	// check xport list
++	xentry = (NAT_XPORT_ENTRY_T *)&cfg->xport.entry[0];
++	for (i=0; i<cfg->xport.total; i++, xentry++)
++	{
++		if (!xentry->protocol || xentry->protocol == proto)
++		{
++			//if (!xentry->sport_start && !xentry->dport_start) // UI take care
++			//	continue;
++			if (xentry->sport_start && !((sport >= xentry->sport_start) &&
++									   (sport <= xentry->sport_end)))
++				continue;
++			if (xentry->dport_start && !((dport >= xentry->dport_start)
++					 			       && (dport <= xentry->dport_end)))
++				continue;
++			return;
++		}
++	}
++
++	nat_cb = NAT_SKB_CB(skb);
++	if (((u32)nat_cb & 3))
++	{
++		nat_printf("%s ERROR! nat_cb is not aligned!\n", __func__);
++		return;
++	}
++	nat_cb->tag = NAT_CB_TAG;
++	memcpy(nat_cb->sa, skb->data+6, 6);
++	nat_cb->sip = ip_hdr->saddr;
++	nat_cb->dip = ip_hdr->daddr;
++	if (proto == IPPROTO_GRE)
++	{
++		nat_cb->sport = gre_hdr->protocol;
++		nat_cb->dport = gre_hdr->call_id;
++	}
++	else
++	{
++		nat_cb->sport = tcp_hdr->source;
++		nat_cb->dport = tcp_hdr->dest;
++	}
++	nat_cb->pppoe_frame = pppoe_frame;
++}
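++
++/*----------------------------------------------------------------------
++* Illustrative helper (ours): condition (3) from the sl351x_nat_input()
++* header comment, isolated.  A LAN-side frame is a NAT candidate only when
++* its source lies inside a configured LAN subnet and its destination lies
++* outside that subnet, which is the test coded in the loop above.
++*----------------------------------------------------------------------*/
++#if 0
++static inline int nat_is_lan_to_wan(u32 sip, u32 dip, u32 lan_ip, u32 netmask)
++{
++	u32 subnet = lan_ip & netmask;
++
++	return ((sip & netmask) == subnet) && ((dip & netmask) != subnet);
++}
++#endif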
++
++/*----------------------------------------------------------------------
++* sl351x_nat_output
++*	Handle NAT output frames
++*	Called by SL351x Driver - Transmit
++*
++*	1. If the frame is not an SL351x NAT candidate, return FALSE
++*	2. LAN-to-WAN frames:
++*		(1) Sip must be the WAN IP
++*	3. If it is a TCP SYN/RST/FIN frame, return
++*	4. Build the hash key and get the hash index
++*	5. If V-Bit is ON, return.
++*	6. Write hash entry and validate it
++*
++*----------------------------------------------------------------------*/
++int sl351x_nat_output(struct sk_buff *skb, int port)
++{
++	struct iphdr		*ip_hdr;
++	u8					proto;
++	NAT_CB_T			*nat_cb;
++
++	nat_cb = NAT_SKB_CB(skb);
++	if (nat_cb->tag != NAT_CB_TAG)
++		return 0;
++
++	if (((u32)nat_cb & 3))
++	{
++		nat_printf("%s ERROR! nat_cb is not aligned!\n", __func__);
++		return 0;
++	}
++	ip_hdr = (struct iphdr *)skb->h.ipiph;
++	proto = ip_hdr->protocol;
++
++	switch (proto)
++	{
++		case IPPROTO_TCP:
++		case IPPROTO_UDP:
++			return sl351x_nat_tcp_udp_output(skb, port);
++		case IPPROTO_GRE:
++			return sl351x_nat_gre_output(skb, port);
++	}
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* sl351x_nat_tcp_udp_output
++*	Handle NAT TCP/UDP output frames
++*----------------------------------------------------------------------*/
++int sl351x_nat_tcp_udp_output(struct sk_buff *skb, int port)
++{
++	u32					sip, dip;
++	struct ethhdr		*ether_hdr;
++	struct iphdr		*ip_hdr;
++	struct tcphdr		*tcp_hdr;
++	struct pppoe_hdr	*pppoe_hdr;
++	NAT_CB_T			*nat_cb;
++	NAT_CFG_T			*cfg;
++	u8					proto;
++	u16					sport, dport, ppp_proto;
++	u32					hash_data[HASH_MAX_DWORDS];
++	NAT_HASH_ENTRY_T	*hash_entry;
++	int					hash_index;
++	struct ip_conntrack *nat_ip_conntrack;
++	enum ip_conntrack_info ctinfo;
++
++	nat_cb = NAT_SKB_CB(skb);
++	cfg = (NAT_CFG_T *)&nat_cfg;
++
++	ether_hdr = (struct ethhdr *)skb->data;
++	ip_hdr = (struct iphdr *)skb->h.ipiph;
++	tcp_hdr = (struct tcphdr *)((u32)ip_hdr + (ip_hdr->ihl<<2));
++	sip = ntohl(ip_hdr->saddr);
++	dip = ntohl(ip_hdr->daddr);
++	proto = ip_hdr->protocol;
++	sport = ntohs(tcp_hdr->source);
++	dport = ntohs(tcp_hdr->dest);
++
++#ifdef	NAT_DEBUG_MSG
++	{
++		nat_printf("To   GMAC-%d: 0x%-4X [%d] %d.%d.%d.%d [%d] --> %d.%d.%d.%d [%d]",
++				port, ntohs(ip_hdr->id), proto,
++				NIPQUAD(ip_hdr->saddr), sport,
++				NIPQUAD(ip_hdr->daddr), dport);
++		if (proto == IPPROTO_TCP)
++		{
++			if (tcp_flag_word(tcp_hdr) & TCP_FLAG_SYN) nat_printf(" SYN");
++			if (tcp_flag_word(tcp_hdr) & TCP_FLAG_FIN) nat_printf(" FIN");
++			if (tcp_flag_word(tcp_hdr) & TCP_FLAG_RST) nat_printf(" RST");
++			if (tcp_flag_word(tcp_hdr) & TCP_FLAG_ACK) nat_printf(" ACK");
++		}
++		nat_printf("\n");
++	}
++#endif
++	nat_ip_conntrack = ip_conntrack_get(skb, &ctinfo);
++	if (!nat_ip_conntrack)
++	{
++		nat_printf("IP conntrack info is not found!\n");
++		return 0;
++	}
++	// nat_printf("nat_ip_conntrack = 0x%x, status=0x%lx, ctinfo=%d\n", (u32)nat_ip_conntrack, nat_ip_conntrack->status, ctinfo);
++	// if (nat_ip_conntrack->master || nat_ip_conntrack->helper)
++	if (nat_ip_conntrack->helper)
++	{
++		nat_printf("Sport=%d Dport=%d master=0x%x, helper=0x%x\n", sport, dport, (u32)nat_ip_conntrack->master, (u32)nat_ip_conntrack->helper);
++		return 0;
++	}
++
++	//if (proto == IPPROTO_TCP && !(nat_ip_conntrack->status & IPS_ASSURED))
++	//	return 0;
++
++#ifdef	NAT_DEBUG_MSG
++	nat_printf("nat_ip_conntrack=0x%x, nat_cb->state=%d\n", (u32)nat_ip_conntrack, nat_cb->state);
++	nat_printf("lan2wan_hash_index=%d,  wan2lan_hash_index=%d\n", nat_ip_conntrack->lan2wan_hash_index, nat_ip_conntrack->wan2lan_hash_index);
++	nat_printf("lan2wan_collision=%d, wan2lan_collision=%d\n", nat_ip_conntrack->lan2wan_collision, nat_ip_conntrack->wan2lan_collision);
++#endif
++	if (proto == IPPROTO_TCP)
++	{
++		if (nat_cb->state >= TCP_CONNTRACK_FIN_WAIT && nat_cb->state <= TCP_CONNTRACK_CLOSE)
++		{
++			if 	(nat_ip_conntrack->lan2wan_hash_index)
++			{
++#ifdef	NAT_DEBUG_MSG
++				nat_printf("Invalidate LAN->WAN hash entry %d\n", nat_ip_conntrack->lan2wan_hash_index - 1);
++#endif
++				hash_nat_disable_owner(nat_ip_conntrack->lan2wan_hash_index - 1);
++				hash_invalidate_entry(nat_ip_conntrack->lan2wan_hash_index - 1);
++				nat_ip_conntrack->lan2wan_hash_index = 0;
++			}
++			if 	(nat_ip_conntrack->wan2lan_hash_index)
++			{
++#ifdef	NAT_DEBUG_MSG
++				nat_printf("Invalidate WAN->LAN hash entry %d\n", nat_ip_conntrack->wan2lan_hash_index - 1);
++#endif
++				hash_nat_disable_owner(nat_ip_conntrack->wan2lan_hash_index - 1);
++				hash_invalidate_entry(nat_ip_conntrack->wan2lan_hash_index - 1);
++				nat_ip_conntrack->wan2lan_hash_index = 0;
++			}
++			return 0;
++
++		}
++		else if (nat_cb->state != TCP_CONNTRACK_ESTABLISHED)
++		{
++			return 0;
++		}
++	}
++	if (proto == IPPROTO_TCP && (tcp_flag_word(tcp_hdr) & (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST)))
++	// if (proto == IPPROTO_TCP &&  (tcp_flag_word(tcp_hdr) & (TCP_FLAG_SYN)))
++		return 0;
++
++	hash_entry = (NAT_HASH_ENTRY_T *)&hash_data;
++	if (port == cfg->wan_port)	// LAN-to-WAN
++	{
++		if (nat_ip_conntrack->lan2wan_hash_index || nat_ip_conntrack->lan2wan_collision)
++			return 0;
++#ifndef _NOT_CHECK_SIP_DIP	// enable this check once the WAN IP address is known
++		if (!sl351x_nat_find_ipcfg(sip, port))
++		{
++			printk("LAN->WAN Incorrect Sip %d.%d.%d.%d\n", HIPQUAD(sip));
++			return 0;
++		}
++#endif
++		// Note: unused fields (including rule_id) MUST be zero
++		hash_entry->key.Ethertype 	= 0;
++		hash_entry->key.port_id 	= cfg->lan_port;
++		hash_entry->key.rule_id 	= 0;
++		hash_entry->key.ip_protocol = proto;
++		hash_entry->key.reserved1 	= 0;
++		hash_entry->key.reserved2 	= 0;
++		hash_entry->key.sip 		= ntohl(nat_cb->sip);
++		hash_entry->key.dip 		= ntohl(nat_cb->dip);
++		hash_entry->key.sport 		= nat_cb->sport;
++		hash_entry->key.dport 		= nat_cb->dport;
++
++		hash_index = nat_build_keys(&hash_entry->key);
++
++#ifdef NAT_DEBUG_LAN_HASH_TIMEOUT
++		if (hash_get_nat_owner_flag(hash_index))
++			return 0;
++#endif
++		if (hash_get_valid_flag(hash_index))
++		{
++			nat_ip_conntrack->lan2wan_collision = 1;
++			nat_collision++;
++#if 0
++			if (proto == IPPROTO_TCP && (tcp_flag_word(tcp_hdr) & (TCP_FLAG_FIN | TCP_FLAG_RST)))
++			{
++				if (memcmp((void *)&hash_entry->key, hash_get_entry(hash_index), sizeof(NAT_KEY_T)) == 0)
++				{
++   					hash_nat_disable_owner(hash_index);
++ 					hash_invalidate_entry(hash_index); // Must last one, else HW Tx fast SW
++ 					// nat_printf("Invalidate nat hash entry %d\n", hash_index);
++ 				}
++			}
++#endif
++			return 0;
++		}
++
++		// write hash entry
++		hash_entry->key.rule_id = cfg->tcp_udp_rule_id;
++		memcpy(hash_entry->param.da, skb->data, 6);
++		memcpy(hash_entry->param.sa, skb->data+6, 6);
++		hash_entry->param.Sip = sip;
++		hash_entry->param.Dip = dip;
++		hash_entry->param.Sport = sport;
++		hash_entry->param.Dport = dport;
++		hash_entry->param.vlan = 0;
++		hash_entry->param.sw_id = 0;
++		hash_entry->param.mtu = 0;
++		// check PPPoE
++		pppoe_hdr = (struct pppoe_hdr *)(ether_hdr + 1);
++		ppp_proto = *(u16 *)&pppoe_hdr->tag[0];
++		if (ether_hdr->h_proto == __constant_htons(ETH_P_PPP_SES)	// 0x8864
++			&& ppp_proto == __constant_htons(PPP_IP) )				// 0x21
++		{
++			hash_entry->action.dword = NAT_PPPOE_LAN2WAN_ACTIONS;
++			hash_entry->param.pppoe = htons(pppoe_hdr->sid);
++		}
++		else
++		{
++			hash_entry->action.dword = NAT_LAN2WAN_ACTIONS;
++			hash_entry->param.pppoe = 0;
++		}
++		hash_entry->action.bits.dest_qid = sl351x_nat_assign_qid(proto, sip, dip, sport, dport);
++		hash_entry->action.bits.dest_qid +=	(cfg->wan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->tmo.counter = hash_entry->tmo.interval =
++						(proto == IPPROTO_TCP) ? cfg->tcp_tmo_interval : cfg->udp_tmo_interval;
++		nat_write_hash_entry(hash_index, hash_entry);
++		// nat_printf("%lu Validate a LAN hash entry %d\n", jiffies/HZ, hash_index);
++		// hash_dump_entry(hash_index);
++		hash_nat_enable_owner(hash_index);
++		hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++ 		nat_ip_conntrack->lan2wan_hash_index = hash_index + 1;
++ 		nat_ip_conntrack->hw_nat |= 1;
++		return 0;
++	}
++	else // WAN-to-LAN
++	{
++		if (nat_ip_conntrack->wan2lan_hash_index || nat_ip_conntrack->wan2lan_collision)
++			return 0;
++
++		// Note: unused fields (including rule_id) MUST be zero
++		hash_entry->key.Ethertype 	= 0;
++		hash_entry->key.port_id 	= cfg->wan_port;
++		hash_entry->key.rule_id 	= 0;
++		hash_entry->key.ip_protocol = proto;
++		hash_entry->key.reserved1 	= 0;
++		hash_entry->key.reserved2 	= 0;
++		hash_entry->key.sip 		= ntohl(nat_cb->sip);
++		hash_entry->key.dip 		= ntohl(nat_cb->dip);
++		hash_entry->key.sport 		= nat_cb->sport;
++		hash_entry->key.dport 		= nat_cb->dport;
++
++		hash_index = nat_build_keys(&hash_entry->key);
++
++#ifdef NAT_DEBUG_WAN_HASH_TIMEOUT
++		if (hash_get_nat_owner_flag(hash_index))
++			return 0;
++#endif
++		if (hash_get_valid_flag(hash_index))
++		{
++			nat_ip_conntrack->wan2lan_collision = 1;
++			nat_collision++;
++#if 0
++			if (proto == IPPROTO_TCP && (tcp_flag_word(tcp_hdr) & (TCP_FLAG_FIN | TCP_FLAG_RST)))
++			{
++				if (memcmp((void *)&hash_entry->key, hash_get_entry(hash_index), sizeof(NAT_KEY_T)) == 0)
++				{
++   					hash_nat_disable_owner(hash_index);
++ 					hash_invalidate_entry(hash_index); // Must last one, else HW Tx fast SW
++  					// nat_printf("Invalidate nat hash entry %d\n", hash_index);
++				}
++			}
++#endif
++			return 0;
++		}
++
++		// write hash entry
++		hash_entry->key.rule_id = cfg->tcp_udp_rule_id;
++		memcpy(hash_entry->param.da, skb->data, 6);
++		memcpy(hash_entry->param.sa, skb->data+6, 6);
++		hash_entry->param.Sip = sip;
++		hash_entry->param.Dip = dip;
++		hash_entry->param.Sport = sport;
++		hash_entry->param.Dport = dport;
++		hash_entry->param.vlan = 0;
++		hash_entry->param.pppoe = 0;
++		hash_entry->param.sw_id = 0;
++		hash_entry->param.mtu = 0;
++		hash_entry->action.dword = (nat_cb->pppoe_frame) ? NAT_PPPOE_WAN2LAN_ACTIONS : NAT_WAN2LAN_ACTIONS;
++		hash_entry->action.bits.dest_qid = sl351x_nat_assign_qid(proto, sip, dip, sport, dport);
++		hash_entry->action.bits.dest_qid += (cfg->lan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->tmo.counter = hash_entry->tmo.interval =
++						(proto == IPPROTO_TCP) ? cfg->tcp_tmo_interval : cfg->udp_tmo_interval;
++		nat_write_hash_entry(hash_index, hash_entry);
++
++		// nat_printf("%lu Validate a WAN hash entry %d\n", jiffies/HZ, hash_index);
++		// hash_dump_entry(hash_index);
++   		hash_nat_enable_owner(hash_index);
++		hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++ 		nat_ip_conntrack->wan2lan_hash_index = hash_index + 1;
++ 		nat_ip_conntrack->hw_nat |= 2;
++		return 0;
++	}
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* sl351x_nat_gre_output
++*	Handle NAT GRE output frames
++*----------------------------------------------------------------------*/
++int sl351x_nat_gre_output(struct sk_buff *skb, int port)
++{
++	u32					sip, dip;
++	struct ethhdr		*ether_hdr;
++	struct iphdr		*ip_hdr;
++	struct pppoe_hdr	*pppoe_hdr;
++	GRE_PKTHDR_T		*gre_hdr;
++	NAT_CB_T			*nat_cb;
++	NAT_CFG_T			*cfg;
++	u16					ppp_proto;
++	u32					hash_data[HASH_MAX_DWORDS];
++	GRE_HASH_ENTRY_T	*hash_entry;
++	int					hash_index;
++	struct ip_conntrack *nat_ip_conntrack;
++	enum ip_conntrack_info ctinfo;
++
++	nat_cb = NAT_SKB_CB(skb);
++	cfg = (NAT_CFG_T *)&nat_cfg;
++
++	ether_hdr = (struct ethhdr *)skb->data;
++	ip_hdr = (struct iphdr *)skb->h.ipiph;
++	gre_hdr = (GRE_PKTHDR_T *)((u32)ip_hdr + (ip_hdr->ihl<<2));
++	sip = ntohl(ip_hdr->saddr);
++	dip = ntohl(ip_hdr->daddr);
++
++#ifdef	NAT_DEBUG_MSG
++	{
++		nat_printf("To   GMAC-%d: 0x%-4X GRE %d.%d.%d.%d [%d] --> %d.%d.%d.%d",
++				port, ntohs(ip_hdr->id),
++				NIPQUAD(ip_hdr->saddr), ntohs(gre_hdr->call_id),
++				NIPQUAD(ip_hdr->daddr));
++		nat_printf("\n");
++	}
++#endif
++	nat_ip_conntrack = ip_conntrack_get(skb, &ctinfo);
++	if (nat_ip_conntrack)
++	{
++		// if (nat_ip_conntrack->master || nat_ip_conntrack->helper)
++		if (nat_ip_conntrack->helper)
++		{
++			nat_printf("GRE Call-ID=%d, master=0x%x, helper=0x%x\n", ntohs(gre_hdr->call_id), (u32)nat_ip_conntrack->master, (u32)nat_ip_conntrack->helper);
++			return 0;
++		}
++		if (!(nat_ip_conntrack->status & IPS_ASSURED))
++			return 0;
++	}
++
++	hash_entry = (GRE_HASH_ENTRY_T *)&hash_data;
++	if (port == cfg->wan_port)	// LAN-to-WAN
++	{
++#ifndef _NOT_CHECK_SIP_DIP	// enable this check once the WAN IP address is known
++		if (!sl351x_nat_find_ipcfg(sip, port))
++		{
++			printk("LAN->WAN Incorrect Sip %d.%d.%d.%d\n", HIPQUAD(sip));
++			return 0;
++		}
++#endif
++		// Note: unused fields (including rule_id) MUST be zero
++		hash_entry->key.Ethertype 	= 0;
++		hash_entry->key.port_id 	= cfg->lan_port;
++		hash_entry->key.rule_id 	= 0;
++		hash_entry->key.ip_protocol = IPPROTO_GRE;
++		hash_entry->key.reserved1 	= 0;
++		hash_entry->key.reserved2 	= 0;
++		hash_entry->key.reserved3 	= 0;
++		hash_entry->key.reserved4 	= 0;
++		hash_entry->key.sip 		= ntohl(nat_cb->sip);
++		hash_entry->key.dip 		= ntohl(nat_cb->dip);
++		hash_entry->key.protocol	= nat_cb->sport;
++		hash_entry->key.call_id 	= nat_cb->dport;
++
++		hash_index = gre_build_keys(&hash_entry->key);
++
++#ifdef NAT_DEBUG_LAN_HASH_TIMEOUT
++		if (hash_get_nat_owner_flag(hash_index))
++			return 0;
++#endif
++		if (hash_get_valid_flag(hash_index))
++		{
++			return 0;
++		}
++
++		// write hash entry
++		hash_entry->key.rule_id = cfg->gre_rule_id;
++		memcpy(hash_entry->param.da, skb->data, 6);
++		memcpy(hash_entry->param.sa, skb->data+6, 6);
++		hash_entry->param.Sip = sip;
++		hash_entry->param.Dip = dip;
++		hash_entry->param.Sport = 0;
++		hash_entry->param.Dport = ntohs(gre_hdr->call_id);
++		hash_entry->param.vlan = 0;
++		hash_entry->param.sw_id = 0;
++		hash_entry->param.mtu = 0;
++		// check PPPoE
++		pppoe_hdr = (struct pppoe_hdr *)(ether_hdr + 1);
++		ppp_proto = *(u16 *)&pppoe_hdr->tag[0];
++		if (ether_hdr->h_proto == __constant_htons(ETH_P_PPP_SES)	// 0x8864
++			&& ppp_proto == __constant_htons(PPP_IP) )				// 0x21
++		{
++			hash_entry->action.dword = NAT_PPPOE_PPTP_LAN2WAN_ACTIONS;
++			hash_entry->param.pppoe = htons(pppoe_hdr->sid);
++		}
++		else
++		{
++			hash_entry->action.dword = NAT_PPTP_LAN2WAN_ACTIONS;
++			hash_entry->param.pppoe = 0;
++		}
++		hash_entry->action.bits.dest_qid = sl351x_nat_assign_qid(IPPROTO_GRE, sip, dip, 0, ntohs(gre_hdr->call_id));
++		hash_entry->action.bits.dest_qid +=	(cfg->wan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->tmo.counter = hash_entry->tmo.interval = cfg->gre_tmo_interval;
++		gre_write_hash_entry(hash_index, hash_entry);
++		// nat_printf("%lu Validate a LAN hash entry %d\n", jiffies/HZ, hash_index);
++		// hash_dump_entry(hash_index);
++		hash_nat_enable_owner(hash_index);
++		hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++		return 0;
++	}
++	else // WAN-to-LAN
++	{
++		// Note: unused fields (including rule_id) MUST be zero
++		hash_entry->key.Ethertype 	= 0;
++		hash_entry->key.port_id 	= cfg->wan_port;
++		hash_entry->key.rule_id 	= 0;
++		hash_entry->key.ip_protocol = IPPROTO_GRE;
++		hash_entry->key.reserved1 	= 0;
++		hash_entry->key.reserved2 	= 0;
++		hash_entry->key.reserved3 	= 0;
++		hash_entry->key.reserved4 	= 0;
++		hash_entry->key.sip 		= ntohl(nat_cb->sip);
++		hash_entry->key.dip 		= ntohl(nat_cb->dip);
++		hash_entry->key.protocol	= nat_cb->sport;
++		hash_entry->key.call_id		= nat_cb->dport;
++
++		hash_index = gre_build_keys(&hash_entry->key);
++
++#ifdef NAT_DEBUG_WAN_HASH_TIMEOUT
++		if (hash_get_nat_owner_flag(hash_index))
++			return 0;
++#endif
++		if (hash_get_valid_flag(hash_index))
++		{
++			return 0;
++		}
++
++		// write hash entry
++		hash_entry->key.rule_id = cfg->gre_rule_id;
++		memcpy(hash_entry->param.da, skb->data, 6);
++		memcpy(hash_entry->param.sa, skb->data+6, 6);
++		hash_entry->param.Sip = sip;
++		hash_entry->param.Dip = dip;
++		hash_entry->param.Sport = 0;
++		hash_entry->param.Dport = ntohs(gre_hdr->call_id);
++		hash_entry->param.vlan = 0;
++		hash_entry->param.pppoe = 0;
++		hash_entry->param.sw_id = 0;
++		hash_entry->param.mtu = 0;
++		hash_entry->action.dword = (nat_cb->pppoe_frame) ? NAT_PPPOE_PPTP_WAN2LAN_ACTIONS : NAT_PPTP_WAN2LAN_ACTIONS;
++		hash_entry->action.bits.dest_qid = sl351x_nat_assign_qid(IPPROTO_GRE, sip, dip, 0, ntohs(gre_hdr->call_id));
++		hash_entry->action.bits.dest_qid += (cfg->lan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->tmo.counter = hash_entry->tmo.interval = cfg->gre_tmo_interval;
++		gre_write_hash_entry(hash_index, hash_entry);
++
++		// nat_printf("%lu Validate a WAN hash entry %d\n", jiffies/HZ, hash_index);
++		// hash_dump_entry(hash_index);
++   		hash_nat_enable_owner(hash_index);
++		hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++		return 0;
++	}
++	return 0;
++}
++
++
++#ifdef _HAVE_DYNAMIC_PORT_LIST
++/*----------------------------------------------------------------------
++* sl_nat_add_port
++*----------------------------------------------------------------------*/
++void sl_nat_add_port(u8 protocol, u16 port)
++{
++	int 	i;
++	u16		*port_ptr;
++
++	if (protocol == IPPROTO_TCP)
++		port_ptr = dynamic_tcp_port_list;
++	else if (protocol == IPPROTO_UDP)
++		port_ptr = dynamic_udp_port_list;
++	else
++		return;
++
++	for (i=0; *port_ptr; i++)
++	{
++		if (port == *port_ptr)
++			return;
++		port_ptr++;
++	}
++	*port_ptr++ = port;
++	*port_ptr = 0;
++}
++
++/*----------------------------------------------------------------------
++* sl_nat_remove_port
++*----------------------------------------------------------------------*/
++void sl_nat_remove_port(u8 protocol, u16 port)
++{
++	int 	i, j;
++	u16		*port_ptr, *port_next;
++
++	if (protocol == IPPROTO_TCP)
++		port_ptr = dynamic_tcp_port_list;
++	else if (protocol == IPPROTO_UDP)
++		port_ptr = dynamic_udp_port_list;
++	else
++		return;
++
++	for (i=0; *port_ptr; i++, port_ptr++)
++	{
++		if (port == *port_ptr)
++		{
++			port_next = port_ptr + 1;
++			for (j=i+1; *port_next; i++, j++)
++				*port_ptr++ = *port_next++;
++			*port_ptr = 0;
++			return;
++		}
++	}
++}
++#endif
++
++/*----------------------------------------------------------------------
++* sl351x_nat_ioctl
++*----------------------------------------------------------------------*/
++int sl351x_nat_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++	GMAC_INFO_T 		*tp = (GMAC_INFO_T *)dev->priv;
++	int 				i, j, port_id;
++    NATCMD_HDR_T		nat_hdr;
++    NAT_REQ_E			ctrl;
++	unsigned char		*req_datap;
++	NAT_IP_ENTRY_T		*ipcfg;
++	NAT_XPORT_ENTRY_T	*xport_entry;
++	NAT_WRULE_ENTRY_T	*wrule_entry;
++	unsigned int		qid;
++
++	if (copy_from_user((void *)&nat_hdr, rq->ifr_data, sizeof(nat_hdr)))
++		return -EFAULT;
++	req_datap = (unsigned char *)rq->ifr_data + sizeof(nat_hdr);
++	port_id = tp->port_id;
++	switch (nat_hdr.cmd) {
++	case NATSSTATUS:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_STATUS_T))
++			return -EPERM;
++		if (copy_from_user((void *)&ctrl.status, req_datap, sizeof(ctrl.status)))
++			return -EFAULT;
++		if (ctrl.status.enable != 0 && ctrl.status.enable != 1)
++			return -EPERM;
++		// sl351x_nat_set_enabled_flag(ctrl.status.enable);
++		if (nat_cfg.enabled && (ctrl.status.enable == 0))
++		{
++			for (i=0; i= CONFIG_NAT_MAX_IP_NUM)
++			return -E2BIG;
++		if (copy_from_user((void *)&nat_cfg.ipcfg[port_id].entry[i], req_datap, sizeof(NAT_IPCFG_T)))
++			return -EFAULT;
++		nat_cfg.ipcfg[port_id].total++;
++		break;
++	case NATDELIP:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_IPCFG_T))
++			return -EPERM;
++		if (copy_from_user((void *)&ctrl.ipcfg, req_datap, sizeof(ctrl.ipcfg)))
++			return -EFAULT;
++		ipcfg = (NAT_IP_ENTRY_T *)&nat_cfg.ipcfg[port_id].entry[0];
++		for (i=0; i<nat_cfg.ipcfg[port_id].total; i++, ipcfg++)
++		{
++			if (ipcfg->ipaddr == ctrl.ipcfg.entry.ipaddr)
++			{
++				NAT_IP_ENTRY_T *ipcfg_next;
++				ipcfg_next = ipcfg + 1;
++				for (j=i+1; j < nat_cfg.ipcfg[port_id].total; i++, j++)
++				{
++					memcpy((void *)ipcfg, (void *)ipcfg_next, sizeof(NAT_IP_ENTRY_T));
++					ipcfg++;
++					ipcfg_next++;
++				}
++				ipcfg->ipaddr = 0;
++				ipcfg->netmask = 0;
++				nat_cfg.ipcfg[port_id].total--;
++				return 0;
++			}
++		}
++		return -ENOENT;
++	case NATGETIP:
++		if (nat_hdr.len != sizeof(NAT_IPCFG_ALL_T))
++			return -EPERM;
++		if (copy_to_user(req_datap, (void *)&nat_cfg.ipcfg[port_id], sizeof(NAT_IPCFG_ALL_T)))
++			return -EFAULT;
++		break;
++	case NATAXPORT:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_XPORT_T))
++			return -EPERM;
++		i = nat_cfg.xport.total;
++		if (i >= CONFIG_NAT_MAX_XPORT)
++			return -E2BIG;
++		if (copy_from_user((void *)&nat_cfg.xport.entry[i], req_datap, sizeof(NAT_XPORT_T)))
++			return -EFAULT;
++		nat_cfg.xport.total++;
++		break;
++	case NATDXPORT:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_XPORT_T))
++			return -EPERM;
++		if (copy_from_user((void *)&ctrl.xport, req_datap, sizeof(NAT_XPORT_T)))
++			return -EFAULT;
++		xport_entry = (NAT_XPORT_ENTRY_T *)&nat_cfg.xport.entry[0];
++		for (i=0; i CONFIG_NAT_TXQ_NUM)
++			return -EPERM;
++		i = nat_cfg.wrule[qid].total;
++		if (i >= CONFIG_NAT_MAX_WRULE)
++			return -E2BIG;
++		if (copy_from_user((void *)&nat_cfg.wrule[qid].entry[i], req_datap+sizeof(qid), sizeof(NAT_WRULE_T)))
++			return -EFAULT;
++		nat_cfg.wrule[qid].total++;
++		break;
++	case NATDWRULE:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_WRULE_T))
++			return -EPERM;
++		if (copy_from_user((void *)&ctrl.wrule, req_datap, sizeof(NAT_WRULE_T)))
++			return -EFAULT;
++		qid = ctrl.wrule.qid;
++		if (qid >= CONFIG_NAT_TXQ_NUM)
++			return -EPERM;
++		wrule_entry = (NAT_WRULE_ENTRY_T *)&nat_cfg.wrule[qid].entry[0];
++		for (i=0; i= CONFIG_NAT_TXQ_NUM)
++			return -EPERM;
++		if (copy_to_user(req_datap, (void *)&nat_cfg.wrule[qid], sizeof(NAT_WRULE_ALL_T)))
++			return -EFAULT;
++		break;
++	case NATSDEFQ:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_QUEUE_T))
++			return -EPERM;
++		if (copy_from_user((void *)&nat_cfg.default_hw_txq, req_datap, sizeof(u32)))
++			return -EFAULT;
++		break;
++	case NATGDEFQ:
++		if (nat_hdr.len != sizeof(NAT_QUEUE_T))
++			return -EPERM;
++		if (copy_to_user(req_datap, (void *)&nat_cfg.default_hw_txq, sizeof(u32)))
++			return -EFAULT;
++		break;
++	case NATRMIPCFG:
++		nat_cfg.ipcfg[port_id].total = 0;
++		break;
++	case NATTESTENTRY:
++		if (!capable(CAP_NET_ADMIN))
++			return -EPERM;
++		if (nat_hdr.len != sizeof(NAT_TESTENTRY_T))
++			return -EPERM;
++		if (copy_from_user((void *)&ctrl.init_entry, req_datap, sizeof(ctrl.init_entry)))
++			return -EFAULT;
++		if (ctrl.init_entry.init_enable != 0 && ctrl.init_entry.init_enable != 1)
++			return -EPERM;
++		nat_cfg.init_enabled = ctrl.init_entry.init_enable;
++		break;
++
++	default:
++		return -EPERM;
++	}
++
++	return 0;
++}
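++
++/*----------------------------------------------------------------------
++* Illustrative user-space sketch (ours, with assumptions): the handler
++* above expects rq->ifr_data to point at a NATCMD_HDR_T immediately
++* followed by the command payload.  The ioctl number is an assumption
++* (shown as SIOCDEVPRIVATE), as is the interface name; the real number is
++* whatever the GMAC driver's do_ioctl dispatches to sl351x_nat_ioctl().
++* Needs the usual user-space headers (<sys/socket.h>, <sys/ioctl.h>,
++* <net/if.h>) plus the driver's NAT configuration definitions.
++*----------------------------------------------------------------------*/
++#if 0
++	int				fd;
++	struct ifreq	ifr;
++	unsigned char	buf[sizeof(NATCMD_HDR_T) + sizeof(NAT_STATUS_T)];
++	NATCMD_HDR_T	*hdr = (NATCMD_HDR_T *)buf;
++	NAT_STATUS_T	*status = (NAT_STATUS_T *)(buf + sizeof(NATCMD_HDR_T));
++
++	fd = socket(AF_INET, SOCK_DGRAM, 0);
++	hdr->cmd = NATSSTATUS;				/* enable/disable HW NAT */
++	hdr->len = sizeof(NAT_STATUS_T);
++	status->enable = 1;
++	memset(&ifr, 0, sizeof(ifr));
++	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ-1);	/* interface name: assumption */
++	ifr.ifr_data = (void *)buf;
++	ioctl(fd, SIOCDEVPRIVATE, &ifr);		/* ioctl number: assumption */
++#endif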
++
++/*----------------------------------------------------------------------
++* 	nat_init_test_entry
++*	Initialize NAT test hash entries
++*
++*	SmartBits P1  -----> Lepus GMAC 0 --------------+
++*													|
++*													|
++*             P3  <----- Lepus GMAC 1 -- HW TxQ0 <--+
++*									  -- HW TxQ1 <--+
++*									  -- HW TxQ2 <--+
++*									  -- HW TxQ3 <--+
++*
++*	SmartBits P1  <----- Lepus GMAC 0 -- HW TxQ0 <--+
++*									  -- HW TxQ1 <--+
++*                                     -- HW TxQ2 <--+
++*									  -- HW TxQ3 <--+
++*													|
++*													|
++*             P3  -----> Lepus GMAC 1 --------------+
++*
++*   LAN GMAC0 <--------------------------------------------> GMAC1 WAN
++*	192.168.[x].[y]:50 --> 168.95.[x].[y]:80 ---TXQ[y-1]---> 192.168.2.254:200[y] --> 168.95.[x].[y]:80
++*	192.168.[x].[y]:50 <-- 168.95.[x].[y]:80 <--TXQ[y-1]---- 192.168.2.254:200[y] <-- 168.95.[x].[y]:80
++*   where:
++*		[x] : Packet Type
++*		[y] : Tx Queue, 1 for TxQ0, 2 for TxQ1, 3 for TxQ2, 4 for TxQ3,
++*
++*
++* Packet Type:
++* 1. TCP Frames <---> TCP Frames
++*   LAN GMAC0 <--------------------------------> GMAC1 WAN
++*	192.168.1.1:50 --> 168.95.1.1:80 ---TXQ0---> 192.168.2.254:2001 --> 168.95.1.1:80
++*	192.168.1.1:50 <-- 168.95.1.1:80 <--TXQ0---- 192.168.2.254:2001 <-- 168.95.1.1:80
++*
++*	192.168.1.2:50 --> 168.95.1.2:80 ---TXQ1---> 192.168.2.254:2002 --> 168.95.1.2:80
++*	192.168.1.2:50 <-- 168.95.1.2:80 <--TXQ1---- 192.168.2.254:2002 <-- 168.95.1.2:80
++*
++*	192.168.1.3:50 --> 168.95.1.3:80 ---TXQ2---> 192.168.2.254:2003 --> 168.95.1.3:80
++*	192.168.1.3:50 <-- 168.95.1.3:80 <--TXQ2---- 192.168.2.254:2003 <-- 168.95.1.3:80
++*
++*	192.168.1.4:50 --> 168.95.1.4:80 ---TXQ3---> 192.168.2.254:2004 --> 168.95.1.4:80
++*	192.168.1.4:50 <-- 168.95.1.4:80 <--TXQ3---- 192.168.2.254:2004 <-- 168.95.1.4:80
++*
++* 2 TCP Frames <----> PPPoE + TCP Frames
++*   LAN GMAC0 <--------------------------------> GMAC1 WAN
++*	192.168.2.1:50 --> 168.95.2.1:80 ---TXQ0---> 192.168.2.254:2001 --> 168.95.2.1:80
++*	192.168.2.1:50 <-- 168.95.2.1:80 <--TXQ0---- 192.168.2.254:2001 <-- 168.95.2.1:80
++*
++*	192.168.2.2:50 --> 168.95.2.2:80 ---TXQ1---> 192.168.2.254:2002 --> 168.95.2.2:80
++*	192.168.2.2:50 <-- 168.95.2.2:80 <--TXQ1---- 192.168.2.254:2002 <-- 168.95.2.2:80
++*
++*	192.168.2.3:50 --> 168.95.2.3:80 ---TXQ2---> 192.168.2.254:2003 --> 168.95.2.3:80
++*	192.168.2.3:50 <-- 168.95.2.3:80 <--TXQ2---- 192.168.2.254:2003 <-- 168.95.2.3:80
++*
++*	192.168.2.4:50 --> 168.95.2.4:80 ---TXQ3---> 192.168.2.254:2004 --> 168.95.2.4:80
++*	192.168.2.4:50 <-- 168.95.2.4:80 <--TXQ3---- 192.168.2.254:2004 <-- 168.95.2.4:80
++*
++* 3 TCP Frames <----> VLAN + PPPoE + TCP Frames
++*   LAN GMAC0 <--------------------------------> GMAC1 WAN
++*	192.168.3.1:50 --> 168.95.3.1:80 ---TXQ0---> 192.168.2.254:2001 --> 168.95.3.1:80
++*	192.168.3.1:50 <-- 168.95.3.1:80 <--TXQ0---- 192.168.2.254:2001 <-- 168.95.3.1:80
++*
++*	192.168.3.2:50 --> 168.95.3.2:80 ---TXQ1---> 192.168.2.254:2002 --> 168.95.3.2:80
++*	192.168.3.2:50 <-- 168.95.3.2:80 <--TXQ1---- 192.168.2.254:2002 <-- 168.95.3.2:80
++*
++*	192.168.3.3:50 --> 168.95.3.3:80 ---TXQ2---> 192.168.2.254:2003 --> 168.95.3.3:80
++*	192.168.3.3:50 <-- 168.95.3.3:80 <--TXQ2---- 192.168.2.254:2003 <-- 168.95.3.3:80
++*
++*	192.168.3.4:50 --> 168.95.3.4:80 ---TXQ3---> 192.168.2.254:2004 --> 168.95.3.4:80
++*	192.168.3.4:50 <-- 168.95.3.4:80 <--TXQ3---- 192.168.2.254:2004 <-- 168.95.3.4:80
++*
++* 4 VLAN-A + TCP Frames <----> VLAN-B + PPPoE + TCP Frames
++*   LAN GMAC0 <--------------------------------> GMAC1 WAN
++*	192.168.4.1:50 --> 168.95.4.1:80 ---TXQ0---> 192.168.2.254:2001 --> 168.95.4.1:80
++*	192.168.4.1:50 <-- 168.95.4.1:80 <--TXQ0---- 192.168.2.254:2001 <-- 168.95.4.1:80
++*
++*	192.168.4.2:50 --> 168.95.4.2:80 ---TXQ1---> 192.168.2.254:2002 --> 168.95.4.2:80
++*	192.168.4.2:50 <-- 168.95.4.2:80 <--TXQ1---- 192.168.2.254:2002 <-- 168.95.4.2:80
++*
++*	192.168.4.3:50 --> 168.95.4.3:80 ---TXQ2---> 192.168.2.254:2003 --> 168.95.4.3:80
++*	192.168.4.3:50 <-- 168.95.4.3:80 <--TXQ2---- 192.168.2.254:2003 <-- 168.95.4.3:80
++*
++*	192.168.4.4:50 --> 168.95.4.4:80 ---TXQ3---> 192.168.2.254:2004 --> 168.95.4.4:80
++*	192.168.4.4:50 <-- 168.95.4.4:80 <--TXQ3---- 192.168.2.254:2004 <-- 168.95.4.4:80
++*
++*
++*
++*----------------------------------------------------------------------*/
++#ifdef SL351x_NAT_TEST_BY_SMARTBITS
++#define 	NAT_IPIV(a,b,c,d)			((a<<24)+(b<<16)+(c<<8)+d)
++#define     NAT_TEST_CLIENT_IP 			NAT_IPIV(192,168,1,1)
++#define     NAT_TEST_SERVER_IP 			NAT_IPIV(168,95,1,1)
++#define		NAT_TEST_LAN_IP				NAT_IPIV(192,168,1,254)
++#define		NAT_TEST_WAN_IP				NAT_IPIV(192,168,2,254)
++#define     NAT_TEST_MAP_PORT_BASE		2001
++#define     NAT_TEST_SPORT				50
++#define     NAT_TEST_DPORT				80
++#define     NAT_TEST_PROTOCOL			6
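++
++/* Quick check of the packing done by NAT_IPIV() above (ours): each octet
++ * occupies one byte of the 32-bit value, most significant first, so
++ *	NAT_IPIV(192,168,1,1) == 0xC0A80101
++ *	NAT_IPIV(168,95,1,1)  == 0xA85F0101
++ * which is the host-order form used in the hash keys below.
++ */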
++u8			nat_test_lan_target_da[6]={0x00,0x11,0x22,0x33,0x44,0x55};
++u8			nat_test_wan_target_da[6]={0x00,0xaa,0xbb,0xcc,0xdd,0xee};
++u8			nat_test_lan_my_da[6]={0x00,0x11,0x11,0x11,0x11,0x11};
++u8			nat_test_wan_my_da[6]={0x00,0x22,0x22,0x22,0x22,0x22};
++static void nat_init_test_entry(void)
++{
++	int 				i, j ;
++	NAT_HASH_ENTRY_T	*hash_entry;
++	u32					sip, dip;
++	u32					hash_data[HASH_MAX_DWORDS];
++	NAT_CFG_T			*cfg;
++	int					hash_index;
++
++	cfg = (NAT_CFG_T *)&nat_cfg;
++	hash_entry = (NAT_HASH_ENTRY_T *)&hash_data;
++	hash_entry->key.Ethertype 	= 0;
++	hash_entry->key.rule_id 	= 0;
++	hash_entry->key.ip_protocol = IPPROTO_TCP;
++	hash_entry->key.reserved1 	= 0;
++	hash_entry->key.reserved2 	= 0;
++	// hash_entry->key.sip 		= NAT_TEST_CLIENT_IP;
++	// hash_entry->key.dip 		= NAT_TEST_SERVER_IP;
++	hash_entry->key.sport 		= htons(NAT_TEST_SPORT);
++	hash_entry->key.dport 		= htons(NAT_TEST_DPORT);
++	hash_entry->key.rule_id = cfg->tcp_udp_rule_id;
++	hash_entry->action.dword = NAT_LAN2WAN_ACTIONS;
++
++	sip = NAT_TEST_CLIENT_IP;
++	dip = NAT_TEST_SERVER_IP;
++
++	// Init TCP <------> TCP hash entries
++	// LAN --> WAN
++	// (1) TCP --> TCP
++	// (2) TCP --> PPPoE + TCP
++	// (3) TCP --> VLAN-B + PPPoE + TCP
++	// (4) TCP + VLAN-A --> VLAN-B + PPPoE + TCP
++	memcpy(hash_entry->param.da, nat_test_wan_target_da, 6);
++	memcpy(hash_entry->param.sa, nat_test_wan_my_da, 6);
++	hash_entry->key.port_id = cfg->lan_port;
++	for (i=0; i<4; i++)
++	{
++		if (i < 2)
++		{
++			hash_entry->action.bits.dest_qid = i+2;
++		}
++		else
++		{
++			hash_entry->action.bits.dest_qid = i;
++		}
++		hash_entry->action.bits.dest_qid += (cfg->wan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->param.Sport = NAT_TEST_MAP_PORT_BASE+i;
++		hash_entry->param.Dport = NAT_TEST_DPORT;
++		for (j=0; j<4; j++)
++		{
++			hash_entry->key.sip = sip + i + j*0x100;
++			hash_entry->key.dip = dip + i + j*0x100;
++			hash_entry->param.Dip = hash_entry->key.dip;
++			hash_entry->param.Sip = NAT_TEST_WAN_IP;
++			switch (j)
++			{
++			case 0:
++				hash_entry->action.bits.pppoe = 0;
++				hash_entry->param.pppoe = 0;
++				hash_entry->action.bits.vlan = 0;
++				hash_entry->param.vlan = 0;
++				break;
++			case 1:
++				hash_entry->action.bits.pppoe = 1;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 0;
++				hash_entry->param.vlan = 0;
++				break;
++			case 2:
++				hash_entry->action.bits.pppoe = 1;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 1;
++				hash_entry->param.vlan = i+10;
++				break;
++			case 3:
++				hash_entry->action.bits.pppoe = 1;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 1;
++				hash_entry->param.vlan = i+10;
++				break;
++			}
++			hash_entry->tmo.counter = hash_entry->tmo.interval = 0x7fff;
++			hash_index = nat_build_keys(&hash_entry->key);
++			nat_write_hash_entry(hash_index, hash_entry);
++			hash_nat_enable_owner(hash_index);
++			hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++		}
++	}
++
++
++	// WAN --> LAN
++	hash_entry->key.port_id 	= cfg->wan_port;
++	hash_entry->key.sport 		= htons(NAT_TEST_DPORT);
++	hash_entry->key.dport 		= htons(NAT_TEST_DPORT);
++	hash_entry->key.rule_id		= cfg->tcp_udp_rule_id;
++	hash_entry->action.dword	= NAT_WAN2LAN_ACTIONS;
++	hash_entry->key.sport		= htons(NAT_TEST_DPORT);
++	memcpy(hash_entry->param.da, nat_test_lan_target_da, 6);
++	memcpy(hash_entry->param.sa, nat_test_lan_my_da, 6);
++	for (i=0; i<4; i++)
++	{
++		hash_entry->key.dport = htons(NAT_TEST_MAP_PORT_BASE + i);
++		if (i < 2)
++		{
++			hash_entry->action.bits.dest_qid = i+2;
++		}
++		else
++		{
++			hash_entry->action.bits.dest_qid = i;
++		}
++		hash_entry->action.bits.dest_qid += (cfg->lan_port==0) ? TOE_GMAC0_HW_TXQ0_QID : TOE_GMAC1_HW_TXQ0_QID;
++		hash_entry->param.Dport = NAT_TEST_SPORT;
++		hash_entry->param.Sport = NAT_TEST_DPORT;
++		hash_entry->param.da[5] = i;
++		for (j=0; j<4; j++)
++		{
++			hash_entry->key.sip = (dip + i + j*0x100);
++			hash_entry->key.dip = (NAT_TEST_WAN_IP);
++			hash_entry->param.Sip = hash_entry->key.sip;
++			hash_entry->param.Dip = sip + i + j*0x100;
++			switch (j)
++			{
++			case 0:
++				hash_entry->action.bits.pppoe = 0;
++				hash_entry->param.pppoe = 0;
++				hash_entry->action.bits.vlan = 0;
++				hash_entry->param.vlan = 0;
++				break;
++			case 1:
++				hash_entry->action.bits.pppoe = 2;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 0;
++				hash_entry->param.vlan = 0;
++				break;
++			case 2:
++				hash_entry->action.bits.pppoe = 2;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 2;
++				hash_entry->param.vlan = i+5;
++				break;
++			case 3:
++				hash_entry->action.bits.pppoe = 1;
++				hash_entry->param.pppoe = i+1;
++				hash_entry->action.bits.vlan = 1;
++				hash_entry->param.vlan = i+5;
++				break;
++			}
++			hash_entry->tmo.counter = hash_entry->tmo.interval = 0x7fff;
++			hash_index = nat_build_keys(&hash_entry->key);
++			nat_write_hash_entry(hash_index, hash_entry);
++			hash_nat_enable_owner(hash_index);
++			hash_validate_entry(hash_index); // Must be done last, else HW Tx gets ahead of SW
++		}
++	}
++}
++#endif	// SL351x_NAT_TEST_BY_SMARTBITS
++
++#endif // CONFIG_SL351x_NAT
++
+Index: linux-2.6.23.16/drivers/net/sl351x_proc.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_proc.c	2008-03-15 16:59:48.862910740 +0200
+@@ -0,0 +1,578 @@
++/****************************************************************************
++* Copyright 2006 Storlink Corp.  All rights reserved.
++*----------------------------------------------------------------------------
++* Name			: sl351x_proc.c
++* Description	:
++*		Handle Proc Routines for Storlink SL351x Platform
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*	04/13/2006	Gary Chen	Create and implement
++*
++*
++****************************************************************************/
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#ifdef CONFIG_NETFILTER
++#include 
++#endif
++#include 
++#include 
++#include 
++#ifdef CONFIG_SYSCTL
++#include 
++#endif
++
++#define	 MIDWAY
++#define	 SL_LEPUS
++
++// #define PROC_DEBUG_MSG	1
++
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#ifdef CONFIG_PROC_FS
++/*----------------------------------------------------------------------
++* Definition
++*----------------------------------------------------------------------*/
++#define	proc_printf					printk
++#define SL351x_GMAC_PROC_NAME		"sl351x_gmac"
++#define SL351x_NAT_PROC_NAME		"sl351x_nat"
++#define SL351x_TOE_PROC_NAME		"sl351x_toe"
++
++/*----------------------------------------------------------------------
++* Function Definition
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static int nat_ct_open(struct inode *inode, struct file *file);
++static void *nat_ct_seq_start(struct seq_file *s, loff_t *pos);
++static void nat_ct_seq_stop(struct seq_file *s, void *v);
++static void *nat_ct_seq_next(struct seq_file *s, void *v, loff_t *pos);
++static int nat_ct_seq_show(struct seq_file *s, void *v);
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++static int toe_ct_open(struct inode *inode, struct file *file);
++static void *toe_ct_seq_start(struct seq_file *s, loff_t *pos);
++static void toe_ct_seq_stop(struct seq_file *s, void *v);
++static void *toe_ct_seq_next(struct seq_file *s, void *v, loff_t *pos);
++static int toe_ct_seq_show(struct seq_file *s, void *v);
++extern int sl351x_get_toe_conn_flag(int index);
++extern struct toe_conn * sl351x_get_toe_conn_info(int index);
++#endif
++
++static int gmac_ct_open(struct inode *inode, struct file *file);
++static void *gmac_ct_seq_start(struct seq_file *s, loff_t *pos);
++static void gmac_ct_seq_stop(struct seq_file *s, void *v);
++static void *gmac_ct_seq_next(struct seq_file *s, void *v, loff_t *pos);
++static int gmac_ct_seq_show(struct seq_file *s, void *v);
++
++
++/*----------------------------------------------------------------------
++* Data
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SYSCTL
++// static struct ctl_table_header *nat_ct_sysctl_header;
++#endif
++
++#ifdef CONFIG_SL351x_NAT
++static struct seq_operations nat_ct_seq_ops = {
++	.start = nat_ct_seq_start,
++	.next  = nat_ct_seq_next,
++	.stop  = nat_ct_seq_stop,
++	.show  = nat_ct_seq_show
++};
++
++static struct file_operations nat_file_ops= {
++	.owner   = THIS_MODULE,
++	.open    = nat_ct_open,
++	.read    = seq_read,
++	.llseek  = seq_lseek,
++	.release = seq_release
++};
++#endif // CONFIG_SL351x_NAT
++
++#ifdef CONFIG_SL351x_RXTOE
++static struct seq_operations toe_ct_seq_ops = {
++	.start = toe_ct_seq_start,
++	.next  = toe_ct_seq_next,
++	.stop  = toe_ct_seq_stop,
++	.show  = toe_ct_seq_show
++};
++
++static struct file_operations toe_file_ops= {
++	.owner   = THIS_MODULE,
++	.open    = toe_ct_open,
++	.read    = seq_read,
++	.llseek  = seq_lseek,
++	.release = seq_release
++};
++#endif
++
++static struct seq_operations gmac_ct_seq_ops = {
++	.start = gmac_ct_seq_start,
++	.next  = gmac_ct_seq_next,
++	.stop  = gmac_ct_seq_stop,
++	.show  = gmac_ct_seq_show
++};
++
++static struct file_operations gmac_file_ops= {
++	.owner   = THIS_MODULE,
++	.open    = gmac_ct_open,
++	.read    = seq_read,
++	.llseek  = seq_lseek,
++	.release = seq_release
++};
++
++#ifdef SL351x_GMAC_WORKAROUND
++extern u32 gmac_workaround_cnt[4];
++extern u32 gmac_short_frame_workaround_cnt[2];
++#ifdef CONFIG_SL351x_NAT
++	extern u32 sl351x_nat_workaround_cnt;
++#endif
++#endif
++/*----------------------------------------------------------------------
++* nat_ct_open
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static int nat_ct_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &nat_ct_seq_ops);
++}
++#endif // CONFIG_SL351x_NAT
++/*----------------------------------------------------------------------
++* nat_ct_seq_start
++* find the first
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_NAT
++static void *nat_ct_seq_start(struct seq_file *s, loff_t *pos)
++{
++	int i;
++
++	// proc_printf("%s: *pos=%d\n", __func__, (int)*pos);
++	for (i=*pos; iHASH_TOTAL_ENTRIES)
++		return -ENOSPC;
++
++	idx--;
++	nat_entry = (NAT_HASH_ENTRY_T *)&hash_tables[idx];
++	gre_entry = (GRE_HASH_ENTRY_T *)nat_entry;
++	if (nat_entry->key.ip_protocol == IPPROTO_GRE)
++	{
++		if (seq_printf(s, "%4d: KEY MAC-%d [%d] %u.%u.%u.%u [%u]-->%u.%u.%u.%u\n",
++					idx, gre_entry->key.port_id, gre_entry->key.ip_protocol,
++					HIPQUAD(gre_entry->key.sip), ntohs(gre_entry->key.call_id),
++					HIPQUAD(gre_entry->key.dip)))
++			return -ENOSPC;
++		if (seq_printf(s, "      PARAMETER: %u.%u.%u.%u -->%u.%u.%u.%u [%u] Timeout:%ds\n",
++					HIPQUAD(gre_entry->param.Sip),
++					HIPQUAD(gre_entry->param.Dip), gre_entry->param.Dport,
++					gre_entry->tmo.counter))
++			return -ENOSPC;
++	}
++	else
++	{
++		if (seq_printf(s, "%4d: KEY MAC-%d [%d] %u.%u.%u.%u [%u]-->%u.%u.%u.%u [%u]\n",
++					idx, nat_entry->key.port_id, nat_entry->key.ip_protocol,
++					HIPQUAD(nat_entry->key.sip), ntohs(nat_entry->key.sport),
++					HIPQUAD(nat_entry->key.dip), ntohs(nat_entry->key.dport)))
++			return -ENOSPC;
++		if (seq_printf(s, "      PARAMETER: %u.%u.%u.%u [%u]-->%u.%u.%u.%u [%u] Timeout:%ds\n",
++					HIPQUAD(nat_entry->param.Sip), nat_entry->param.Sport,
++					HIPQUAD(nat_entry->param.Dip), nat_entry->param.Dport,
++					nat_entry->tmo.counter))
++			return -ENOSPC;
++	}
++	return 0;
++}
++#endif // CONFIG_SL351x_NAT
++
++/*----------------------------------------------------------------------
++* toe_ct_open
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_RXTOE
++static int toe_ct_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &toe_ct_seq_ops);
++}
++#endif
++/*----------------------------------------------------------------------
++* toe_ct_seq_start
++* find the first
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_SL351x_RXTOE
++static void *toe_ct_seq_start(struct seq_file *s, loff_t *pos)
++{
++	int i;
++
++	// proc_printf("%s: *pos=%d\n", __func__, (int)*pos);
++	for (i=*pos; iTOE_TOE_QUEUE_NUM)
++		return -ENOSPC;
++
++	idx--;
++	toe_entry = (struct toe_conn *)sl351x_get_toe_conn_info(idx);
++	if (!toe_entry)
++		return -ENOSPC;
++
++	if (seq_printf(s, "%4d: Qid %d MAC-%d TCP %u.%u.%u.%u [%u]-->%u.%u.%u.%u [%u]\n",
++				idx, toe_entry->qid, toe_entry->gmac->port_id,
++				NIPQUAD(toe_entry->saddr[0]), ntohs(toe_entry->source),
++				NIPQUAD(toe_entry->daddr[0]), ntohs(toe_entry->dest)))
++			return -ENOSPC;
++	return 0;
++}
++#endif
++/*----------------------------------------------------------------------
++* gmac_ct_open
++*----------------------------------------------------------------------*/
++static int gmac_ct_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &gmac_ct_seq_ops);
++}
++
++/*----------------------------------------------------------------------
++* gmac_ct_seq_start
++* find the first
++*----------------------------------------------------------------------*/
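++/* Positions 1..9 are returned as opaque cookies; each one maps to a
++ * register-dump case handled in gmac_ct_seq_show() below.
++ */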
++static void *gmac_ct_seq_start(struct seq_file *s, loff_t *pos)
++{
++	int i;
++	i = (int)*pos + 1;
++
++	if (i > 9)
++		return NULL;
++	else
++		return (void *)i;
++}
++
++/*----------------------------------------------------------------------
++* gmac_ct_seq_stop
++*----------------------------------------------------------------------*/
++static void gmac_ct_seq_stop(struct seq_file *s, void *v)
++{
++}
++
++/*----------------------------------------------------------------------
++* gmac_ct_seq_next
++*----------------------------------------------------------------------*/
++static void *gmac_ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	int i;
++
++	// proc_printf("%s: *pos=%d\n", __func__, (int)*pos);
++
++	(*pos)++;
++	i = (int)*pos + 1;
++
++	if (i > 9)
++		return NULL;
++	else
++		return (void *)i;
++}
++
++/*----------------------------------------------------------------------
++* seq_dm_long
++*----------------------------------------------------------------------*/
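++/* Dumps 'length' 32-bit words starting at 'location' as a hex dump, four
++ * words per 16-byte-aligned row with a dash after the second word, e.g.
++ *   0x60003000: 00000000 00000000 - 00000000 00000000
++ * Words before 'location' on the first aligned row are printed as blanks.
++ */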
++static void seq_dm_long(struct seq_file *s, u32 location, int length)
++{
++	u32		*start_p, *curr_p, *end_p;
++	u32		*datap, data;
++	int		i;
++
++	//if (length > 1024)
++	//	length = 1024;
++
++	start_p = (u32 *)location;
++	end_p = (u32 *)location + length;
++	curr_p = (u32 *)((u32)location & 0xfffffff0);
++	datap = (u32 *)location;
++	while (curr_p < end_p)
++	{
++		cond_resched();
++		seq_printf(s, "0x%08x: ",(u32)curr_p & 0xfffffff0);
++		for (i=0; i<4; i++)
++		{
++			if (curr_p < start_p || curr_p >= end_p)
++               seq_printf(s, "         ");
++			else
++			{
++				data = *datap;
++				seq_printf(s, "%08X ", data);
++			}
++			if (i==1)
++              seq_printf(s, "- ");
++
++			curr_p++;
++			datap++;
++		}
++        seq_printf(s, "\n");
++	}
++}
++
++/*----------------------------------------------------------------------
++* gmac_ct_seq_show
++*----------------------------------------------------------------------*/
++static int gmac_ct_seq_show(struct seq_file *s, void *v)
++{
++	switch ((int)v)
++	{
++		case 1:
++			seq_printf(s, "\nGMAC Global Registers\n");
++			seq_dm_long(s, TOE_GLOBAL_BASE, 32);
++			break;
++		case 2:
++			seq_printf(s, "\nGMAC Non-TOE Queue Header\n");
++			seq_dm_long(s, TOE_NONTOE_QUE_HDR_BASE, 12);
++			break;
++		case 3:
++			seq_printf(s, "\nGMAC TOE Queue Header\n");
++			seq_dm_long(s, TOE_TOE_QUE_HDR_BASE, 12);
++			break;
++		case 4:
++			seq_printf(s, "\nGMAC-0 DMA Registers\n");
++			seq_dm_long(s, TOE_GMAC0_DMA_BASE, 52);
++			break;
++		case 5:
++			seq_printf(s, "\nGMAC-0 Registers\n");
++			seq_dm_long(s, TOE_GMAC0_BASE, 32);
++			break;
++		case 6:
++			seq_printf(s, "\nGMAC-1 DMA Registers\n");
++			seq_dm_long(s, TOE_GMAC1_DMA_BASE, 52);
++			break;
++		case 7:
++			seq_printf(s, "\nGMAC-1 Registers\n");
++			seq_dm_long(s, TOE_GMAC1_BASE, 32);
++			break;
++		case 8:
++			seq_printf(s, "\nGLOBAL Registers\n");
++			seq_dm_long(s, GMAC_GLOBAL_BASE_ADDR, 16);
++			break;
++		case 9:
++#ifdef SL351x_GMAC_WORKAROUND
++			seq_printf(s, "\nGMAC-0 Rx/Tx/Short Workaround: %u, %u, %u\n", gmac_workaround_cnt[0], gmac_workaround_cnt[1], gmac_short_frame_workaround_cnt[0]);
++			seq_printf(s, "GMAC-1 Rx/Tx/Short Workaround: %u, %u, %u\n", gmac_workaround_cnt[2], gmac_workaround_cnt[3], gmac_short_frame_workaround_cnt[1]);
++#ifdef CONFIG_SL351x_NAT
++			seq_printf(s, "NAT Workaround: %u\n", sl351x_nat_workaround_cnt);
++#endif
++#endif
++			break;
++		default:
++			return -ENOSPC;
++	}
++	return 0;
++}
++
++/*----------------------------------------------------------------------
++* init
++*----------------------------------------------------------------------*/
++static int __init init(void)
++{
++	struct proc_dir_entry *proc_gmac=NULL;
++
++#ifdef CONFIG_SL351x_NAT
++	struct proc_dir_entry *proc_nat=NULL;
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++	struct proc_dir_entry *proc_toe=NULL;
++#endif
++
++#ifdef CONFIG_SYSCTL
++	// nat_ct_sysctl_header = NULL;
++#endif
++	proc_gmac = proc_net_fops_create(SL351x_GMAC_PROC_NAME, 0440, &gmac_file_ops);
++	if (!proc_gmac) goto init_bad;
++
++#ifdef CONFIG_SL351x_NAT
++	proc_nat = proc_net_fops_create(SL351x_NAT_PROC_NAME, 0440, &nat_file_ops);
++	if (!proc_nat) goto init_bad;
++#endif // CONFIG_SL351x_NAT
++
++#ifdef CONFIG_SL351x_RXTOE
++	proc_toe = proc_net_fops_create(SL351x_TOE_PROC_NAME, 0440, &toe_file_ops);
++	if (!proc_toe) goto init_bad;
++#endif
++
++#ifdef CONFIG_SYSCTL
++	// nat_ct_sysctl_header = register_sysctl_table(nat_ct_net_table, 0);
++	// if (!nat_ct_sysctl_header) goto init_bad;
++#endif
++
++	return 0;
++
++init_bad:
++	if (proc_gmac) proc_net_remove(SL351x_GMAC_PROC_NAME);
++
++#ifdef CONFIG_SL351x_NAT
++	if (proc_nat) proc_net_remove(SL351x_NAT_PROC_NAME);
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++	if (proc_toe) proc_net_remove(SL351x_TOE_PROC_NAME);
++#endif
++
++#ifdef CONFIG_SYSCTL
++	// if (nat_ct_sysctl_header) unregister_sysctl_table(nat_ct_sysctl_header);
++#endif
++	proc_printf("SL351x NAT Proc: can't create proc or register sysctl.\n");
++	return -ENOMEM;
++}
++
++/*----------------------------------------------------------------------
++* fini
++*----------------------------------------------------------------------*/
++static void __exit fini(void)
++{
++	proc_net_remove(SL351x_GMAC_PROC_NAME);
++
++#ifdef CONFIG_SL351x_NAT
++	proc_net_remove(SL351x_NAT_PROC_NAME);
++#endif
++
++#ifdef CONFIG_SL351x_RXTOE
++	proc_net_remove(SL351x_TOE_PROC_NAME);
++#endif
++
++#ifdef CONFIG_SYSCTL
++	// unregister_sysctl_table(nat_ct_sysctl_header);
++#endif
++}
++
++/*----------------------------------------------------------------------
++* module
++*----------------------------------------------------------------------*/
++module_init(init);
++module_exit(fini);
++
++#endif	// CONFIG_PROC_FS
+Index: linux-2.6.23.16/drivers/net/sl351x_toe.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl351x_toe.c	2008-03-15 16:57:25.854761029 +0200
+@@ -0,0 +1,1083 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*--------------------------------------------------------------------------
++* Name			: sl351x_toe.c
++* Description	:
++*		Provide TOE routines for SL351x
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*				Xiaochong
++*
++****************************************************************************/
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++static int in_toe_isr;
++static int toe_initialized=0;
++
++static struct toe_conn	toe_connections[TOE_TOE_QUEUE_NUM];
++EXPORT_SYMBOL(toe_connections);
++static __u32 toe_connection_bits[TOE_TOE_QUEUE_NUM/32] __attribute__ ((aligned(16)));
++struct sk_buff* gen_pure_ack(struct toe_conn* connection, TOE_QHDR_T* toe_qhdr, INTR_QHDR_T *intr_curr_desc);
++
++extern struct storlink_sysctl	storlink_ctl;
++extern TOE_INFO_T toe_private_data;
++extern spinlock_t gmac_fq_lock;
++extern void mac_write_dma_reg(int mac, unsigned int offset, u32 data);
++extern int mac_set_rule_reg(int mac, int rule, int enabled, u32 reg0, u32 reg1, u32 reg2);
++extern int hash_add_toe_entry(HASH_ENTRY_T *entry);
++extern void toe_gmac_fill_free_q(void);
++
++#define _DEBUG_SKB_		1
++#ifdef _DEBUG_SKB_
++/*---------------------------------------------------------------------------
++ * _debug_skb
++ *-------------------------------------------------------------------------*/
++static inline void _debug_skb(struct sk_buff *skb, GMAC_RXDESC_T *toe_curr_desc, u32 data)
++{
++	if ((u32)skb < 0x1000)
++	{
++		printk("%s skb=%x\n", __func__, (u32)skb);
++		while(1);
++	}
++	REG32(__va(toe_curr_desc->word2.buf_adr)-SKB_RESERVE_BYTES) = data;
++}
++#else
++#define _debug_skb(x, y, z)
++#endif
++
++/*---------------------------------------------------------------------------
++ * get_connection_seq_num
++ *-------------------------------------------------------------------------*/
++u32 get_connection_seq_num(unsigned short qid)
++{
++	TOE_QHDR_T	*toe_qhdr;
++
++	toe_qhdr = (TOE_QHDR_T*)TOE_TOE_QUE_HDR_BASE;
++	toe_qhdr += qid;
++	return (u32)toe_qhdr->word3.seq_num;
++}
++EXPORT_SYMBOL(get_connection_seq_num);
++
++/*---------------------------------------------------------------------------
++ * get_connection_ack_num
++ *-------------------------------------------------------------------------*/
++u32 get_connection_ack_num(unsigned short qid)
++{
++	TOE_QHDR_T	*toe_qhdr;
++
++	toe_qhdr = (TOE_QHDR_T*)TOE_TOE_QUE_HDR_BASE;
++	toe_qhdr += qid;
++	return (u32)toe_qhdr->word4.ack_num;
++}
++EXPORT_SYMBOL(get_connection_ack_num);
++
++/*---------------------------------------------------------------------------
++ * dump_toe_qhdr
++ *-------------------------------------------------------------------------*/
++void dump_toe_qhdr(TOE_QHDR_T *toe_qhdr)
++{
++	printk("TOE w1 %x, w2 %x, w3 %x\n", toe_qhdr->word1.bits32,
++		toe_qhdr->word2.bits32, toe_qhdr->word3.bits32);
++	printk("w4 %x, w5 %x, w6 %x\n", toe_qhdr->word4.bits32,
++		toe_qhdr->word5.bits32, toe_qhdr->word6.bits32);
++}
++
++/*---------------------------------------------------------------------------
++ * dump_intrq_desc
++ *-------------------------------------------------------------------------*/
++void dump_intrq_desc(INTR_QHDR_T *intr_curr_desc)
++{
++	printk("INTR w0 %x, w1 %x, seq %x\n", intr_curr_desc->word0.bits32,
++		intr_curr_desc->word1.bits32, intr_curr_desc->word2.bits32);
++	printk("ack %x, w4 %x\n", intr_curr_desc->word3.bits32,
++		intr_curr_desc->word4.bits32);
++}
++
++/*---------------------------------------------------------------------------
++ * This routine will initialize a TOE matching rule
++ * called by SL351x GMAC driver.
++ *-------------------------------------------------------------------------*/
++void sl351x_toe_init(void)
++{
++	GMAC_MRxCR0_T	mrxcr0;
++	GMAC_MRxCR1_T	mrxcr1;
++	GMAC_MRxCR2_T	mrxcr2;
++	int	rule, rc;
++
++	if (toe_initialized)
++		return;
++
++	toe_initialized = 1;
++
++#ifndef CONFIG_SL351x_NAT
++	mrxcr0.bits32 = 0;
++	mrxcr1.bits32 = 0;
++	mrxcr2.bits32 = 0;
++	mrxcr0.bits.l3 = 1;
++	mrxcr0.bits.l4 = 1;
++	mrxcr1.bits.sip = 1;
++	mrxcr1.bits.dip = 1;
++	mrxcr1.bits.l4_byte0_15 = 0x0f;
++	mrxcr0.bits.sprx = 1;
++	rule = 0;
++	rc = mac_set_rule_reg(0, rule, 1, mrxcr0.bits32, mrxcr1.bits32,
++						mrxcr2.bits32);
++	if (rc<0) {
++		printk("%s::Set MAC 0 rule fail!\n", __func__);
++	}
++	rc = mac_set_rule_reg(1, rule, 1, mrxcr0.bits32, mrxcr1.bits32,
++	     					mrxcr2.bits32);
++	if (rc<0) {
++		printk("%s::Set MAC 1 rule fail!\n", __func__);
++	}
++#endif // CONFIG_SL351x_NAT
++}
++
++/*---------------------------------------------------------------------------
++ * get_interrupt_queue_id
++ * assign an interrupt queue number to a given tcp queue
++ *-------------------------------------------------------------------------*/
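++/* qid & 3 folds the TOE queue number onto what appears to be one of four
++ * interrupt queues, i.e. TOE queues 0, 4, 8, ... all report through
++ * interrupt queue 0.
++ */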
++int get_interrupt_queue_id(int tcp_qid)
++{
++	return (int)(tcp_qid & 0x0003);
++}
++
++/*---------------------------------------------------------------------------
++ * reset_connection_index
++ * reset the connection bit by given index
++ *-------------------------------------------------------------------------*/
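++/* toe_connection_bits[] holds one bit per TOE queue, 32 per word:
++ * index >> 5 selects the word and index & 0x1f the bit, so e.g. index 37
++ * clears bit 5 of toe_connection_bits[1].
++ */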
++void reset_connection_index(__u8 index)
++{
++	__u32 mask = ~(0xffffffff & (1<< (index&0x1f)));
++	toe_connection_bits[index>>5] = toe_connection_bits[index>>5] & mask;
++}
++
++/*---------------------------------------------------------------------------
++ * update_timer
++ *-------------------------------------------------------------------------*/
++void update_timer(struct toe_conn* connection)
++{
++//	if (time_before(jiffies, connection->last_rx_jiffies+3))
++//	if ((jiffies + 0xffffffff - connection->last_rx_jiffies) & 0x3)
++//	if (connection->last_rx_jiffies > jiffies)
++//		printk("%s::jif %g, last_rx_jif %g\n", __func__, jiffies, connection->last_rx_jiffies);
++/*	if ((long)(jiffies + 2)< 3) { // overflow...
++		printk("%s::jiffies %x\n", __func__, jiffies);
++	} */
++//	if ((long)(jiffies - connection->last_rx_jiffies)< 2)
++//		return;
++	connection->last_rx_jiffies = jiffies;
++	// gary chen mod_timer(&connection->rx_timer, jiffies+2);
++	connection->rx_timer.expires = jiffies + 2;
++	add_timer(&connection->rx_timer);
++//	printk("%s::nt %x, lj %x\n", __func__, (jiffies+2), connection->last_rx_jiffies);
++}
++
++/*---------------------------------------------------------------------------
++ * gen_pure_ack
++ *-------------------------------------------------------------------------*/
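++/* Builds a minimal Ethernet/IPv4/TCP pure-ACK frame in a fresh skb:
++ * Ethernet header at offset 0, IP header at offset 14, TCP header at
++ * offset 34.  seq/ack/window are taken from the interrupt descriptor when
++ * one is passed in, otherwise from the TOE queue header.
++ */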
++struct sk_buff* gen_pure_ack(struct toe_conn* connection, TOE_QHDR_T* toe_qhdr,
++INTR_QHDR_T *intr_curr_desc)
++{
++	struct sk_buff	*skb;
++	struct iphdr	*ip_hdr;
++	struct tcphdr	*tcp_hdr;
++	struct ethhdr	*eth_hdr;
++
++	if ((skb= dev_alloc_skb(RX_BUF_SIZE))==NULL) {
++		printk("%s::alloc pure ack fail!\n", __func__);
++		return NULL;
++	}
++	skb_reserve(skb, RX_INSERT_BYTES);
++	memset(skb->data, 0, 60);
++
++	eth_hdr = (struct ethhdr*)&(skb->data[0]);
++	memcpy(eth_hdr, &connection->l2_hdr, sizeof(struct ethhdr));
++
++	ip_hdr = (struct iphdr*)&(skb->data[14]);
++	ip_hdr->version = connection->ip_ver;
++	ip_hdr->ihl = 20>>2;
++	ip_hdr->tot_len = ntohs(40);
++	ip_hdr->frag_off = htons(IP_DF);
++	ip_hdr->ttl = 128;
++	ip_hdr->protocol = 0x06;
++	ip_hdr->saddr = connection->saddr[0];
++	ip_hdr->daddr = connection->daddr[0];
++//	printk("%s ip sa %x, da %x\n",
++//		__func__, ntohl(ip_hdr->saddr), ntohl(ip_hdr->daddr));
++
++	tcp_hdr = (struct tcphdr*)&(skb->data[34]);
++	tcp_hdr->source = connection->source;
++	tcp_hdr->dest = connection->dest;
++	if (intr_curr_desc) {
++		tcp_hdr->seq = htonl(intr_curr_desc->word2.seq_num);
++		tcp_hdr->ack_seq = htonl(intr_curr_desc->word3.ack_num);
++		tcp_hdr->window = htons(intr_curr_desc->word0.bits.win_size);
++	} else {
++		tcp_hdr->seq = htonl(toe_qhdr->word3.seq_num);
++		tcp_hdr->ack_seq = htonl(toe_qhdr->word4.ack_num);
++		tcp_hdr->window = htons(toe_qhdr->word6.bits.WinSize);
++	}
++	tcp_hdr->ack = 1;
++	tcp_hdr->doff = 20 >> 2;
++#if 0
++	if (!intr_curr_desc) {
++		unsigned char byte;
++		for (i=0; i<20; i++) {
++			byte = skb->data[34+i];
++			printk("%x ", byte);
++		}
++		printk("\n");
++	}
++#endif
++	TCP_SKB_CB(skb)->connection = connection;
++	return skb;
++}
++
++/*---------------------------------------------------------------------------
++ * connection_rx_timer
++ *-------------------------------------------------------------------------*/
++void connection_rx_timer(unsigned long *data)
++{
++	struct toe_conn	*connection = (struct toe_conn*)data;
++	unsigned int	tcp_qid, toeq_wptr;
++	unsigned int	pkt_size, desc_count;
++	struct sk_buff	*skb;
++	GMAC_RXDESC_T	*toe_curr_desc;
++	TOE_QHDR_T	*toe_qhdr;
++	struct net_device	*dev;
++	unsigned long	conn_flags;
++	DMA_RWPTR_T		toeq_rwptr;
++	unsigned short	timeout_descs;
++
++	if (in_toe_isr)
++		printk("%s::in_toe_isr=%d!\n", __func__, in_toe_isr);
++
++	if (connection) {
++		/* should we disable gmac interrupt first? */
++		if (!connection->gmac)
++			printk("%s::conn gmac %x!\n", __func__, (u32)connection->gmac);
++		local_irq_save(conn_flags);
++		if (!spin_trylock(&connection->conn_lock)) {
++			local_irq_restore(conn_flags);
++			// timer should be updated by the toeq isr. So no need to update here.
++			printk("%s::conn_lock is held by ISR!\n", __func__);
++			return;
++		}
++		disable_irq(connection->gmac->irq);
++
++		/* disable hash entry and get toeq desc. */
++		hash_set_valid_flag(connection->hash_entry_index, 0);
++		do{} while(0);	/* wait until HW finish */
++
++		dev = connection->dev;
++		if (!dev)
++			printk("%s::conn dev NULL!\n", __func__);
++		tcp_qid = connection->qid;
++		toe_qhdr = (TOE_QHDR_T *)(TOE_TOE_QUE_HDR_BASE +
++		              tcp_qid * sizeof(TOE_QHDR_T));
++		toeq_rwptr.bits32 = readl(&toe_qhdr->word1);
++		toeq_wptr = toe_qhdr->word1.bits.wptr;
++		timeout_descs = toeq_wptr - toeq_rwptr.bits.rptr;
++
++		if (toeq_rwptr.bits.rptr == toeq_wptr) {
++			if (toe_qhdr->word5.bits32) {
++				// shall we check toe_qhdr->word2.bits?
++				skb = gen_pure_ack(connection, toe_qhdr, (INTR_QHDR_T *)NULL);
++				skb_put(skb, 54);
++				skb->dev = connection->dev;
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++				skb->protocol = eth_type_trans(skb, connection->dev);
++				netif_rx(skb);
++				connection->dev->last_rx = jiffies;
++			}
++		} else {
++			while (toeq_rwptr.bits.rptr != toeq_rwptr.bits.wptr) {
++				/* we just simply send those packets to tcp? */
++				toe_curr_desc = (GMAC_RXDESC_T*)(toe_private_data.toe_desc_base[tcp_qid]
++					+ toeq_rwptr.bits.rptr * sizeof(GMAC_RXDESC_T));
++				connection->curr_desc = toe_curr_desc;
++				if (toe_curr_desc->word3.bits.ctrl_flag) {
++					printk("%s::ctrl flag! %x, conn rptr %d, to %d, jif %x, conn_jif %x\n",
++						__func__, toe_curr_desc->word3.bits32,
++						connection->toeq_rwptr.bits.rptr, timeout_descs,
++						(u32)jiffies, (u32)connection->last_rx_jiffies);
++				}
++				desc_count = toe_curr_desc->word0.bits.desc_count;
++				pkt_size = toe_curr_desc->word1.bits.byte_count;
++				consistent_sync((void*)__va(toe_curr_desc->word2.buf_adr), pkt_size,
++					PCI_DMA_FROMDEVICE);
++				skb = (struct sk_buff*)(REG32(__va(toe_curr_desc->word2.buf_adr)-
++					SKB_RESERVE_BYTES));
++				_debug_skb(skb, (GMAC_RXDESC_T *)toe_curr_desc, 0x02);
++				connection->curr_rx_skb = skb;
++				skb_reserve(skb, RX_INSERT_BYTES);
++				skb_put(skb, pkt_size);
++				skb->dev = dev;
++				skb->protocol = eth_type_trans(skb, dev);
++				{
++					struct iphdr* ip_hdr = (struct iphdr*)&(skb->data[0]);
++					if (toe_curr_desc->word3.bits.ctrl_flag)
++						printk("%s::ip id %x\n", __func__, ntohs(ip_hdr->id));
++				}
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++				netif_rx(skb);
++				dev->last_rx = jiffies;
++#if 0
++				if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
++					printk("%s::alloc buf fail!\n", __func__);
++				}
++				*(unsigned int*)(skb->data) = (unsigned int)skb;
++				connection->curr_rx_skb = skb;
++				skb_reserve(skb, SKB_RESERVE_BYTES);
++				spin_lock_irqsave(&connection->gmac->rx_mutex, flags);
++				fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++				if (toe_private_data.fq_rx_rwptr.bits.wptr != fq_rwptr.bits.wptr) {
++					mac_stop_txdma((struct net_device*)connection->dev);
++					spin_unlock_irqrestore(&connection->gmac->rx_mutex, flags);
++					while(1);
++				}
++				fq_desc = (GMAC_RXDESC_T*)toe_private_data.swfq_desc_base + fq_rwptr.bits.wptr;
++				fq_desc->word2.buf_adr = (unsigned int)__pa(skb->data);
++				fq_rwptr.bits.wptr = RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM);
++				SET_WPTR(TOE_GLOBAL_BASE+GLOBAL_SWFQ_RWPTR_REG, fq_rwptr.bits.wptr);
++				toe_private_data.fq_rx_rwptr.bits32 = fq_rwptr.bits32;
++				spin_unlock_irqrestore(&connection->gmac->rx_mutex, flags);
++#endif
++//				spin_lock_irqsave(&connection->gmac->rx_mutex, flags);
++				toeq_rwptr.bits.rptr = RWPTR_ADVANCE_ONE(toeq_rwptr.bits.rptr, TOE_TOE_DESC_NUM);
++				SET_RPTR(&toe_qhdr->word1, toeq_rwptr.bits.rptr);
++//				spin_unlock_irqrestore(&connection->gmac->rx_mutex, flags);
++				connection->toeq_rwptr.bits32 = toeq_rwptr.bits32;
++			}
++			toeq_rwptr.bits32 = readl(&toe_qhdr->word1);
++//			toe_gmac_fill_free_q();
++		}
++		connection->last_rx_jiffies = jiffies;
++		if (connection->status != TCP_CONN_CLOSED)
++			mod_timer(&connection->rx_timer, jiffies+2);
++		if (connection->status != TCP_CONN_ESTABLISHED)
++			printk("%s::conn status %x\n", __func__, connection->status);
++		hash_set_valid_flag(connection->hash_entry_index, 1);
++		enable_irq(connection->gmac->irq);
++		// Gary Chen spin_unlock_irqrestore(&connection->conn_lock, conn_flags);
++	}
++}
++
++/*---------------------------------------------------------------------------
++ * free_toeq_descs
++ *-------------------------------------------------------------------------*/
++void free_toeq_descs(int qid, TOE_INFO_T *toe)
++{
++	void	*desc_ptr;
++
++	desc_ptr = (void*)toe->toe_desc_base[qid];
++	pci_free_consistent(NULL, TOE_TOE_DESC_NUM*sizeof(GMAC_RXDESC_T), desc_ptr,
++	   (dma_addr_t)toe->toe_desc_base_dma[qid]);
++	toe->toe_desc_base[qid] = 0;
++}
++
++/*---------------------------------------------------------------------------
++ * set_toeq_hdr
++ *-------------------------------------------------------------------------*/
++void set_toeq_hdr(struct toe_conn*	connection, TOE_INFO_T* toe, struct net_device *dev)
++{
++	volatile TOE_QHDR_T	*toe_qhdr;
++	volatile unsigned int	toeq_wptr; // toeq_rptr
++	volatile GMAC_RXDESC_T	*toe_curr_desc;
++	struct sk_buff	*skb;
++	unsigned int	pkt_size;
++	DMA_RWPTR_T	toeq_rwptr;
++
++	if (connection->status == TCP_CONN_CLOSING) {
++		connection->status = TCP_CONN_CLOSED;
++		hash_set_valid_flag(connection->hash_entry_index, 0);
++		// remove timer first.
++		// del_timer_sync(&(connection->rx_timer));
++		// check if any queued frames last time.
++		toe_qhdr = (volatile TOE_QHDR_T*)TOE_TOE_QUE_HDR_BASE;
++		toe_qhdr += connection->qid;
++		toeq_rwptr.bits32 = readl(&toe_qhdr->word1);
++
++		//toeq_rptr = toe_qhdr->word1.bits.rptr;
++		toeq_wptr = toe_qhdr->word1.bits.wptr;
++		while (toeq_rwptr.bits.rptr != toeq_wptr) {
++			printk("%s::pending frames in TOE Queue before closing!\n", __func__);
++			toe_curr_desc = (GMAC_RXDESC_T*)(toe->toe_desc_base[connection->qid] +
++				toe_qhdr->word1.bits.rptr*sizeof(GMAC_RXDESC_T));
++			connection->curr_desc = (GMAC_RXDESC_T *)toe_curr_desc;
++			pkt_size = toe_curr_desc->word1.bits.byte_count;
++			consistent_sync((void*)__va(toe_curr_desc->word2.buf_adr), pkt_size,
++				PCI_DMA_FROMDEVICE);
++			skb = (struct sk_buff*)(REG32(__va(toe_curr_desc->word2.buf_adr) -
++				SKB_RESERVE_BYTES));
++			_debug_skb(skb, (GMAC_RXDESC_T *)toe_curr_desc, 0x03);
++			connection->curr_rx_skb = skb;
++			skb_reserve(skb, RX_INSERT_BYTES);
++			skb_put(skb, pkt_size);
++			skb->dev = connection->dev;
++			skb->protocol = eth_type_trans(skb, connection->dev);
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++			netif_rx(skb);
++			connection->dev->last_rx = jiffies;
++
++			toeq_rwptr.bits.rptr = RWPTR_ADVANCE_ONE(toeq_rwptr.bits.rptr, TOE_TOE_DESC_NUM);
++			SET_RPTR(&toe_qhdr->word1, toeq_rwptr.bits.rptr);
++		}
++		free_toeq_descs(connection->qid, toe);
++		// shall we re-fill free queue?
++
++		reset_connection_index(connection->qid);
++		//memset(connection, 0, sizeof(struct toe_conn));
++		printk(" del timer and close connection %x, qid %d\n", (u32)connection, connection->qid);
++		return;
++	}
++	/* enable or setup toe queue header */
++	if (connection->status == TCP_CONN_CONNECTING && storlink_ctl.rx_max_pktsize) {
++		volatile TOE_QHDR_T	*qhdr;
++		int iq_id;
++		connection->status = TCP_CONN_ESTABLISHED;
++		qhdr = (volatile TOE_QHDR_T*)((unsigned int)TOE_TOE_QUE_HDR_BASE +
++		               connection->qid * sizeof(TOE_QHDR_T));
++
++		iq_id = get_interrupt_queue_id(connection->qid);
++		connection->dev = dev;
++		connection->gmac = dev->priv;
++		connection->toeq_rwptr.bits32 = 0;
++
++//		qhdr->word6.bits.iq_num = iq_id;
++		qhdr->word6.bits.MaxPktSize = (connection->max_pktsize)>>2; // in word.
++		qhdr->word7.bits.AckThreshold = connection->ack_threshold;
++		qhdr->word7.bits.SeqThreshold = connection->seq_threshold;
++
++		// init timer.
++#if 1
++		init_timer(&connection->rx_timer);
++		connection->rx_timer.expires = jiffies + 5;
++		connection->rx_timer.data = (unsigned long)connection;
++		connection->rx_timer.function = (void *)&connection_rx_timer;
++		add_timer(&connection->rx_timer);
++		connection->last_rx_jiffies = jiffies;
++		printk("init_timer %x\n", (u32)jiffies);
++#endif
++		hash_set_valid_flag(connection->hash_entry_index, 1);
++		return;
++	} else {
++		printk("%s::conn status %x, rx_pktsize %d\n",
++			__func__, connection->status, storlink_ctl.rx_max_pktsize);
++	}
++}
++
++/*---------------------------------------------------------------------------
++ * get_connection_index
++ * get_connection_index finds an available index when a new connection
++ * needs to be allocated.
++ * We find an available Qid from the AV bits and write it to the hash table,
++ * so that when an RxTOE packet is received, the sw_id from the ToeQ
++ * descriptor is also the Qid of the connection Q.
++ *-------------------------------------------------------------------------*/
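++/* Example: with toe_connection_bits[0] == 0x0000000f the first four queues
++ * are taken; the inverted word is 0xfffffff0, the scan finds bit 4 set and
++ * returns index 4.  Returns -1 when every queue is in use.
++ */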
++int get_connection_index(void)
++{
++	int i=0, j=0, index=-1;
++	__u32	connection_bits;
++
++	for (i = 0; i< TOE_TOE_QUEUE_NUM/32; i++) {
++		connection_bits = ~(toe_connection_bits[i]);
++		if (connection_bits == 0)
++			// all 32 bits are used.
++			continue;
++
++		for (j=0; j<32; j++) {
++			if (connection_bits & 0x01) {
++				index = i*32 + j;
++				return index;
++			}
++			connection_bits = connection_bits >> 1;
++		}
++	}
++	return index;
++}
++
++/*---------------------------------------------------------------------------
++ * set_toe_connection
++ *-------------------------------------------------------------------------*/
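++/* Marks TOE queue 'index' as reserved (val != 0) or free (val == 0) in the
++ * toe_connection_bits bitmap.
++ */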
++void set_toe_connection(int index, int val)
++{
++	if (val) {
++		toe_connection_bits[index/32] |= (1<<(index%32));
++	} else {
++		toe_connection_bits[index/32] &= (~(1<<(index%32)));
++	}
++}
++
++/*---------------------------------------------------------------------------
++ * sl351x_get_toe_conn_flag
++ *-------------------------------------------------------------------------*/
++int sl351x_get_toe_conn_flag(int index)
++{
++	if (index < TOE_TOE_QUEUE_NUM)
++		return (toe_connection_bits[index/32] & (1 << (index %32)));
++	else
++		return 0;
++}
++
++/*---------------------------------------------------------------------------
++ * sl351x_get_toe_conn_info
++ *-------------------------------------------------------------------------*/
++struct toe_conn * sl351x_get_toe_conn_info(int index)
++{
++	if (index < TOE_TOE_QUEUE_NUM)
++		return (struct toe_conn *)&toe_connections[index];
++	else
++		return NULL;
++}
++
++/*---------------------------------------------------------------------------
++ * create_sw_toe_connection
++ *-------------------------------------------------------------------------*/
++struct toe_conn* create_sw_toe_connection(int qid, int ip_ver, void* ip_hdr,
++	struct tcphdr* tcp_hdr)
++{
++	struct toe_conn*	connection =  &(toe_connections[qid]);
++
++	connection->ip_ver = (__u8)ip_ver;
++	connection->qid = (__u8)qid;
++	connection->source = (__u16)tcp_hdr->source;
++	connection->dest = (__u16)tcp_hdr->dest;
++	if (ip_ver == 4) {
++		struct iphdr* iph = (struct iphdr*) ip_hdr;
++		connection->saddr[0] = (__u32)iph->saddr;
++		connection->daddr[0] = (__u32)iph->daddr;
++//		printk("%s::saddr %x, daddr %x\n", __func__,
++//			ntohl(connection->saddr[0]), ntohl(connection->daddr[0]));
++	} else if (ip_ver == 6) {
++		struct ipv6hdr *iph = (struct ipv6hdr*)ip_hdr;
++		int i=0;
++		for (i=0; i<4; i++) {
++			connection->saddr[i] = (__u32)iph->saddr.in6_u.u6_addr32[i];
++			connection->daddr[i] = (__u32)iph->daddr.in6_u.u6_addr32[i];
++		}
++	}
++	connection->status = TCP_CONN_CREATION;
++	return connection;
++}
++
++/*---------------------------------------------------------------------------
++ * fill_toeq_buf
++ *-------------------------------------------------------------------------*/
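++/* On first use, allocates the per-queue descriptor ring via
++ * pci_alloc_consistent(); then resets the TOE queue header, loading word0
++ * with the ring's DMA base (masked by TOE_QHDR0_BASE_MASK) combined with
++ * the ring-size code TOE_TOE_DESC_POWER and clearing the remaining words.
++ */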
++int fill_toeq_buf(int index, TOE_INFO_T* toe)
++{
++	volatile TOE_QHDR_T	*qhdr;
++	//struct toe_conn* connection;
++	GMAC_RXDESC_T	*desc_ptr;
++
++	if (!toe->toe_desc_base[index]) {
++		// first time. init.
++		desc_ptr = (GMAC_RXDESC_T*)(pci_alloc_consistent(NULL, TOE_TOE_DESC_NUM
++		            *sizeof(GMAC_RXDESC_T), (dma_addr_t*)&toe->toe_desc_base_dma[index]));
++
++		toe->toe_desc_num = TOE_TOE_DESC_NUM;
++		toe->toe_desc_base[index] = (unsigned int)desc_ptr;
++	}
++	qhdr = (volatile TOE_QHDR_T*)((unsigned int)TOE_TOE_QUE_HDR_BASE +
++									index*sizeof(TOE_QHDR_T));
++	//connection = (struct toe_conn*)&(toe_connections[index]);
++
++	qhdr->word0.base_size = ((unsigned int)toe->toe_desc_base_dma[index]&TOE_QHDR0_BASE_MASK)
++					| TOE_TOE_DESC_POWER;
++	qhdr->word1.bits32 = 0;
++	qhdr->word2.bits32 = 0;
++	qhdr->word3.bits32 = 0;
++	qhdr->word4.bits32 = 0;
++	qhdr->word5.bits32 = 0;
++	return 1;
++}
++
++/*---------------------------------------------------------------------------
++ * create_toe_hash_entry_smb
++ * add SMB header in hash entry.
++ *-------------------------------------------------------------------------*/
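++/* The key matches the TCP 4-tuple plus L7 payload bytes 4..7, which hold
++ * the SMB magic 0xFF 'S' 'M' 'B' (bytes 0..3 presumably carry the 4-byte
++ * NetBIOS/direct-TCP session header), so only SMB traffic hits this entry.
++ */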
++int create_toe_hash_entry_smb(int ip_ver, void* ip_hdr, struct tcphdr* tcp_hdr,
++	int sw_id)
++{
++	HASH_ENTRY_T	hash_entry, *entry;
++	int	hash_entry_index;
++	int i;
++
++	entry = (HASH_ENTRY_T*)&hash_entry;
++	memset((void*)entry, 0, sizeof(HASH_ENTRY_T));
++	entry->rule = 0;
++
++	/* enable fields of hash key */
++	entry->key_present.ip_protocol = 1;
++	entry->key_present.sip = 1;
++	entry->key_present.dip = 1;
++	entry->key_present.l4_bytes_0_3 = 1;	// src port and dest port
++	entry->key_present.l7_bytes_0_3 = 0;	// do we need to enable NETBIOS? how?
++	entry->key_present.l7_bytes_4_7 = 1;	// "SMB" header
++
++	/* hash key */
++	entry->key.ip_protocol = IPPROTO_TCP;
++	if (ip_ver == 4) {
++		struct iphdr *iph = (struct iphdr*)ip_hdr;
++		memcpy(entry->key.sip, &iph->saddr, 4);
++		memcpy(entry->key.dip, &iph->daddr, 4);
++	} else if (ip_ver == 6) {
++		struct ipv6hdr *iph = (struct ipv6hdr*)ip_hdr;
++		for (i=0; i<4; i++) {
++			memcpy(&(entry->key.sip[i*4]), &(iph->saddr.in6_u.u6_addr32[i]), 4);
++			memcpy(&(entry->key.dip[i*4]), &(iph->daddr.in6_u.u6_addr32[i]), 4);
++		}
++	}
++	*(__u16*)&entry->key.l4_bytes[0] = tcp_hdr->source;
++	*(__u16*)&entry->key.l4_bytes[2] = tcp_hdr->dest;
++
++	entry->key.l7_bytes[4] = 0xff;
++	entry->key.l7_bytes[5] = 0x53;
++	entry->key.l7_bytes[6] = 0x4d;
++	entry->key.l7_bytes[7] = 0x42;
++
++	/* action of hash entry match */
++	entry->action.sw_id = 1;
++	entry->action.dest_qid = (__u8)TOE_TOE_QID(sw_id);
++	entry->action.srce_qid = 0;
++	hash_entry_index = hash_add_toe_entry(entry);
++
++	return hash_entry_index;
++}
++
++// best performance for tcp streaming.
++/*---------------------------------------------------------------------------
++ * create_toe_hash_entry_ftp
++ * add a hash entry for an FTP data connection (currently a stub).
++ *-------------------------------------------------------------------------*/
++int create_toe_hash_entry_ftp(int ip_ver, void* ip_hdr, struct tcphdr* tcphdr)
++{
++	return 0;
++}
++
++// is hash entry for nfs needed?
++
++/*
++ * Create a TOE hash entry by given ip addresses and tcp port numbers.
++ * hash entry index will be saved in sw connection.
++ */
++/*---------------------------------------------------------------------------
++ * create_toe_hash_entry
++ *-------------------------------------------------------------------------*/
++int create_toe_hash_entry(int ip_ver, void* ip_hdr, struct tcphdr* tcp_hdr, int sw_id)
++{
++	HASH_ENTRY_T	hash_entry, *entry;
++//	unsigned long	hash_key[HASH_MAX_DWORDS];
++	int	hash_entry_index;
++
++	entry = (HASH_ENTRY_T*) &hash_entry;
++	memset((void*)entry, 0, sizeof(HASH_ENTRY_T));
++	entry->rule = 0;
++	/* enable fields of hash key */
++	entry->key_present.ip_protocol = 1;
++	entry->key_present.sip = 1;
++	entry->key_present.dip = 1;
++	entry->key_present.l4_bytes_0_3 = 1;	// src port and dest port
++
++	/* hash key */
++	entry->key.ip_protocol = IPPROTO_TCP;
++	if (ip_ver == 4) {
++		// key of ipv4
++		struct iphdr* iph = (struct iphdr*)ip_hdr;
++		memcpy(entry->key.sip, &iph->saddr, 4);
++		memcpy(entry->key.dip, &iph->daddr, 4);
++	} else if (ip_ver == 6) {
++		// key of ipv6
++		int i=0;
++		struct ipv6hdr *iph = (struct ipv6hdr*)ip_hdr;
++		for (i=0; i<4; i++) {
++			memcpy(&(entry->key.sip[i*4]), &(iph->saddr.in6_u.u6_addr32[i]), 4);
++			memcpy(&(entry->key.dip[i*4]), &(iph->daddr.in6_u.u6_addr32[i]), 4);
++		}
++	}
++	*(__u16*)&entry->key.l4_bytes[0] = tcp_hdr->source;
++	*(__u16*)&entry->key.l4_bytes[2] = tcp_hdr->dest;
++	// is it necessary to write ip version to hash key?
++
++	/* action of hash entry match */
++	entry->action.sw_id = 1;
++	entry->action.dest_qid = (__u8)TOE_TOE_QID(sw_id);
++	entry->action.srce_qid = 0;	// 0 for SW FreeQ. 1 for HW FreeQ.
++	hash_entry_index = hash_add_toe_entry(entry);
++//	printk("\n%s. sw_id %d, hash_entry index %x\n",
++//		__func__, TOE_TOE_QID(sw_id), hash_entry_index);
++	return hash_entry_index;
++}
++
++/*---------------------------------------------------------------------------
++ * init_toeq
++ * 1. Reserve a TOE Queue id first, to get the sw toe_connection.
++ * 2. Setup the hash entry with given iphdr and tcphdr, save hash entry index
++ *    in sw toe_connection.
++ * 3. Prepare sw toe_connection and allocate buffers.
++ * 4. Validate hash entry.
++ *-------------------------------------------------------------------------*/
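++/* Returns the prepared sw connection, or 0 on failure (no free queue id or
++ * no room for the hash entry); a reserved qid is released again when the
++ * hash entry cannot be created.
++ */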
++struct toe_conn* init_toeq(int ipver, void* iph, struct tcphdr* tcp_hdr,
++	TOE_INFO_T* toe, unsigned char* l2hdr)
++{
++//	printk("\t*** %s, ipver %d\n", __func__, ipver);
++	int qid=-1;
++	struct toe_conn* connection;
++	int hash_entry_index;
++	// int i=0;
++	unsigned short	dest_port = ntohs(tcp_hdr->dest);
++
++	if (dest_port == 445) {
++		printk("%s::SMB/CIFS connection\n", __func__);
++	} else if (dest_port == 20) {
++		printk("%s::ftp-data connection\n", __func__);
++	} else if (dest_port == 2049) {
++		printk("%s::nfs daemon connection\n", __func__);
++	}
++	qid = get_connection_index();
++	if (qid<0)
++		return 0;	// setup toeq failure
++	set_toe_connection(qid, 1); // reserve this sw toeq.
++
++	//connection = (struct toe_conn*)&(toe_connections[qid]);
++	hash_entry_index = create_toe_hash_entry(ipver, iph, tcp_hdr, qid);
++	if (hash_entry_index <0) {
++		printk("%s::release toe hash entry!\n", __func__);
++		set_toe_connection(qid, 0); // release this sw toeq.
++		return 0;
++	}
++	connection = create_sw_toe_connection(qid, ipver, iph, tcp_hdr);
++	connection->hash_entry_index = (__u16) hash_entry_index;
++
++	fill_toeq_buf(qid, toe);
++	memcpy(&connection->l2_hdr, l2hdr, sizeof(struct ethhdr));
++	spin_lock_init(&connection->conn_lock);
++
++	return connection;
++}
++
++#if 0
++/*----------------------------------------------------------------------
++*   toe_init_toe_queue
++*   (1) Initialize the TOE Queue Header
++*       Register: TOE_TOE_QUE_HDR_BASE (0x60003000)
++*   (2) Initialize Descriptors of TOE Queues
++*----------------------------------------------------------------------*/
++void toe_init_toe_queue(TOE_INFO_T* toe)
++{
++}
++EXPORT_SYMBOL(toe_init_toe_queue);
++#endif
++
++/*---------------------------------------------------------------------------
++ * dump_jumbo_skb
++ *-------------------------------------------------------------------------*/
++void dump_jumbo_skb(struct jumbo_frame *jumbo_skb)
++{
++	if (jumbo_skb->skb0) {
++//		printk("%s. jumbo skb %x, len %d\n",
++//			__func__, jumbo_skb->skb0->data, jumbo_skb->skb0->len);
++		netif_rx(jumbo_skb->skb0);
++	}
++	jumbo_skb->skb0 = 0;
++	jumbo_skb->tail = 0;
++	jumbo_skb->iphdr0 = 0;
++	jumbo_skb->tcphdr0 = 0;
++}
++
++/* ---------------------------------------------------------------------
++ * Append skb to skb0. skb0 is the jumbo frame that will be passed to
++ * kernel tcp.
++ * --------------------------------------------------------------------*/
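++/* The first segment becomes skb0; later segments are chained onto
++ * skb_shinfo(skb0)->frag_list (and via ->next from the second fragment on)
++ * while skb0's len/data_len, the IP total length and the TCP ack/window of
++ * the head are updated to describe the merged jumbo frame.
++ */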
++void rx_append_skb(struct jumbo_frame *jumbo_skb, struct sk_buff* skb, int payload_len)
++{
++	struct iphdr* iphdr0 = (struct iphdr*)&(skb->data[0]);
++	int ip_hdrlen = iphdr0->ihl << 2;
++	struct tcphdr* tcphdr0 = (struct tcphdr*)&(skb->data[ip_hdrlen]);
++
++	if (!jumbo_skb->skb0) {
++		// head of the jumbo frame.
++		jumbo_skb->skb0 = skb;
++		jumbo_skb->tail = 0;
++		jumbo_skb->iphdr0 = iphdr0;
++		jumbo_skb->tcphdr0 = tcphdr0;
++	} else {
++		if (!jumbo_skb->tail)
++			skb_shinfo(jumbo_skb->skb0)->frag_list = skb;
++		else
++			(jumbo_skb->tail)->next = skb;
++		jumbo_skb->tail = skb;
++
++		// do we need to change truesize as well?
++		jumbo_skb->skb0->len += payload_len;
++		jumbo_skb->skb0->data_len += payload_len;
++
++		jumbo_skb->iphdr0->tot_len = htons(ntohs(jumbo_skb->iphdr0->tot_len)+payload_len);
++		jumbo_skb->tcphdr0->ack_seq = tcphdr0->ack_seq;
++		jumbo_skb->tcphdr0->window = tcphdr0->window;
++
++		skb->len += payload_len;
++		skb->data_len = 0;
++		skb->data += ntohs(iphdr0->tot_len) - payload_len;
++	}
++}
++
++/*----------------------------------------------------------------------
++* toe_gmac_handle_toeq
++* (1) read the interrupt queue to get the TOE Q.
++* (2) get packets from the TOE Q and send them to the upper-layer handler.
++* (3) allocate new buffers and put them into the TOE Q. The Intr Q buffer is recycled.
++*----------------------------------------------------------------------*/
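++/* 'status' carries one pending bit per interrupt queue in its top byte
++ * (interrupt_status = status >> 24); the loop shifts it right once per
++ * queue and only services the queues whose bit is set.
++ */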
++void toe_gmac_handle_toeq(struct net_device *dev, GMAC_INFO_T* tp, __u32 status)
++{
++	//volatile INTRQ_INFO_T	*intrq_info;
++	//TOEQ_INFO_T		*toeq_info;
++	volatile NONTOE_QHDR_T	*intr_qhdr;
++	volatile TOE_QHDR_T		*toe_qhdr;
++	volatile INTR_QHDR_T	*intr_curr_desc;
++	TOE_INFO_T	*toe = &toe_private_data;
++
++	volatile GMAC_RXDESC_T	*toe_curr_desc; // , *fq_desc;// *tmp_desc;
++	volatile DMA_RWPTR_T	intr_rwptr, toeq_rwptr;  // fq_rwptr;
++
++	unsigned int 	pkt_size, desc_count, tcp_qid;
++	volatile unsigned int	toeq_wptr;
++	struct toe_conn*		connection;
++	int		i, frag_id = 0;
++	// unsigned long	toeq_flags;
++	struct jumbo_frame	jumbo_skb;
++	struct sk_buff	*skb;
++	__u32	interrupt_status;
++
++	in_toe_isr++;
++
++	interrupt_status = status >> 24;
++	// get interrupt queue header
++	intr_qhdr = (volatile NONTOE_QHDR_T*)TOE_INTR_Q_HDR_BASE;
++	memset(&jumbo_skb, 0, sizeof(struct jumbo_frame));
++
++	for (i=0; i> 1;
++			continue;
++		}
++		interrupt_status = interrupt_status >> 1;
++		intr_rwptr.bits32 = readl(&intr_qhdr->word1);
++
++		while ( intr_rwptr.bits.rptr != intr_rwptr.bits.wptr) {
++			int max_pktsize = 1;
++			// get interrupt queue descriptor.
++			intr_curr_desc = (INTR_QHDR_T*)toe->intr_desc_base +
++				i* TOE_INTR_DESC_NUM + intr_rwptr.bits.rptr;
++//			printk("%s::int %x\n", __func__, intr_curr_desc->word1.bits32);
++			// get toeq id
++			tcp_qid = (u8)intr_curr_desc->word1.bits.tcp_qid - (u8)TOE_TOE_QID(0);
++			// get toeq queue header
++			toe_qhdr = (volatile TOE_QHDR_T*) TOE_TOE_QUE_HDR_BASE;
++			toe_qhdr += tcp_qid;
++			connection = &toe_connections[tcp_qid];
++			del_timer(&connection->rx_timer);
++			// Gary Chen spin_lock_irqsave(&connection->conn_lock, toeq_flags);
++			// handling interrupts of this TOE Q.
++			if (intr_curr_desc->word1.bits.ctl || intr_curr_desc->word1.bits.osq ||
++				intr_curr_desc->word1.bits.abn)
++				max_pktsize = 0;
++			if (!max_pktsize || intr_curr_desc->word1.bits.TotalPktSize) {
++				desc_count=0;
++				// wptr in intl queue is where this TOE interrupt should stop.
++				toeq_rwptr.bits32 = readl(&toe_qhdr->word1);
++				toeq_wptr = intr_curr_desc->word0.bits.wptr;
++				if (connection->toeq_rwptr.bits.rptr != toeq_rwptr.bits.rptr)
++					printk("conn rptr %d, hw rptr %d\n",
++						connection->toeq_rwptr.bits.rptr, toeq_rwptr.bits.rptr);
++
++				if (intr_curr_desc->word1.bits.ctl &&
++					(toeq_rwptr.bits.rptr == toeq_wptr)) {
++					printk("\nctrl frame, but not in TOE queue! conn rptr %d, hw wptr %d\n",
++						connection->toeq_rwptr.bits.rptr, toeq_wptr);
++//					dump_toe_qhdr(toe_qhdr);
++//					dump_intrq_desc(intr_curr_desc);
++				}
++				// while (toeq_rwptr.bits.rptr != intr_curr_desc->word0.bits.wptr) {
++				while (toe_qhdr->word1.bits.rptr != intr_curr_desc->word0.bits.wptr) {
++					frag_id++;
++					toe_curr_desc = (volatile GMAC_RXDESC_T *)(toe->toe_desc_base[tcp_qid]
++						+ toe_qhdr->word1.bits.rptr *sizeof(GMAC_RXDESC_T));
++					connection->curr_desc = (GMAC_RXDESC_T *)toe_curr_desc;
++					desc_count = toe_curr_desc->word0.bits.desc_count;
++					pkt_size = toe_curr_desc->word1.bits.byte_count;
++					consistent_sync((void*)__va(toe_curr_desc->word2.buf_adr), pkt_size,
++						PCI_DMA_FROMDEVICE);
++					skb = (struct sk_buff*)(REG32(__va(toe_curr_desc->word2.buf_adr)-
++						SKB_RESERVE_BYTES));
++					_debug_skb(skb, (GMAC_RXDESC_T *)toe_curr_desc, 0x01);
++					connection->curr_rx_skb = skb;
++					skb_reserve(skb, RX_INSERT_BYTES);
++					if ((skb->len + pkt_size) > (1514+16))
++					{
++						printk("skb->len=%d, pkt_size=%d\n",skb->len, pkt_size);
++						while(1);
++					}
++
++					skb_put(skb, pkt_size);
++					skb->dev = dev;
++					skb->protocol = eth_type_trans(skb, dev);
++					skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++					if (toe_curr_desc->word3.bits32 & 0x1b000000)
++						dump_jumbo_skb(&jumbo_skb);
++
++					rx_append_skb(&jumbo_skb, skb, pkt_size-toe_curr_desc->word3.bits.l7_offset);
++//					spin_lock_irqsave(&gmac_fq_lock, flags);
++					toeq_rwptr.bits.rptr = RWPTR_ADVANCE_ONE(toeq_rwptr.bits.rptr, TOE_TOE_DESC_NUM);
++					SET_RPTR(&toe_qhdr->word1, toeq_rwptr.bits.rptr);
++//					spin_unlock_irqrestore(&gmac_fq_lock, flags);
++					if (storlink_ctl.fqint_threshold)
++						continue;
++#if 0
++//#if (HANDLE_FREEQ_METHOD == HANDLE_FREEQ_INDIVIDUAL)
++					if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
++						printk("%s::toe queue alloc buffer ", __func__);
++					}
++					*(unsigned int*)(skb->data) = (unsigned int)skb;
++					connection->curr_rx_skb = skb;
++					skb_reserve(skb, SKB_RESERVE_BYTES);
++
++					spin_lock_irqsave(&gmac_fq_lock, flags);
++					fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++					if (toe->fq_rx_rwptr.bits.wptr != fq_rwptr.bits.wptr) {
++						printk("%s::fq_rx_rwptr %x\n", __func__, toe->fq_rx_rwptr.bits32);
++						mac_stop_txdma((struct net_device*) tp->dev);
++						spin_unlock_irqrestore(&gmac_fq_lock, flags);
++						while(1);
++					}
++					fq_desc = (GMAC_RXDESC_T*)toe->swfq_desc_base + fq_rwptr.bits.wptr;
++					fq_desc->word2.buf_adr = (unsigned int)__pa(skb->data);
++
++					fq_rwptr.bits.wptr = RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM);
++					SET_WPTR(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG, fq_rwptr.bits.wptr);
++					toe->fq_rx_rwptr.bits32 = fq_rwptr.bits32;
++					spin_unlock_irqrestore(&gmac_fq_lock, flags);
++#endif
++				} // end of this multi-desc.
++				dump_jumbo_skb(&jumbo_skb);
++				dev->last_rx = jiffies;
++				connection->toeq_rwptr.bits32 = toeq_rwptr.bits32;
++			} else if (intr_curr_desc->word1.bits.sat) {
++				toeq_rwptr.bits32 = readl(&toe_qhdr->word1);
++				toeq_wptr = intr_curr_desc->word0.bits.wptr;
++				if (connection->toeq_rwptr.bits.rptr != toeq_rwptr.bits.rptr)
++					printk("SAT. conn rptr %d, hw rptr %d\n",
++						connection->toeq_rwptr.bits.rptr, toeq_rwptr.bits.rptr);
++/*
++					printk("%s::SAT int!, ackcnt %x, seqcnt %x, rptr %d, wptr %d, ack %x, qhack %x\n",
++ 						__func__, intr_curr_desc->word4.bits.AckCnt, intr_curr_desc->word4.bits.SeqCnt,
++						toeq_rptr, toeq_wptr, intr_curr_desc->word3.ack_num, toe_qhdr->word4.ack_num);*/
++				/* pure ack */
++				if (toeq_rwptr.bits.rptr == toeq_wptr) {
++					if (intr_curr_desc->word4.bits32) {
++						skb = gen_pure_ack(connection, (TOE_QHDR_T *)toe_qhdr, (INTR_QHDR_T *)intr_curr_desc);
++						skb_put(skb, 60);
++						skb->dev = connection->dev;
++						skb->ip_summed = CHECKSUM_UNNECESSARY;
++						skb->protocol = eth_type_trans(skb, connection->dev);
++						netif_rx(skb);
++					} else
++						printk("%s::SAT Interrupt!. But cnt is 0!\n", __func__);
++				} else {
++					// while (toeq_rwptr.bits.rptr != toeq_wptr) {
++					while (toe_qhdr->word1.bits.rptr != intr_curr_desc->word0.bits.wptr) {
++						toe_curr_desc = (volatile GMAC_RXDESC_T*)(toe->toe_desc_base[tcp_qid]
++							+ toe_qhdr->word1.bits.rptr * sizeof(GMAC_RXDESC_T));
++						connection->curr_desc = (GMAC_RXDESC_T *)toe_curr_desc;
++						desc_count = toe_curr_desc->word0.bits.desc_count;
++						pkt_size = toe_curr_desc->word1.bits.byte_count;
++						consistent_sync((void*)__va(toe_curr_desc->word2.buf_adr), pkt_size,
++							PCI_DMA_FROMDEVICE);
++						// if ( ((toeq_rwptr.bits.rptr +1)&(TOE_TOE_DESC_NUM-1)) == toeq_wptr) {
++						if ( RWPTR_ADVANCE_ONE(toe_qhdr->word1.bits.rptr, TOE_TOE_DESC_NUM) == toeq_wptr) {
++							skb = (struct sk_buff*)(REG32(__va(toe_curr_desc->word2.buf_adr) -
++								SKB_RESERVE_BYTES));
++							_debug_skb(skb, (GMAC_RXDESC_T *)toe_curr_desc, 0x04);
++							connection->curr_rx_skb = skb;
++							skb_reserve(skb, RX_INSERT_BYTES);
++							skb_put(skb, pkt_size);
++							skb->dev = dev;
++							skb->protocol = eth_type_trans(skb, dev);
++							skb->ip_summed = CHECKSUM_UNNECESSARY;
++							// printk("toeq_rptr %d, wptr %d\n", toeq_rptr, toeq_wptr);
++							netif_rx(skb);
++							dev->last_rx = jiffies;
++/*
++							if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
++
++							}
++							*(unsigned int*)(skb->data) = (unsigned int) skb;
++							skb_reserve(skb, SKB_RESERVE_BYTES); */
++						} else {
++							// reuse this skb, append to free queue..
++							skb = (struct sk_buff*)(REG32(__va(toe_curr_desc->word2.buf_adr)-
++								SKB_RESERVE_BYTES));
++							_debug_skb(skb, (GMAC_RXDESC_T *)toe_curr_desc, 0x05);
++							connection->curr_rx_skb = skb;
++							dev_kfree_skb_irq(skb);
++						}
++#if 0
++						spin_lock_irqsave(&gmac_fq_lock, flags);
++						fq_rwptr.bits32 = readl(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG);
++/*						if (toe->fq_rx_rwptr.bits.wptr != fq_rwptr.bits.wptr) {
++							printk("%s::fq_rx_rwptr %x\n", __func__, toe->fq_rx_rwptr.bits32);
++							mac_stop_txdma((struct net_device*) tp->dev);
++							spin_unlock_irqrestore(&gmac_fq_lock, flags);
++							while(1);
++						} */
++						fq_desc = (GMAC_RXDESC_T*)toe->swfq_desc_base + fq_rwptr.bits.wptr;
++						fq_desc->word2.buf_adr = (unsigned int)__pa(skb->data);
++
++						fq_rwptr.bits.wptr = RWPTR_ADVANCE_ONE(fq_rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM);
++						SET_WPTR(TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG, fq_rwptr.bits.wptr);
++						toe->fq_rx_rwptr.bits32 = fq_rwptr.bits32;
++	//					spin_unlock_irqrestore(&gmac_fq_lock, flags);
++#endif
++//						spin_lock_irqsave(&gmac_fq_lock, flags);
++						toeq_rwptr.bits.rptr = RWPTR_ADVANCE_ONE(toeq_rwptr.bits.rptr, TOE_TOE_DESC_NUM);
++						SET_RPTR(&toe_qhdr->word1, toeq_rwptr.bits.rptr);
++//						spin_unlock_irqrestore(&gmac_fq_lock, flags);
++					}
++				} // end of ACK with options.
++				connection->toeq_rwptr.bits32 = toeq_rwptr.bits32;
++				// Gary Chen spin_unlock_irqrestore(&connection->conn_lock, toeq_flags);
++//				}
++			};
++			update_timer(connection);
++			// any protection against interrupt queue header?
++			intr_rwptr.bits.rptr = RWPTR_ADVANCE_ONE(intr_rwptr.bits.rptr, TOE_INTR_DESC_NUM);
++			SET_RPTR(&intr_qhdr->word1, intr_rwptr.bits.rptr);
++			intr_rwptr.bits32 = readl(&intr_qhdr->word1);
++			toe_gmac_fill_free_q();
++		} // end of this interrupt Queue processing.
++	} // end of all interrupt Queues.
++
++	in_toe_isr = 0;
++}
++
++
+Index: linux-2.6.23.16/drivers/net/sl_lepus_hash.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl_lepus_hash.c	2008-03-15 16:59:57.863423587 +0200
+@@ -0,0 +1,553 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*--------------------------------------------------------------------------
++* Name			: sl_lepus_hash.c
++* Description	:
++*		Handle Storlink Lepus Hash Functions
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*	03/13/2006	Gary Chen	Create and implement
++*
++****************************************************************************/
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#define	 MIDWAY
++#define	 SL_LEPUS
++
++#include 
++#include 
++#include 
++
++#ifndef RXTOE_DEBUG
++#define RXTOE_DEBUG
++#endif
++#undef RXTOE_DEBUG
++
++/*----------------------------------------------------------------------
++* Definition
++*----------------------------------------------------------------------*/
++#define	hash_printf				printk
++
++#define HASH_TIMER_PERIOD		(60*HZ)	// seconds
++#define HASH_ILLEGAL_INDEX		0xffff
++
++/*----------------------------------------------------------------------
++* Variables
++*----------------------------------------------------------------------*/
++u32					hash_activate_bits[HASH_TOTAL_ENTRIES/32];
++u32					hash_nat_owner_bits[HASH_TOTAL_ENTRIES/32];
++char 				hash_tables[HASH_TOTAL_ENTRIES][HASH_MAX_BYTES] __attribute__ ((aligned(16)));
++static struct timer_list hash_timer_obj;
++LIST_HEAD(hash_timeout_list);
++
++/*----------------------------------------------------------------------
++* Functions
++*----------------------------------------------------------------------*/
++void dm_long(u32 location, int length);
++static void hash_timer_func(u32 data);
++
++/*----------------------------------------------------------------------
++* hash_init
++*----------------------------------------------------------------------*/
++void hash_init(void)
++{
++	int i;
++	volatile u32 *dp1, *dp2, dword;
++
++	dp1 = (volatile u32 *) TOE_V_BIT_BASE;
++	dp2 = (volatile u32 *) TOE_A_BIT_BASE;
++
++	for (i=0; iindex, 1);
++//	printk("Dump hash key!\n");
++//	dump_hash_key(entry);
++	return entry->index;
++}
++
++/*----------------------------------------------------------------------
++* hash_set_valid_flag
++*----------------------------------------------------------------------*/
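++/* The hardware keeps one valid (V) bit per hash entry, packed 32 entries
++ * per word starting at TOE_V_BIT_BASE: index/32 selects the word and
++ * index%32 the bit within it.
++ */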
++void hash_set_valid_flag(int index, int valid)
++{
++	register u32 reg32;
++
++	reg32 = TOE_V_BIT_BASE + (index/32) * 4;
++
++	if (valid)
++	{
++		writel(readl(reg32) | (1 << (index%32)), reg32);
++	}
++	else
++	{
++		writel(readl(reg32) & ~(1 << (index%32)), reg32);
++	}
++}
++
++/*----------------------------------------------------------------------
++* hash_set_nat_owner_flag
++*----------------------------------------------------------------------*/
++void hash_set_nat_owner_flag(int index, int valid)
++{
++	if (valid)
++	{
++		hash_nat_owner_bits[index/32] |= (1 << (index % 32));
++	}
++	else
++	{
++		hash_nat_owner_bits[index/32] &= ~(1 << (index % 32));
++	}
++}
++
++
++/*----------------------------------------------------------------------
++* hash_build_keys
++*----------------------------------------------------------------------*/
++int hash_build_keys(u32 *destp, HASH_ENTRY_T *entry)
++{
++	u32 	data;
++	unsigned char 	*cp;
++	int				i, j;
++	unsigned short 	index;
++	int 			total;
++
++	memset((void *)destp, 0, HASH_MAX_BYTES);
++	cp = (unsigned char *)destp;
++
++	if (entry->key_present.port || entry->key_present.Ethertype)
++	{
++		HASH_PUSH_WORD(cp, entry->key.Ethertype);		// word 0
++		HASH_PUSH_BYTE(cp, entry->key.port);			// Byte 2
++		HASH_PUSH_BYTE(cp, 0);							// Byte 3
++	}
++	else
++	{
++		HASH_PUSH_DWORD(cp, 0);
++	}
++
++	if (entry->key_present.da || entry->key_present.sa)
++	{
++		unsigned char mac[4];
++		if (entry->key_present.da)
++		{
++			for (i=0; i<4; i++)
++				HASH_PUSH_BYTE(cp, entry->key.da[i]);
++		}
++		mac[0] = (entry->key_present.da) ? entry->key.da[4] : 0;
++		mac[1] = (entry->key_present.da) ? entry->key.da[5] : 0;
++		mac[2] = (entry->key_present.sa) ? entry->key.sa[0] : 0;
++		mac[3] = (entry->key_present.sa) ? entry->key.sa[1] : 0;
++		data = mac[0] + (mac[1]<<8) + (mac[2]<<16) + (mac[3]<<24);
++		HASH_PUSH_DWORD(cp, data);
++		if (entry->key_present.sa)
++		{
++			for (i=2; i<6; i++)
++				HASH_PUSH_BYTE(cp, entry->key.sa[i]);
++		}
++	}
++
++	if (entry->key_present.pppoe_sid || entry->key_present.vlan_id)
++	{
++		HASH_PUSH_WORD(cp, entry->key.vlan_id);		// low word
++		HASH_PUSH_WORD(cp, entry->key.pppoe_sid);	// high word
++	}
++	if (entry->key_present.ipv4_hdrlen || entry->key_present.ip_tos || entry->key_present.ip_protocol)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.ip_protocol);		// Byte 0
++		HASH_PUSH_BYTE(cp, entry->key.ip_tos);			// Byte 1
++		HASH_PUSH_BYTE(cp, entry->key.ipv4_hdrlen);		// Byte 2
++		HASH_PUSH_BYTE(cp, 0);							// Byte 3
++	}
++
++	if (entry->key_present.ipv6_flow_label)
++	{
++		HASH_PUSH_DWORD(cp, entry->key.ipv6_flow_label);	// low word
++	}
++	if (entry->key_present.sip)
++	{
++		// input (entry->key.sip[i]) is network-oriented
++		// output (hash key) is host-oriented
++		for (i=3; i>=0; i--)
++			HASH_PUSH_BYTE(cp, entry->key.sip[i]);
++		if (entry->key.ipv6)
++		{
++			for (i=4; i<16; i+=4)
++			{
++				for (j=i+3; j>=i; j--)
++					HASH_PUSH_BYTE(cp, entry->key.sip[j]);
++			}
++		}
++	}
++	if (entry->key_present.dip)
++	{
++		// input (entry->key.dip[i]) is network-oriented
++		// output (hash key) is host-oriented
++		for (i=3; i>=0; i--)
++			HASH_PUSH_BYTE(cp, entry->key.dip[i]);
++		if (entry->key.ipv6)
++		{
++			for (i=4; i<16; i+=4)
++			{
++				for (j=i+3; j>=i; j--)
++					HASH_PUSH_BYTE(cp, entry->key.dip[j]);
++			}
++		}
++	}
++
++	if (entry->key_present.l4_bytes_0_3)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[0]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[1]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[2]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[3]);
++	}
++	if (entry->key_present.l4_bytes_4_7)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[4]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[5]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[6]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[7]);
++	}
++	if (entry->key_present.l4_bytes_8_11)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[8]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[9]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[10]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[11]);
++	}
++	if (entry->key_present.l4_bytes_12_15)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[12]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[13]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[14]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[15]);
++	}
++	if (entry->key_present.l4_bytes_16_19)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[16]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[17]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[18]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[19]);
++	}
++	if (entry->key_present.l4_bytes_20_23)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[20]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[21]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[22]);
++		HASH_PUSH_BYTE(cp, entry->key.l4_bytes[23]);
++	}
++	if (entry->key_present.l7_bytes_0_3)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[0]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[1]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[2]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[3]);
++	}
++	if (entry->key_present.l7_bytes_4_7)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[4]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[5]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[6]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[7]);
++	}
++	if (entry->key_present.l7_bytes_8_11)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[8]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[9]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[10]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[11]);
++	}
++	if (entry->key_present.l7_bytes_12_15)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[12]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[13]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[14]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[15]);
++	}
++	if (entry->key_present.l7_bytes_16_19)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[16]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[17]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[18]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[19]);
++	}
++	if (entry->key_present.l7_bytes_20_23)
++	{
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[20]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[21]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[22]);
++		HASH_PUSH_BYTE(cp, entry->key.l7_bytes[23]);
++	}
++
++	// get hash index
++	total = (u32)((u32)cp - (u32)destp) / (sizeof(u32));
++
++	if (total > HASH_MAX_KEY_DWORD)
++	{
++		//hash_printf("Total key words (%d) is too large (> %d)!\n",
++		//				total, HASH_MAX_KEY_DWORD);
++		return -1;
++	}
++
++	if (entry->key_present.port || entry->key_present.Ethertype)
++		index = hash_gen_crc16((unsigned char *)destp, total * 4);
++	else
++	{
++		if (total == 1)
++		{
++			hash_printf("No key is assigned!\n");
++			return -1;
++		}
++
++		index = hash_gen_crc16((unsigned char *)(destp+1), (total-1) * 4);
++	}
++
++	entry->index = index & HASH_BITS_MASK;
++
++	//hash_printf("Total key words = %d, Hash Index= %d\n",
++	//				total, entry->index);
++
++	cp = (unsigned char *)destp;
++	cp+=3;
++	HASH_PUSH_BYTE(cp, entry->rule);	// rule
++
++	entry->total_dwords = total;
++
++	return total;
++}
++
++/*----------------------------------------------------------------------
++* hash_build_nat_keys
++*----------------------------------------------------------------------*/
++void hash_build_nat_keys(u32 *destp, HASH_ENTRY_T *entry)
++{
++	unsigned char 	*cp;
++	int				i;
++	unsigned short 	index;
++	int 			total;
++
++	memset((void *)destp, 0, HASH_MAX_BYTES);
++
++	cp = (unsigned char *)destp + 2;
++	HASH_PUSH_BYTE(cp, entry->key.port);
++	cp++;
++
++	if (entry->key_present.pppoe_sid || entry->key_present.vlan_id)
++	{
++		HASH_PUSH_WORD(cp, entry->key.vlan_id);		// low word
++		HASH_PUSH_WORD(cp, entry->key.pppoe_sid);	// high word
++	}
++
++	HASH_PUSH_BYTE(cp, entry->key.ip_protocol);
++	cp+=3;
++
++	// input (entry->key.sip[i]) is network-oriented
++	// output (hash key) is host-oriented
++	for (i=3; i>=0; i--)
++		HASH_PUSH_BYTE(cp, entry->key.sip[i]);
++
++	// input (entry->key.dip[i]) is network-oriented
++	// output (hash key) is host-oriented
++	for (i=3; i>=0; i--)
++		HASH_PUSH_BYTE(cp, entry->key.dip[i]);
++
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[0]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[1]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[2]);
++	HASH_PUSH_BYTE(cp, entry->key.l4_bytes[3]);
++
++	// get hash index
++	total = (u32)((u32)cp - (u32)destp) / (sizeof(u32));
++
++	index = hash_gen_crc16((unsigned char *)destp, total * 4);
++	entry->index = index & ((1 << HASH_BITS) - 1);
++
++	cp = (unsigned char *)destp;
++	cp+=3;
++	HASH_PUSH_BYTE(cp, entry->rule);	// rule
++
++	entry->total_dwords = total;
++}
++
++
++/*----------------------------------------------------------------------
++* hash_write_entry
++*----------------------------------------------------------------------*/
++int hash_write_entry(HASH_ENTRY_T *entry, unsigned char *key)
++{
++	int		i;
++	u32		*srcep, *destp, *destp2;
++
++	srcep = (u32 *)key;
++	destp2 = destp = (u32 *)&hash_tables[entry->index][0];
++
++	for (i=0; i<(entry->total_dwords); i++, srcep++, destp++)
++		*destp = *srcep;
++
++	srcep = (u32 *)&entry->action;
++	*destp++ = *srcep;
++
++	srcep = (u32 *)&entry->param;
++	for (i=0; i<(sizeof(ENTRY_PARAM_T)/sizeof(*destp)); i++, srcep++, destp++)
++		*destp = *srcep;
++
++	memset(destp, 0, (HASH_MAX_DWORDS-entry->total_dwords-HASH_ACTION_DWORDS) * sizeof(u32));
++
++	consistent_sync(destp2, (entry->total_dwords+HASH_ACTION_DWORDS) * 4, PCI_DMA_TODEVICE);
++	return 0;
++}
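++
++/*
++ * Putting the helpers above together -- an illustrative sketch only.  The
++ * exact call order used by the NAT code elsewhere in this patch is an
++ * assumption, and example_install_nat_entry() is a hypothetical name.
++ */
++#if 0
++static void example_install_nat_entry(HASH_ENTRY_T *entry)
++{
++	u32 key[HASH_MAX_BYTES / sizeof(u32)];
++
++	hash_build_nat_keys(key, entry);		/* derives entry->index from the key */
++	hash_write_entry(entry, (unsigned char *)key);	/* key + action + param into hash_tables[] */
++	hash_set_nat_owner_flag(entry->index, 1);	/* bookkeeping: entry belongs to NAT */
++	hash_set_valid_flag(entry->index, 1);		/* let the TOE engine match on it */
++}
++#endif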
++
++/*----------------------------------------------------------------------
++* hash_timer_func
++*----------------------------------------------------------------------*/
++static void hash_timer_func(u32 data)
++{
++	int				i, j;
++	volatile u32	*active_p, *own_p, *valid_p;
++	u32				a_bits, own_bits;
++
++	valid_p = (volatile u32 *)TOE_V_BIT_BASE;
++	active_p = (volatile u32 *)hash_activate_bits;
++	own_p = (volatile u32 *)hash_nat_owner_bits;
++	for (i=0; i<(HASH_TOTAL_ENTRIES/32); i++, own_p++, active_p++, valid_p++)
++	{
++		*active_p |= readl(TOE_A_BIT_BASE + (i*4));
++		a_bits = *active_p;
++		own_bits = *own_p;
++		if (own_bits)
++		{
++#ifndef DEBUG_NAT_MIXED_HW_SW_TX
++			a_bits = own_bits & ~a_bits;
++#else
++			a_bits = own_bits & a_bits;
++#endif
++			for (j=0; a_bits && j<32; j++)
++			{
++				if (a_bits & 1)
++				{
++					*valid_p &= ~(1 << j);		// invalidate it
++#if !(defined(NAT_DEBUG_LAN_HASH_TIMEOUT) || defined(NAT_DEBUG_WAN_HASH_TIMEOUT))
++					*own_p &= ~(1 << j);		// release ownership for NAT
++#endif
++// #ifdef DEBUG_NAT_MIXED_HW_SW_TX
++#if 0
++					hash_printf("%lu %s: Clear hash index: %d\n", jiffies/HZ, __func__, i*32+j);
++#endif
++				}
++				a_bits >>= 1;
++			}
++			*active_p &= ~own_bits;		// deactivate it for next polling
++		}
++	}
++
++	hash_timer_obj.expires = jiffies + HASH_TIMER_PERIOD;
++	add_timer((struct timer_list *)data);
++}
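++
++/*
++ * Arming sketch (illustration only; the real setup lives elsewhere in this
++ * patch, and the casts below are assumptions about how hash_timer_func()
++ * is hooked to the 2.6 timer API):
++ *
++ *	init_timer(&hash_timer_obj);
++ *	hash_timer_obj.function = (void (*)(unsigned long))hash_timer_func;
++ *	hash_timer_obj.data     = (unsigned long)&hash_timer_obj;
++ *	hash_timer_obj.expires  = jiffies + HASH_TIMER_PERIOD;
++ *	add_timer(&hash_timer_obj);
++ */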
++
++/*----------------------------------------------------------------------
++* dm_long
++*----------------------------------------------------------------------*/
++void dm_long(u32 location, int length)
++{
++	u32		*start_p, *curr_p, *end_p;
++	u32		*datap, data;
++	int		i;
++
++	//if (length > 1024)
++	//	length = 1024;
++
++	start_p = (u32 *)location;
++	end_p = (u32 *)location + length;
++	curr_p = (u32 *)((u32)location & 0xfffffff0);
++	datap = (u32 *)location;
++	while (curr_p < end_p)
++	{
++		hash_printf("0x%08x: ",(u32)curr_p & 0xfffffff0);
++		for (i=0; i<4; i++)
++		{
++			if (curr_p < start_p || curr_p >= end_p)
++				hash_printf("         ");
++			else
++			{
++				data = *datap;
++				hash_printf("%08X ", data);
++			}
++			if (i==1)
++				hash_printf("- ");
++
++			curr_p++;
++			datap++;
++		}
++		hash_printf("\n");
++	}
++}
++
++/*----------------------------------------------------------------------
++* hash_dump_entry
++*----------------------------------------------------------------------*/
++void hash_dump_entry(int index)
++{
++	hash_printf("Hash Index %d:\n", index);
++	dm_long((u32)&hash_tables[index][0], HASH_MAX_DWORDS);
++}
++
++
+Index: linux-2.6.23.16/drivers/net/sl_switch.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/drivers/net/sl_switch.c	2008-03-15 17:00:08.364022040 +0200
+@@ -0,0 +1,650 @@
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#define GMAC_GLOBAL_BASE_ADDR       (IO_ADDRESS(SL2312_GLOBAL_BASE))
++#define GPIO_BASE_ADDR1  (IO_ADDRESS(SL2312_GPIO_BASE1))
++enum GPIO_REG
++{
++    GPIO_DATA_OUT   = 0x00,
++    GPIO_DATA_IN    = 0x04,
++    GPIO_PIN_DIR    = 0x08,
++    GPIO_BY_PASS    = 0x0c,
++    GPIO_DATA_SET   = 0x10,
++    GPIO_DATA_CLEAR = 0x14,
++};
++
++#define GMAC_SPEED_10			0
++#define GMAC_SPEED_100			1
++#define GMAC_SPEED_1000			2
++
++enum phy_state
++{
++    LINK_DOWN   = 0,
++    LINK_UP     = 1
++};
++
++#ifndef BIT
++#define BIT(x)						(1 << (x))
++#endif
++
++//int Get_Set_port_status();
++unsigned int SPI_read_bit(void);
++void SPI_write_bit(char bit_EEDO);
++void SPI_write(unsigned char block,unsigned char subblock,unsigned char addr,unsigned int value);
++unsigned int SPI_read(unsigned char block,unsigned char subblock,unsigned char addr);
++int SPI_default(void);
++void SPI_CS_enable(unsigned char enable);
++unsigned int SPI_get_identifier(void);
++void phy_write(unsigned char port_no,unsigned char reg,unsigned int val);
++unsigned int phy_read(unsigned char port_no,unsigned char reg);
++void phy_write_masked(unsigned char port_no,unsigned char reg,unsigned int val,unsigned int mask);
++void init_seq_7385(unsigned char port_no) ;
++void phy_receiver_init (unsigned char port_no);
++
++#define PORT_NO		4
++int switch_pre_speed[PORT_NO]={0,0,0,0};
++int switch_pre_link[PORT_NO]={0,0,0,0};
++
++
++
++
++
++/*				NOTES
++ *   The Protocol of the SPI are as follows:
++ *
++ *     		   Bit7 Bit6 Bit5 Bit4 Bit3 Bit2 Bit1 Bit0
++ *	byte0     |   Block id  | r/w | sub-block        |
++ *	byte1     |		Address			 |
++ *	byte2	  |		Data			 |
++ *	byte3	  |		Data			 |
++ *	byte4	  |		Data			 |
++ *	byte5	  |		Data			 |
++ */
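++
++/*
++ * Worked example (derived from SPI_write()/SPI_read() below, for
++ * illustration only): byte0 of a write access to block 3, sub-block 0 is
++ *
++ *	data = (3 << 5) | 0x10 | 0;	// 0x70: block id 3, r/w = write, sub-block 0
++ *
++ * SPI_read() builds the same byte without the 0x10 write flag (0x60).
++ */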
++
++
++
++
++/***************************************/
++/* define GPIO module base address     */
++/***************************************/
++#define GPIO_EECS	     0x80000000		/*   EECS: GPIO[22]   */
++#define GPIO_MOSI	     0x20000000         /*   EEDO: GPIO[29]   send to 6996*/
++#define GPIO_MISO	     0x40000000         /*   EEDI: GPIO[30]   receive from 6996*/
++#define GPIO_EECK	     0x10000000         /*   EECK: GPIO[31]   */
++
++/*************************************************************
++* SPI protocol for ADM6996 control
++**************************************************************/
++#define SPI_OP_LEN	     0x08		// the length of start bit and opcode
++#define SPI_OPWRITE	     0X05		// write
++#define SPI_OPREAD	     0X06		// read
++#define SPI_OPERASE	     0X07		// erase
++#define SPI_OPWTEN	     0X04		// write enable
++#define SPI_OPWTDIS	     0X04		// write disable
++#define SPI_OPERSALL	     0X04		// erase all
++#define SPI_OPWTALL	     0X04		// write all
++
++#define SPI_ADD_LEN	     8			// bits of Address
++#define SPI_DAT_LEN	     32			// bits of Data
++
++
++/****************************************/
++/*	Function Declarations		*/
++/****************************************/
++
++//unsigned int SPI_read_bit(void);
++//void SPI_write_bit(char bit_EEDO);
++//unsigned int SPI_read_bit(void);
++/******************************************
++* SPI_write
++* addr -> Write Address
++* value -> value to be written
++***************************************** */
++void phy_receiver_init (unsigned char port_no)
++{
++    phy_write(port_no,31,0x2a30);
++    phy_write_masked(port_no, 12, 0x0200, 0x0300);
++    phy_write(port_no,31,0);
++}
++
++void phy_write(unsigned char port_no,unsigned char reg,unsigned int val)
++{
++	unsigned int cmd;
++
++	cmd = (port_no<<21)|(reg<<16)|val;
++	SPI_write(3,0,1,cmd);
++}
++
++unsigned int phy_read(unsigned char port_no,unsigned char reg)
++{
++	unsigned int cmd,reg_val;
++
++	cmd = BIT(26)|(port_no<<21)|(reg<<16);
++	SPI_write(3,0,1,cmd);
++	msleep(2);
++	reg_val = SPI_read(3,0,2);
++	return reg_val;
++}
++
++void phy_write_masked(unsigned char port_no,unsigned char reg,unsigned int val,unsigned int mask)
++{
++	unsigned int cmd,reg_val;
++
++	cmd = BIT(26)|(port_no<<21)|(reg<<16);	// Read reg_val
++	SPI_write(3,0,1,cmd);
++	mdelay(2);
++	reg_val = SPI_read(3,0,2);
++	reg_val &= ~mask;			// Clear masked bit
++	reg_val |= (val&mask) ;			// set masked bit ,if true
++	cmd = (port_no<<21)|(reg<<16)|reg_val;
++	SPI_write(3,0,1,cmd);
++}
++
++void init_seq_7385(unsigned char port_no)
++{
++	unsigned char rev;
++
++	phy_write(port_no, 31, 0x2a30);
++	phy_write_masked(port_no, 8, 0x0200, 0x0200);
++	phy_write(port_no, 31, 0x52b5);
++	phy_write(port_no, 16, 0xb68a);
++	phy_write_masked(port_no, 18, 0x0003, 0xff07);
++	phy_write_masked(port_no, 17, 0x00a2, 0x00ff);
++	phy_write(port_no, 16, 0x968a);
++	phy_write(port_no, 31, 0x2a30);
++	phy_write_masked(port_no, 8, 0x0000, 0x0200);
++	phy_write(port_no, 31, 0x0000); /* Read revision */
++	rev = phy_read(port_no, 3) & 0x000f;
++	if (rev == 0)
++	{
++		phy_write(port_no, 31, 0x2a30);
++		phy_write_masked(port_no, 8, 0x0200, 0x0200);
++		phy_write(port_no, 31, 0x52b5);
++		phy_write(port_no, 18, 0x0000);
++		phy_write(port_no, 17, 0x0689);
++		phy_write(port_no, 16, 0x8f92);
++		phy_write(port_no, 31, 0x52B5);
++		phy_write(port_no, 18, 0x0000);
++		phy_write(port_no, 17, 0x0E35);
++		phy_write(port_no, 16, 0x9786);
++		phy_write(port_no, 31, 0x2a30);
++		phy_write_masked(port_no, 8, 0x0000, 0x0200);
++		phy_write(port_no, 23, 0xFF80);
++		phy_write(port_no, 23, 0x0000);
++	}
++	phy_write(port_no, 31, 0x0000);
++	phy_write(port_no, 18, 0x0048);
++	if (rev == 0)
++	{
++		phy_write(port_no, 31, 0x2a30);
++		phy_write(port_no, 20, 0x6600);
++		phy_write(port_no, 31, 0x0000);
++		phy_write(port_no, 24, 0xa24e);
++	}
++	else
++	{
++		phy_write(port_no, 31, 0x2a30);
++		phy_write_masked(port_no, 22, 0x0240, 0x0fc0);
++		phy_write_masked(port_no, 20, 0x4000, 0x6000);
++		phy_write(port_no, 31, 1);
++		phy_write_masked(port_no, 20, 0x6000, 0xe000);
++		phy_write(port_no, 31, 0x0000);
++	}
++}
++
++int Get_Set_port_status()
++{
++	unsigned int    reg_val,ability,rcv_mask,mac_config;
++	int is_link=0;
++	int i;
++
++ 	rcv_mask = SPI_read(2,0,0x10);			// Receive mask
++
++	for(i=0;i<4;i++){
++		reg_val = phy_read(i,1);
++		if ((reg_val & 0x0024) == 0x0024) /* link is established and auto_negotiate process completed */
++		{
++			is_link=1;
++			if(switch_pre_link[i]==LINK_DOWN){		// Link Down ==> Link up
++
++				rcv_mask |= BIT(i);			// Enable receive
++
++				reg_val = phy_read(i,10);
++				if(reg_val & 0x0c00){
++					printk("Port%d:Giga mode\n",i);
++//					SPI_write(1,i,0x00,0x300701B1);
++					mac_config = 0x00060004|(6<<6);
++
++					SPI_write(1,i,0x00,((mac_config & 0xfffffff8) | 1) | 0x20000030);	// reset port
++					mac_config |= (( BIT(i) << 19) | 0x08000000);
++					SPI_write(1,i,0x00,mac_config);
++					SPI_write(1,i,0x04,0x000300ff);		// flow control
++
++					reg_val = SPI_read(5,0,0x12);
++					reg_val &= ~BIT(i);
++					SPI_write(5,0,0x12,reg_val);
++
++					reg_val = SPI_read(1,i,0x00);
++					reg_val |= 0x10010000;
++					SPI_write(1,i,0x00,reg_val);
++//					SPI_write(1,i,0x00,0x10070181);
++					switch_pre_link[i]=LINK_UP;
++					switch_pre_speed[i]=GMAC_SPEED_1000;
++				}
++				else{
++					reg_val = phy_read(i,5);
++					ability = (reg_val&0x5e0) >>5;
++					if ((ability & 0x0C)) /* 100M */
++					{
++//						SPI_write(1,i,0x00,0x30050472);
++						if((ability&0x08)==0) 		// Half
++							mac_config = 0x00040004 |(17<<6);
++						else				// Full
++							mac_config = 0x00040004 |(17<<6);
++
++						SPI_write(1,i,0x00,((mac_config & 0xfffffff8) | 1) | 0x20000030);	// reset port
++						mac_config |= (( BIT(i) << 19) | 0x08000000);
++						SPI_write(1,i,0x00,mac_config);
++						SPI_write(1,i,0x04,0x000300ff);		// flow control
++
++						reg_val = SPI_read(5,0,0x12);
++						reg_val &= ~BIT(i);
++						SPI_write(5,0,0x12,reg_val);
++
++						reg_val = SPI_read(1,i,0x00);
++						reg_val &= ~0x08000000;
++						reg_val |= 0x10010000;
++						SPI_write(1,i,0x00,reg_val);
++//						SPI_write(1,i,0x00,0x10050442);
++						printk("Port%d:100M\n",i);
++						switch_pre_link[i]=LINK_UP;
++						switch_pre_speed[i]=GMAC_SPEED_100;
++					}
++					else if((ability & 0x03)) /* 10M */
++					{
++//						SPI_write(1,i,0x00,0x30050473);
++						if((ability&0x2)==0)		// Half
++							mac_config = 0x00040004 |(17<<6);
++						else				// Full
++							mac_config = 0x00040004 |(17<<6);
++
++						SPI_write(1,i,0x00,((mac_config & 0xfffffff8) | 1) | 0x20000030);	// reset port
++						mac_config |= (( BIT(i) << 19) | 0x08000000);
++						SPI_write(1,i,0x00,mac_config);
++						SPI_write(1,i,0x04,0x000300ff);		// flow control
++
++						reg_val = SPI_read(5,0,0x12);
++						reg_val &= ~BIT(i);
++						SPI_write(5,0,0x12,reg_val);
++
++						reg_val = SPI_read(1,i,0x00);
++						reg_val &= ~0x08000000;
++						reg_val |= 0x10010000;
++						SPI_write(1,i,0x00,reg_val);
++//						SPI_write(1,i,0x00,0x10050443);
++						printk("Port%d:10M\n",i);
++						switch_pre_link[i]=LINK_UP;
++						switch_pre_speed[i]=GMAC_SPEED_10;
++					}
++					else{
++						SPI_write(1,i,0x00,0x20000030);
++						printk("Port%d:Unknown mode\n",i);
++						switch_pre_link[i]=LINK_DOWN;
++						switch_pre_speed[i]=GMAC_SPEED_10;
++					}
++				}
++			}
++			else{						// Link up ==> Link UP
++
++			}
++		}
++		else{							// Link Down
++			if(switch_pre_link[i]==LINK_UP){
++				printk("Port%d:Link Down\n",i);
++				//phy_receiver_init(i);
++				reg_val = SPI_read(1,i,0);
++				reg_val &= ~BIT(16);
++				SPI_write(1,i,0x00,reg_val);			// disable RX
++				SPI_write(5,0,0x0E,BIT(i));			// discard packet
++				while((SPI_read(5,0,0x0C)&BIT(i))==0)		// wait to be empty
++					msleep(1);
++				SPI_write(1,i,0x00,0x20000030);			// PORT_RST
++				SPI_write(5,0,0x0E,SPI_read(5,0,0x0E) & ~BIT(i));// accept packet
++
++				reg_val = SPI_read(5,0,0x12);
++				reg_val |= BIT(i);
++				SPI_write(5,0,0x12,reg_val);
++			}
++			switch_pre_link[i]=LINK_DOWN;
++			rcv_mask &= ~BIT(i);			// disable receive
++		}
++	}
++
++	SPI_write(2,0,0x10,rcv_mask);			// Receive mask
++	return is_link;
++
++}
++EXPORT_SYMBOL(Get_Set_port_status);
++
++void SPI_write(unsigned char block,unsigned char subblock,unsigned char addr,unsigned int value)
++{
++	int     i;
++	char    bit;
++	unsigned int data;
++
++	SPI_CS_enable(1);
++
++	data = (block<<5) | 0x10 | subblock;
++
++	//send write command
++	for(i=SPI_OP_LEN-1;i>=0;i--)
++	{
++		bit = (data>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++
++	// send 8 bits address (MSB first, LSB last)
++	for(i=SPI_ADD_LEN-1;i>=0;i--)
++	{
++		bit = (addr>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++	// send 32 bits data (MSB first, LSB last)
++	for(i=SPI_DAT_LEN-1;i>=0;i--)
++	{
++		bit = (value>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++
++	SPI_CS_enable(0);	// CS low
++
++}
++
++
++/************************************
++* SPI_write_bit
++* bit_EEDO -> 1 or 0 to be written
++************************************/
++void SPI_write_bit(char bit_EEDO)
++{
++	unsigned int addr;
++	unsigned int value;
++
++	addr = (GPIO_BASE_ADDR1 + GPIO_PIN_DIR);
++	value = readl(addr) |GPIO_EECK |GPIO_MOSI ;   /* set EECK/MOSI pins to output */
++	writel(value,addr);
++	if(bit_EEDO)
++	{
++		addr = (GPIO_BASE_ADDR1 + GPIO_DATA_SET);
++		writel(GPIO_MOSI,addr); /* set MOSI to 1 */
++
++	}
++	else
++	{
++		addr = (GPIO_BASE_ADDR1 + GPIO_DATA_CLEAR);
++		writel(GPIO_MOSI,addr); /* set MOSI to 0 */
++	}
++	addr = (GPIO_BASE_ADDR1 + GPIO_DATA_SET);
++	writel(GPIO_EECK,addr); /* set EECK to 1 */
++	addr = (GPIO_BASE_ADDR1 + GPIO_DATA_CLEAR);
++	writel(GPIO_EECK,addr); /* set EECK to 0 */
++
++	//return ;
++}
++
++/**********************************************************************
++* read a bit from ADM6996 register
++***********************************************************************/
++unsigned int SPI_read_bit(void) // read data from
++{
++	unsigned int addr;
++	unsigned int value;
++
++	addr = (GPIO_BASE_ADDR1 + GPIO_PIN_DIR);
++	value = readl(addr) & (~GPIO_MISO);   // set EECK to output and MISO to input
++	writel(value,addr);
++
++	addr =(GPIO_BASE_ADDR1 + GPIO_DATA_SET);
++	writel(GPIO_EECK,addr); // set EECK to 1
++
++
++	addr = (GPIO_BASE_ADDR1 + GPIO_DATA_IN);
++	value = readl(addr) ;
++
++	addr = (GPIO_BASE_ADDR1 + GPIO_DATA_CLEAR);
++	writel(GPIO_EECK,addr); // set EECK to 0
++
++
++	value = value >> 30;
++	return value ;
++}
++
++/******************************************
++* SPI_default
++* EEPROM content default value
++*******************************************/
++int SPI_default(void)
++{
++	int i;
++	unsigned reg_val,cmd;
++
++#if 0
++	SPI_write(7,0,0x1C,0x01);				// map code space to 0
++
++	reg_val = SPI_read(7,0,0x10);
++	reg_val |= 0x0146;
++	reg_val &= ~0x0001;
++	SPI_write(7,0,0x10,reg_val);				// reset iCPU and enable ext_access
++	SPI_write(7,0,0x11,0x0000);				// start address
++	for(i=0;i which table to be read: 1/count  0/EEPROM
++* addr  -> Address to be read
++* return : Value of the register
++*************************************************/
++unsigned int SPI_read(unsigned char block,unsigned char subblock,unsigned char addr)
++{
++	int     i;
++	char    bit;
++	unsigned int data,value=0;
++
++	SPI_CS_enable(1);
++
++	data = (block<<5) | subblock;
++
++	//send write command
++	for(i=SPI_OP_LEN-1;i>=0;i--)
++	{
++		bit = (data>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++
++	// send 8 bits address (MSB first, LSB last)
++	for(i=SPI_ADD_LEN-1;i>=0;i--)
++	{
++		bit = (addr>>i)& 0x01;
++		SPI_write_bit(bit);
++	}
++
++	// dummy read for chip ready
++	for(i=0;i<8;i++)
++		SPI_read_bit();
++
++
++	// read 32 bits data (MSB first, LSB last)
++	for(i=SPI_DAT_LEN-1;i>=0;i--)
++	{
++		bit = SPI_read_bit();
++		value |= bit<<i;
++
++#define SL351x_GMAC_WORKAROUND		1
++
++#undef BIG_ENDIAN
++#define BIG_ENDIAN  				0
++#define GMAC_DEBUG      			1
++#define GMAC_NUM					2
++//#define	L2_jumbo_frame				1
++
++#define _PACKED_					__attribute__ ((aligned(1), packed))
++
++#ifndef BIT
++#define BIT(x)						(1 << (x))
++#endif
++
++#define REG32(addr)     			(*(volatile unsigned long  * const)(addr))
++
++#define DMA_MALLOC(size,handle)		pci_alloc_consistent(NULL,size,handle)
++#define DMA_MFREE(mem,size,handle)	pci_free_consistent(NULL,size,mem,handle)
++
++// Define frame size
++#define ETHER_ADDR_LEN				6
++#define GMAC_MAX_ETH_FRAME_SIZE		1514
++#define GMAC_TX_BUF_SIZE			((GMAC_MAX_ETH_FRAME_SIZE + 31) & (~31))
++#define MAX_ISR_WORK        		20
++
++#ifdef	L2_jumbo_frame
++#define SW_RX_BUF_SIZE				9234	// 2048 ,9234
++#else
++#define SW_RX_BUF_SIZE				1536	// 2048
++#endif
++
++#define HW_RX_BUF_SIZE				1536	// 2048
++
++#define GMAC_DEV_TX_TIMEOUT  		(10*HZ)			//add by CH
++#define	SKB_RESERVE_BYTES			16
++
++/**********************************************************************
++ * Base Register
++ **********************************************************************/
++#define TOE_BASE					(IO_ADDRESS(SL2312_TOE_BASE))
++#define GMAC_GLOBAL_BASE_ADDR       (IO_ADDRESS(SL2312_GLOBAL_BASE))
++
++#define TOE_GLOBAL_BASE				(TOE_BASE + 0x0000)
++#define TOE_NONTOE_QUE_HDR_BASE		(TOE_BASE + 0x2000)
++#define TOE_TOE_QUE_HDR_BASE		(TOE_BASE + 0x3000)
++#define TOE_V_BIT_BASE				(TOE_BASE + 0x4000)
++#define TOE_A_BIT_BASE				(TOE_BASE + 0x6000)
++#define TOE_GMAC0_DMA_BASE			(TOE_BASE + 0x8000)
++#define TOE_GMAC0_BASE				(TOE_BASE + 0xA000)
++#define TOE_GMAC1_DMA_BASE			(TOE_BASE + 0xC000)
++#define TOE_GMAC1_BASE				(TOE_BASE + 0xE000)
++
++/**********************************************************************
++ * Queue ID
++ **********************************************************************/
++#define TOE_SW_FREE_QID				0x00
++#define TOE_HW_FREE_QID				0x01
++#define TOE_GMAC0_SW_TXQ0_QID		0x02
++#define TOE_GMAC0_SW_TXQ1_QID		0x03
++#define TOE_GMAC0_SW_TXQ2_QID		0x04
++#define TOE_GMAC0_SW_TXQ3_QID		0x05
++#define TOE_GMAC0_SW_TXQ4_QID		0x06
++#define TOE_GMAC0_SW_TXQ5_QID		0x07
++#define TOE_GMAC0_HW_TXQ0_QID		0x08
++#define TOE_GMAC0_HW_TXQ1_QID		0x09
++#define TOE_GMAC0_HW_TXQ2_QID		0x0A
++#define TOE_GMAC0_HW_TXQ3_QID		0x0B
++#define TOE_GMAC1_SW_TXQ0_QID		0x12
++#define TOE_GMAC1_SW_TXQ1_QID		0x13
++#define TOE_GMAC1_SW_TXQ2_QID		0x14
++#define TOE_GMAC1_SW_TXQ3_QID		0x15
++#define TOE_GMAC1_SW_TXQ4_QID		0x16
++#define TOE_GMAC1_SW_TXQ5_QID		0x17
++#define TOE_GMAC1_HW_TXQ0_QID		0x18
++#define TOE_GMAC1_HW_TXQ1_QID		0x19
++#define TOE_GMAC1_HW_TXQ2_QID		0x1A
++#define TOE_GMAC1_HW_TXQ3_QID		0x1B
++#define TOE_GMAC0_DEFAULT_QID		0x20
++#define TOE_GMAC1_DEFAULT_QID		0x21
++#define TOE_CLASSIFICATION_QID(x)	(0x22 + x)	// 0x22 ~ 0x2F
++#define TOE_TOE_QID(x)				(0x40 + x)	// 0x40 ~ 0x7F
++
++/**********************************************************************
++ * TOE DMA Queue Number should be 2^n, n = 6...12
++ * TOE DMA Queues are the following queue types:
++ *		SW Free Queue, HW Free Queue,
++ *		GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-3
++ * They have same descriptor numbers.
++ * The base address and descriptor number are configured at
++ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
++ **********************************************************************/
++#define TOE_SW_FREEQ_DESC_POWER		10
++#define TOE_SW_FREEQ_DESC_NUM		(1<<TOE_SW_FREEQ_DESC_POWER)
++#define _max(x,y)					((x>y) ? x :y)
++#define TX_DESC_NUM					_max(TOE_GMAC0_SWTXQ_DESC_NUM, TOE_GMAC1_SWTXQ_DESC_NUM)
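++
++/*
++ * Worked example for the 2^n rule above (illustrative note only):
++ * TOE_SW_FREEQ_DESC_POWER == 10 gives a software free queue of
++ * 1 << 10 = 1024 descriptors.
++ */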
++
++#define RWPTR_ADVANCE_ONE(x, max)	((x == (max -1)) ? 0 : x+1)
++#define RWPTR_RECEDE_ONE(x, max)	((x == 0) ? (max -1) : x-1)
++#define SET_WPTR(addr, data)		(*(volatile u16 * const)((u32)(addr)+2) = (u16)data)
++#define SET_RPTR(addr, data)		(*(volatile u16 * const)((u32)(addr)) = (u16)data)
++
++/**********************************************************************
++ * Global registers
++ * #define TOE_GLOBAL_BASE			(TOE_BASE + 0x0000)
++ * Base 0x60000000
++ **********************************************************************/
++#define GLOBAL_TOE_VERSION_REG			0x0000
++#define GLOBAL_SW_FREEQ_BASE_SIZE_REG	0x0004
++#define GLOBAL_HW_FREEQ_BASE_SIZE_REG	0x0008
++#define GLOBAL_DMA_SKB_SIZE_REG			0x0010
++#define GLOBAL_SWFQ_RWPTR_REG			0x0014
++#define GLOBAL_HWFQ_RWPTR_REG			0x0018
++#define GLOBAL_INTERRUPT_STATUS_0_REG	0x0020
++#define GLOBAL_INTERRUPT_ENABLE_0_REG	0x0024
++#define GLOBAL_INTERRUPT_SELECT_0_REG	0x0028
++#define GLOBAL_INTERRUPT_STATUS_1_REG	0x0030
++#define GLOBAL_INTERRUPT_ENABLE_1_REG	0x0034
++#define GLOBAL_INTERRUPT_SELECT_1_REG	0x0038
++#define GLOBAL_INTERRUPT_STATUS_2_REG	0x0040
++#define GLOBAL_INTERRUPT_ENABLE_2_REG	0x0044
++#define GLOBAL_INTERRUPT_SELECT_2_REG	0x0048
++#define GLOBAL_INTERRUPT_STATUS_3_REG	0x0050
++#define GLOBAL_INTERRUPT_ENABLE_3_REG	0x0054
++#define GLOBAL_INTERRUPT_SELECT_3_REG	0x0058
++#define GLOBAL_INTERRUPT_STATUS_4_REG	0x0060
++#define GLOBAL_INTERRUPT_ENABLE_4_REG	0x0064
++#define GLOBAL_INTERRUPT_SELECT_4_REG	0x0068
++#define GLOBAL_HASH_TABLE_BASE_REG		0x006C
++#define GLOBAL_QUEUE_THRESHOLD_REG		0x0070
++
++/**********************************************************************
++ * GMAC 0/1 DMA/TOE register
++ * #define TOE_GMAC0_DMA_BASE		(TOE_BASE + 0x8000)
++ * #define TOE_GMAC1_DMA_BASE		(TOE_BASE + 0xC000)
++ * Base 0x60008000 or 0x6000C000
++ **********************************************************************/
++#define GMAC_DMA_CTRL_REG				0x0000
++#define GMAC_TX_WEIGHTING_CTRL_0_REG	0x0004
++#define GMAC_TX_WEIGHTING_CTRL_1_REG	0x0008
++#define GMAC_SW_TX_QUEUE0_PTR_REG		0x000C
++#define GMAC_SW_TX_QUEUE1_PTR_REG		0x0010
++#define GMAC_SW_TX_QUEUE2_PTR_REG		0x0014
++#define GMAC_SW_TX_QUEUE3_PTR_REG		0x0018
++#define GMAC_SW_TX_QUEUE4_PTR_REG		0x001C
++#define GMAC_SW_TX_QUEUE5_PTR_REG		0x0020
++#define GMAC_HW_TX_QUEUE0_PTR_REG		0x0024
++#define GMAC_HW_TX_QUEUE1_PTR_REG		0x0028
++#define GMAC_HW_TX_QUEUE2_PTR_REG		0x002C
++#define GMAC_HW_TX_QUEUE3_PTR_REG		0x0030
++#define GMAC_DMA_TX_FIRST_DESC_REG		0x0038
++#define GMAC_DMA_TX_CURR_DESC_REG		0x003C
++#define GMAC_DMA_TX_DESC_WORD0_REG		0x0040
++#define GMAC_DMA_TX_DESC_WORD1_REG		0x0044
++#define GMAC_DMA_TX_DESC_WORD2_REG		0x0048
++#define GMAC_DMA_TX_DESC_WORD3_REG		0x004C
++#define GMAC_SW_TX_QUEUE_BASE_REG		0x0050
++#define GMAC_HW_TX_QUEUE_BASE_REG		0x0054
++#define GMAC_DMA_RX_FIRST_DESC_REG		0x0058
++#define GMAC_DMA_RX_CURR_DESC_REG		0x005C
++#define GMAC_DMA_RX_DESC_WORD0_REG		0x0060
++#define GMAC_DMA_RX_DESC_WORD1_REG		0x0064
++#define GMAC_DMA_RX_DESC_WORD2_REG		0x0068
++#define GMAC_DMA_RX_DESC_WORD3_REG		0x006C
++#define GMAC_HASH_ENGINE_REG0			0x0070
++#define GMAC_HASH_ENGINE_REG1			0x0074
++#define GMAC_MR0CR0						0x0078 	// matching rule 0 Control register 0
++#define GMAC_MR0CR1						0x007C	// matching rule 0 Control register 1
++#define GMAC_MR0CR2						0x0080	// matching rule 0 Control register 2
++#define GMAC_MR1CR0						0x0084	// matching rule 1 Control register 0
++#define GMAC_MR1CR1						0x0088	// matching rule 1 Control register 1
++#define GMAC_MR1CR2						0x008C	// matching rule 1 Control register 2
++#define GMAC_MR2CR0						0x0090	// matching rule 2 Control register 0
++#define GMAC_MR2CR1						0x0094	// matching rule 2 Control register 1
++#define GMAC_MR2CR2						0x0098	// matching rule 2 Control register 2
++#define GMAC_MR3CR0						0x009C	// matching rule 3 Control register 0
++#define GMAC_MR3CR1						0x00A0	// matching rule 3 Control register 1
++#define GMAC_MR3CR2						0x00A4	// matching rule 3 Control register 2
++#define GMAC_SPR0						0x00A8	// Support Protocol Register 0
++#define GMAC_SPR1						0x00AC	// Support Protocol Register 1
++#define GMAC_SPR2						0x00B0	// Support Protocol Register 2
++#define GMAC_SPR3						0x00B4	// Support Protocol Register 3
++#define GMAC_SPR4						0x00B8	// Support Protocol Register 4
++#define GMAC_SPR5						0x00BC	// Support Protocol Register 5
++#define GMAC_SPR6						0x00C0	// Support Protocol Register 6
++#define GMAC_SPR7						0x00C4	// Support Protocol Register 7
++#define GMAC_AHB_WEIGHT_REG				0x00C8	// GMAC Hash/Rx/Tx AHB Weighting register
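++
++/*
++ * Access sketch (illustration only): the per-MAC DMA/TOE registers above
++ * are offsets from TOE_GMAC0_DMA_BASE / TOE_GMAC1_DMA_BASE, e.g.
++ *
++ *	u32 dma_ctrl = readl(TOE_GMAC0_DMA_BASE + GMAC_DMA_CTRL_REG);
++ */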
++
++/**********************************************************************
++ * TOE GMAC 0/1 register
++ * #define TOE_GMAC0_BASE				(TOE_BASE + 0xA000)
++ * #define TOE_GMAC1_BASE				(TOE_BASE + 0xE000)
++ * Base 0x6000A000 or 0x6000E000
++ **********************************************************************/
++enum GMAC_REGISTER {
++	GMAC_STA_ADD0 	= 0x0000,
++	GMAC_STA_ADD1	= 0x0004,
++	GMAC_STA_ADD2	= 0x0008,
++	GMAC_RX_FLTR	= 0x000c,
++	GMAC_MCAST_FIL0 = 0x0010,
++	GMAC_MCAST_FIL1 = 0x0014,
++	GMAC_CONFIG0	= 0x0018,
++	GMAC_CONFIG1	= 0x001c,
++	GMAC_CONFIG2	= 0x0020,
++	GMAC_CONFIG3	= 0x0024,
++	GMAC_RESERVED	= 0x0028,
++	GMAC_STATUS		= 0x002c,
++	GMAC_IN_DISCARDS= 0x0030,
++	GMAC_IN_ERRORS  = 0x0034,
++	GMAC_IN_MCAST   = 0x0038,
++	GMAC_IN_BCAST   = 0x003c,
++	GMAC_IN_MAC1    = 0x0040,	// for STA 1 MAC Address
++	GMAC_IN_MAC2    = 0x0044	// for STA 2 MAC Address
++};
++/**********************************************************************
++ * TOE version Register (offset 0x0000)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int reserved		: 15;	// bit 31:17
++		unsigned int v_bit_mode		: 1;	// bit 16		1: 128-entry
++		unsigned int device_id		: 12;	// bit 15:4 	Device ID
++		unsigned int revision_id	: 4;	// bit  3:0 	Revision ID
++#else
++		unsigned int revision_id	: 4;	// bit  3:0 	Revision ID
++		unsigned int device_id		: 12;	// bit 15:4 	Device ID
++		unsigned int v_bit_mode		: 1;	// bit 16		1: 128-entry
++		unsigned int reserved		: 15;	// bit 31:17
++#endif
++	} bits;
++} TOE_VERSION_T;
++
++
++/**********************************************************************
++ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int base_size;
++} DMA_Q_BASE_SIZE_T;
++#define DMA_Q_BASE_MASK 	(~0x0f)
++
++/**********************************************************************
++ * DMA SKB Buffer register (offset 0x0008)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0008
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int hw_skb_size	: 16;	// bit 31:16	HW Free pool SKB Size
++		unsigned int sw_skb_size	: 16;	// bit 15:0 	SW Free pool SKB Size
++#else
++		unsigned int sw_skb_size	: 16;	// bit 15:0 	SW Free pool SKB Size
++		unsigned int hw_skb_size	: 16;	// bit 31:16	HW Free pool SKB Size
++#endif
++	} bits;
++} DMA_SKB_SIZE_T;
++
++/**********************************************************************
++ * DMA SW Free Queue Read/Write Pointer Register (offset 0x000C)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_000c
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int wptr			: 16;	// bit 31:16 	Write Ptr, RW
++		unsigned int rptr			: 16;	// bit 15:0		Read Ptr, RO
++#else
++		unsigned int rptr			: 16;	// bit 15:0		Read Ptr, RO
++		unsigned int wptr			: 16;	// bit 31:16 	Write Ptr, RW
++#endif
++	} bits;
++} DMA_RWPTR_T;
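++
++/*
++ * Minimal usage sketch (illustration only; the surrounding queue handling
++ * is assumed): advance the software free queue write pointer by one.
++ *
++ *	DMA_RWPTR_T rwptr;
++ *	unsigned int reg = TOE_GLOBAL_BASE + GLOBAL_SWFQ_RWPTR_REG;
++ *
++ *	rwptr.bits32 = readl(reg);
++ *	rwptr.bits.wptr = RWPTR_ADVANCE_ONE(rwptr.bits.wptr, TOE_SW_FREEQ_DESC_NUM);
++ *	SET_WPTR(reg, rwptr.bits.wptr);	// 16-bit store to the wptr half only
++ */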
++
++/**********************************************************************
++ * DMA HW Free Queue Read/Write Pointer Register (offset 0x0010)
++ **********************************************************************/
++// see DMA_RWPTR_T structure
++
++/**********************************************************************
++ * Interrupt Status Register 0 	(offset 0x0020)
++ * Interrupt Mask Register 0 	(offset 0x0024)
++ * Interrupt Select Register 0 	(offset 0x0028)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0020
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int txDerr1		: 1;	// bit 31	GMAC1 AHB Bus Error while Tx
++		unsigned int txPerr1		: 1;	// bit 30	GMAC1 Tx Descriptor Protocol Error
++		unsigned int txDerr0		: 1;	// bit 29	GMAC0 AHB Bus Error while Tx
++		unsigned int txPerr0		: 1;	// bit 28	GMAC0 Tx Descriptor Protocol Error
++		unsigned int rxDerr1		: 1;	// bit 27	GMAC1 AHB Bus Error while Rx
++		unsigned int rxPerr1		: 1;	// bit 26	GMAC1 Rx Descriptor Protocol Error
++		unsigned int rxDerr0		: 1;	// bit 25	GMAC0 AHB Bus Error while Rx
++		unsigned int rxPerr0		: 1;	// bit 24	GMAC0 Rx Descriptor Protocol Error
++		unsigned int swtq15_fin		: 1;	// bit 23	GMAC1 SW Tx Queue 5 Finish Interrupt
++		unsigned int swtq14_fin		: 1;	// bit 22	GMAC1 SW Tx Queue 4 Finish Interrupt
++		unsigned int swtq13_fin		: 1;	// bit 21	GMAC1 SW Tx Queue 3 Finish Interrupt
++		unsigned int swtq12_fin		: 1;	// bit 20	GMAC1 SW Tx Queue 2 Finish Interrupt
++		unsigned int swtq11_fin		: 1;	// bit 19	GMAC1 SW Tx Queue 1 Finish Interrupt
++		unsigned int swtq10_fin		: 1;	// bit 18	GMAC1 SW Tx Queue 0 Finish Interrupt
++		unsigned int swtq05_fin		: 1;	// bit 17	GMAC0 SW Tx Queue 5 Finish Interrupt
++		unsigned int swtq04_fin		: 1;	// bit 16	GMAC0 SW Tx Queue 4 Finish Interrupt
++		unsigned int swtq03_fin		: 1;	// bit 15	GMAC0 SW Tx Queue 3 Finish Interrupt
++		unsigned int swtq02_fin		: 1;	// bit 14	GMAC0 SW Tx Queue 2 Finish Interrupt
++		unsigned int swtq01_fin		: 1;	// bit 13	GMAC0 SW Tx Queue 1 Finish Interrupt
++		unsigned int swtq00_fin		: 1;	// bit 12	GMAC0 SW Tx Queue 0 Finish Interrupt
++		unsigned int swtq15_eof		: 1;	// bit 11	GMAC1 SW Tx Queue 5 EOF Interrupt
++		unsigned int swtq14_eof		: 1;	// bit 10	GMAC1 SW Tx Queue 4 EOF Interrupt
++		unsigned int swtq13_eof		: 1;	// bit 9	GMAC1 SW Tx Queue 3 EOF Interrupt
++		unsigned int swtq12_eof		: 1;	// bit 8	GMAC1 SW Tx Queue 2 EOF Interrupt
++		unsigned int swtq11_eof		: 1;	// bit 7	GMAC1 SW Tx Queue 1 EOF Interrupt
++		unsigned int swtq10_eof		: 1;	// bit 6	GMAC1 SW Tx Queue 0 EOF Interrupt
++		unsigned int swtq05_eof		: 1;	// bit 5	GMAC0 SW Tx Queue 5 EOF Interrupt
++		unsigned int swtq04_eof		: 1;	// bit 4	GMAC0 SW Tx Queue 4 EOF Interrupt
++		unsigned int swtq03_eof		: 1;	// bit 3	GMAC0 SW Tx Queue 3 EOF Interrupt
++		unsigned int swtq02_eof		: 1;	// bit 2	GMAC0 SW Tx Queue 2 EOF Interrupt
++		unsigned int swtq01_eof		: 1;	// bit 1	GMAC0 SW Tx Queue 1 EOF Interrupt
++		unsigned int swtq00_eof		: 1;	// bit 0	GMAC0 SW Tx Queue 0 EOF Interrupt
++#else
++		unsigned int swtq00_eof		: 1;	// bit 0	GMAC0 SW Tx Queue 0 EOF Interrupt
++		unsigned int swtq01_eof		: 1;	// bit 1	GMAC0 SW Tx Queue 1 EOF Interrupt
++		unsigned int swtq02_eof		: 1;	// bit 2	GMAC0 SW Tx Queue 2 EOF Interrupt
++		unsigned int swtq03_eof		: 1;	// bit 3	GMAC0 SW Tx Queue 3 EOF Interrupt
++		unsigned int swtq04_eof		: 1;	// bit 4	GMAC0 SW Tx Queue 4 EOF Interrupt
++		unsigned int swtq05_eof		: 1;	// bit 5	GMAC0 SW Tx Queue 5 EOF Interrupt
++		unsigned int swtq10_eof		: 1;	// bit 6	GMAC1 SW Tx Queue 0 EOF Interrupt
++		unsigned int swtq11_eof		: 1;	// bit 7	GMAC1 SW Tx Queue 1 EOF Interrupt
++		unsigned int swtq12_eof		: 1;	// bit 8	GMAC1 SW Tx Queue 2 EOF Interrupt
++		unsigned int swtq13_eof		: 1;	// bit 9	GMAC1 SW Tx Queue 3 EOF Interrupt
++		unsigned int swtq14_eof		: 1;	// bit 10	GMAC1 SW Tx Queue 4 EOF Interrupt
++		unsigned int swtq15_eof		: 1;	// bit 11	GMAC1 SW Tx Queue 5 EOF Interrupt
++		unsigned int swtq00_fin		: 1;	// bit 12	GMAC0 SW Tx Queue 0 Finish Interrupt
++		unsigned int swtq01_fin		: 1;	// bit 13	GMAC0 SW Tx Queue 1 Finish Interrupt
++		unsigned int swtq02_fin		: 1;	// bit 14	GMAC0 SW Tx Queue 2 Finish Interrupt
++		unsigned int swtq03_fin		: 1;	// bit 15	GMAC0 SW Tx Queue 3 Finish Interrupt
++		unsigned int swtq04_fin		: 1;	// bit 16	GMAC0 SW Tx Queue 4 Finish Interrupt
++		unsigned int swtq05_fin		: 1;	// bit 17	GMAC0 SW Tx Queue 5 Finish Interrupt
++		unsigned int swtq10_fin		: 1;	// bit 18	GMAC1 SW Tx Queue 0 Finish Interrupt
++		unsigned int swtq11_fin		: 1;	// bit 19	GMAC1 SW Tx Queue 1 Finish Interrupt
++		unsigned int swtq12_fin		: 1;	// bit 20	GMAC1 SW Tx Queue 2 Finish Interrupt
++		unsigned int swtq13_fin		: 1;	// bit 21	GMAC1 SW Tx Queue 3 Finish Interrupt
++		unsigned int swtq14_fin		: 1;	// bit 22	GMAC1 SW Tx Queue 4 Finish Interrupt
++		unsigned int swtq15_fin		: 1;	// bit 23	GMAC1 SW Tx Queue 5 Finish Interrupt
++		unsigned int rxPerr0		: 1;	// bit 24	GMAC0 Rx Descriptor Protocol Error
++		unsigned int rxDerr0		: 1;	// bit 25	GMAC0 AHB Bus Error while Rx
++		unsigned int rxPerr1		: 1;	// bit 26	GMAC1 Rx Descriptor Protocol Error
++		unsigned int rxDerr1		: 1;	// bit 27	GMAC1 AHB Bus Error while Rx
++		unsigned int txPerr0		: 1;	// bit 28	GMAC0 Tx Descriptor Protocol Error
++		unsigned int txDerr0		: 1;	// bit 29	GMAC0 AHB Bus Error while Tx
++		unsigned int txPerr1		: 1;	// bit 30	GMAC1 Tx Descriptor Protocol Error
++		unsigned int txDerr1		: 1;	// bit 31	GMAC1 AHB Bus Error while Tx
++#endif
++	} bits;
++} INTR_REG0_T;
++
++#define GMAC1_TXDERR_INT_BIT		BIT(31)
++#define GMAC1_TXPERR_INT_BIT		BIT(30)
++#define GMAC0_TXDERR_INT_BIT		BIT(29)
++#define GMAC0_TXPERR_INT_BIT		BIT(28)
++#define GMAC1_RXDERR_INT_BIT		BIT(27)
++#define GMAC1_RXPERR_INT_BIT		BIT(26)
++#define GMAC0_RXDERR_INT_BIT		BIT(25)
++#define GMAC0_RXPERR_INT_BIT		BIT(24)
++#define GMAC1_SWTQ15_FIN_INT_BIT	BIT(23)
++#define GMAC1_SWTQ14_FIN_INT_BIT	BIT(22)
++#define GMAC1_SWTQ13_FIN_INT_BIT	BIT(21)
++#define GMAC1_SWTQ12_FIN_INT_BIT	BIT(20)
++#define GMAC1_SWTQ11_FIN_INT_BIT	BIT(19)
++#define GMAC1_SWTQ10_FIN_INT_BIT	BIT(18)
++#define GMAC0_SWTQ05_FIN_INT_BIT	BIT(17)
++#define GMAC0_SWTQ04_FIN_INT_BIT	BIT(16)
++#define GMAC0_SWTQ03_FIN_INT_BIT	BIT(15)
++#define GMAC0_SWTQ02_FIN_INT_BIT	BIT(14)
++#define GMAC0_SWTQ01_FIN_INT_BIT	BIT(13)
++#define GMAC0_SWTQ00_FIN_INT_BIT	BIT(12)
++#define GMAC1_SWTQ15_EOF_INT_BIT	BIT(11)
++#define GMAC1_SWTQ14_EOF_INT_BIT	BIT(10)
++#define GMAC1_SWTQ13_EOF_INT_BIT	BIT(9)
++#define GMAC1_SWTQ12_EOF_INT_BIT	BIT(8)
++#define GMAC1_SWTQ11_EOF_INT_BIT	BIT(7)
++#define GMAC1_SWTQ10_EOF_INT_BIT	BIT(6)
++#define GMAC0_SWTQ05_EOF_INT_BIT	BIT(5)
++#define GMAC0_SWTQ04_EOF_INT_BIT	BIT(4)
++#define GMAC0_SWTQ03_EOF_INT_BIT	BIT(3)
++#define GMAC0_SWTQ02_EOF_INT_BIT	BIT(2)
++#define GMAC0_SWTQ01_EOF_INT_BIT	BIT(1)
++#define GMAC0_SWTQ00_EOF_INT_BIT	BIT(0)
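++
++/*
++ * Decoding sketch (illustration only; the actual ISR lives elsewhere in
++ * this patch): the bit masks above are tested against the raw status word.
++ *
++ *	u32 status = readl(TOE_GLOBAL_BASE + GLOBAL_INTERRUPT_STATUS_0_REG);
++ *	if (status & GMAC0_SWTQ00_FIN_INT_BIT)
++ *		;	// GMAC0 SW TX queue 0 finished
++ */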
++
++
++/**********************************************************************
++ * Interrupt Status Register 1 	(offset 0x0030)
++ * Interrupt Mask Register 1 	(offset 0x0034)
++ * Interrupt Select Register 1 	(offset 0x0038)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0030
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int toe_iq3_full	: 1;	// bit 31	TOE Interrupt Queue 3 Full Interrupt
++		unsigned int toe_iq2_full	: 1;	// bit 30	TOE Interrupt Queue 2 Full Interrupt
++		unsigned int toe_iq1_full	: 1;	// bit 29	TOE Interrupt Queue 1 Full Interrupt
++		unsigned int toe_iq0_full	: 1;	// bit 28	TOE Interrupt Queue 0 Full Interrupt
++		unsigned int toe_iq3_intr	: 1;	// bit 27	TOE Interrupt Queue 3 with Interrupts
++		unsigned int toe_iq2_intr	: 1;	// bit 26	TOE Interrupt Queue 2 with Interrupts
++		unsigned int toe_iq1_intr	: 1;	// bit 25	TOE Interrupt Queue 1 with Interrupts
++		unsigned int toe_iq0_intr	: 1;	// bit 24	TOE Interrupt Queue 0 with Interrupts
++		unsigned int hwtq13_eof		: 1;	// bit 23	GMAC1 HW Tx Queue3 EOF Interrupt
++		unsigned int hwtq12_eof		: 1;	// bit 22	GMAC1 HW Tx Queue2 EOF Interrupt
++		unsigned int hwtq11_eof		: 1;	// bit 21	GMAC1 HW Tx Queue1 EOF Interrupt
++		unsigned int hwtq10_eof		: 1;	// bit 20	GMAC1 HW Tx Queue0 EOF Interrupt
++		unsigned int hwtq03_eof		: 1;	// bit 19	GMAC0 HW Tx Queue3 EOF Interrupt
++		unsigned int hwtq02_eof		: 1;	// bit 18	GMAC0 HW Tx Queue2 EOF Interrupt
++		unsigned int hwtq01_eof		: 1;	// bit 17	GMAC0 HW Tx Queue1 EOF Interrupt
++		unsigned int hwtq00_eof		: 1;	// bit 16	GMAC0 HW Tx Queue0 EOF Interrupt
++		unsigned int class_rx		: 14;	// bit 15:2	Classification Queue Rx Interrupt
++		unsigned int default_q1_eof	: 1;	// bit 1	Default Queue 1 EOF Interrupt
++		unsigned int default_q0_eof	: 1;	// bit 0	Default Queue 0 EOF Interrupt
++#else
++		unsigned int default_q0_eof	: 1;	// bit 0	Default Queue 0 EOF Interrupt
++		unsigned int default_q1_eof	: 1;	// bit 1	Default Queue 1 EOF Interrupt
++		unsigned int class_rx		: 14;	// bit 15:2	Classification Queue Rx Interrupt
++		unsigned int hwtq00_eof		: 1;	// bit 16	GMAC0 HW Tx Queue0 EOF Interrupt
++		unsigned int hwtq01_eof		: 1;	// bit 17	GMAC0 HW Tx Queue1 EOF Interrupt
++		unsigned int hwtq02_eof		: 1;	// bit 18	GMAC0 HW Tx Queue2 EOF Interrupt
++		unsigned int hwtq03_eof		: 1;	// bit 19	GMAC0 HW Tx Queue3 EOF Interrupt
++		unsigned int hwtq10_eof		: 1;	// bit 20	GMAC1 HW Tx Queue0 EOF Interrupt
++		unsigned int hwtq11_eof		: 1;	// bit 21	GMAC1 HW Tx Queue1 EOF Interrupt
++		unsigned int hwtq12_eof		: 1;	// bit 22	GMAC1 HW Tx Queue2 EOF Interrupt
++		unsigned int hwtq13_eof		: 1;	// bit 23	GMAC1 HW Tx Queue3 EOF Interrupt
++		unsigned int toe_iq0_intr	: 1;	// bit 24	TOE Interrupt Queue 0 with Interrupts
++		unsigned int toe_iq1_intr	: 1;	// bit 25	TOE Interrupt Queue 1 with Interrupts
++		unsigned int toe_iq2_intr	: 1;	// bit 26	TOE Interrupt Queue 2 with Interrupts
++		unsigned int toe_iq3_intr	: 1;	// bit 27	TOE Interrupt Queue 3 with Interrupts
++		unsigned int toe_iq0_full	: 1;	// bit 28	TOE Interrupt Queue 0 Full Interrupt
++		unsigned int toe_iq1_full	: 1;	// bit 29	TOE Interrupt Queue 1 Full Interrupt
++		unsigned int toe_iq2_full	: 1;	// bit 30	TOE Interrupt Queue 2 Full Interrupt
++		unsigned int toe_iq3_full	: 1;	// bit 31	TOE Interrupt Queue 3 Full Interrupt
++#endif
++	} bits;
++} INTR_REG1_T;
++
++#define TOE_IQ3_FULL_INT_BIT		BIT(31)
++#define TOE_IQ2_FULL_INT_BIT		BIT(30)
++#define TOE_IQ1_FULL_INT_BIT		BIT(29)
++#define TOE_IQ0_FULL_INT_BIT		BIT(28)
++#define TOE_IQ3_INT_BIT				BIT(27)
++#define TOE_IQ2_INT_BIT				BIT(26)
++#define TOE_IQ1_INT_BIT				BIT(25)
++#define TOE_IQ0_INT_BIT				BIT(24)
++#define GMAC1_HWTQ13_EOF_INT_BIT	BIT(23)
++#define GMAC1_HWTQ12_EOF_INT_BIT	BIT(22)
++#define GMAC1_HWTQ11_EOF_INT_BIT	BIT(21)
++#define GMAC1_HWTQ10_EOF_INT_BIT	BIT(20)
++#define GMAC0_HWTQ03_EOF_INT_BIT	BIT(19)
++#define GMAC0_HWTQ02_EOF_INT_BIT	BIT(18)
++#define GMAC0_HWTQ01_EOF_INT_BIT	BIT(17)
++#define GMAC0_HWTQ00_EOF_INT_BIT	BIT(16)
++#define CLASS_RX_INT_BIT(x)			BIT((x+2))
++#define DEFAULT_Q1_INT_BIT			BIT(1)
++#define DEFAULT_Q0_INT_BIT			BIT(0)
++
++#define TOE_IQ_INT_BITS				(TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
++		               	 			TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
++#define	TOE_IQ_FULL_BITS			(TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
++		                	 		TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
++#define	TOE_IQ_ALL_BITS				(TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
++#define TOE_CLASS_RX_INT_BITS		0xfffc
++
++/**********************************************************************
++ * Interrupt Status Register 2 	(offset 0x0040)
++ * Interrupt Mask Register 2 	(offset 0x0044)
++ * Interrupt Select Register 2 	(offset 0x0048)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0040
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int toe_q31_full	: 1;	// bit 31	TOE Queue 31 Full Interrupt
++		unsigned int toe_q30_full	: 1;	// bit 30	TOE Queue 30 Full Interrupt
++		unsigned int toe_q29_full	: 1;	// bit 29	TOE Queue 29 Full Interrupt
++		unsigned int toe_q28_full	: 1;	// bit 28	TOE Queue 28 Full Interrupt
++		unsigned int toe_q27_full	: 1;	// bit 27	TOE Queue 27 Full Interrupt
++		unsigned int toe_q26_full	: 1;	// bit 26	TOE Queue 26 Full Interrupt
++		unsigned int toe_q25_full	: 1;	// bit 25	TOE Queue 25 Full Interrupt
++		unsigned int toe_q24_full	: 1;	// bit 24	TOE Queue 24 Full Interrupt
++		unsigned int toe_q23_full	: 1;	// bit 23	TOE Queue 23 Full Interrupt
++		unsigned int toe_q22_full	: 1;	// bit 22	TOE Queue 22 Full Interrupt
++		unsigned int toe_q21_full	: 1;	// bit 21	TOE Queue 21 Full Interrupt
++		unsigned int toe_q20_full	: 1;	// bit 20	TOE Queue 20 Full Interrupt
++		unsigned int toe_q19_full	: 1;	// bit 19	TOE Queue 19 Full Interrupt
++		unsigned int toe_q18_full	: 1;	// bit 18	TOE Queue 18 Full Interrupt
++		unsigned int toe_q17_full	: 1;	// bit 17	TOE Queue 17 Full Interrupt
++		unsigned int toe_q16_full	: 1;	// bit 16	TOE Queue 16 Full Interrupt
++		unsigned int toe_q15_full	: 1;	// bit 15	TOE Queue 15 Full Interrupt
++		unsigned int toe_q14_full	: 1;	// bit 14	TOE Queue 14 Full Interrupt
++		unsigned int toe_q13_full	: 1;	// bit 13	TOE Queue 13 Full Interrupt
++		unsigned int toe_q12_full	: 1;	// bit 12	TOE Queue 12 Full Interrupt
++		unsigned int toe_q11_full	: 1;	// bit 11	TOE Queue 11 Full Interrupt
++		unsigned int toe_q10_full	: 1;	// bit 10	TOE Queue 10 Full Interrupt
++		unsigned int toe_q9_full	: 1;	// bit 9	TOE Queue 9 Full Interrupt
++		unsigned int toe_q8_full	: 1;	// bit 8	TOE Queue 8 Full Interrupt
++		unsigned int toe_q7_full	: 1;	// bit 7	TOE Queue 7 Full Interrupt
++		unsigned int toe_q6_full	: 1;	// bit 6	TOE Queue 6 Full Interrupt
++		unsigned int toe_q5_full	: 1;	// bit 5	TOE Queue 5 Full Interrupt
++		unsigned int toe_q4_full	: 1;	// bit 4	TOE Queue 4 Full Interrupt
++		unsigned int toe_q3_full	: 1;	// bit 3	TOE Queue 3 Full Interrupt
++		unsigned int toe_q2_full	: 1;	// bit 2	TOE Queue 2 Full Interrupt
++		unsigned int toe_q1_full	: 1;	// bit 1	TOE Queue 1 Full Interrupt
++		unsigned int toe_q0_full	: 1;	// bit 0	TOE Queue 0 Full Interrupt
++#else
++		unsigned int toe_q0_full	: 1;	// bit 0	TOE Queue 0 Full Interrupt
++		unsigned int toe_q1_full	: 1;	// bit 1	TOE Queue 1 Full Interrupt
++		unsigned int toe_q2_full	: 1;	// bit 2	TOE Queue 2 Full Interrupt
++		unsigned int toe_q3_full	: 1;	// bit 3	TOE Queue 3 Full Interrupt
++		unsigned int toe_q4_full	: 1;	// bit 4	TOE Queue 4 Full Interrupt
++		unsigned int toe_q5_full	: 1;	// bit 5	TOE Queue 5 Full Interrupt
++		unsigned int toe_q6_full	: 1;	// bit 6	TOE Queue 6 Full Interrupt
++		unsigned int toe_q7_full	: 1;	// bit 7	TOE Queue 7 Full Interrupt
++		unsigned int toe_q8_full	: 1;	// bit 8	TOE Queue 8 Full Interrupt
++		unsigned int toe_q9_full	: 1;	// bit 9	TOE Queue 9 Full Interrupt
++		unsigned int toe_q10_full	: 1;	// bit 10	TOE Queue 10 Full Interrupt
++		unsigned int toe_q11_full	: 1;	// bit 11	TOE Queue 11 Full Interrupt
++		unsigned int toe_q12_full	: 1;	// bit 12	TOE Queue 12 Full Interrupt
++		unsigned int toe_q13_full	: 1;	// bit 13	TOE Queue 13 Full Interrupt
++		unsigned int toe_q14_full	: 1;	// bit 14	TOE Queue 14 Full Interrupt
++		unsigned int toe_q15_full	: 1;	// bit 15	TOE Queue 15 Full Interrupt
++		unsigned int toe_q16_full	: 1;	// bit 16	TOE Queue 16 Full Interrupt
++		unsigned int toe_q17_full	: 1;	// bit 17	TOE Queue 17 Full Interrupt
++		unsigned int toe_q18_full	: 1;	// bit 18	TOE Queue 18 Full Interrupt
++		unsigned int toe_q19_full	: 1;	// bit 19	TOE Queue 19 Full Interrupt
++		unsigned int toe_q20_full	: 1;	// bit 20	TOE Queue 20 Full Interrupt
++		unsigned int toe_q21_full	: 1;	// bit 21	TOE Queue 21 Full Interrupt
++		unsigned int toe_q22_full	: 1;	// bit 22	TOE Queue 22 Full Interrupt
++		unsigned int toe_q23_full	: 1;	// bit 23	TOE Queue 23 Full Interrupt
++		unsigned int toe_q24_full	: 1;	// bit 24	TOE Queue 24 Full Interrupt
++		unsigned int toe_q25_full	: 1;	// bit 25	TOE Queue 25 Full Interrupt
++		unsigned int toe_q26_full	: 1;	// bit 26	TOE Queue 26 Full Interrupt
++		unsigned int toe_q27_full	: 1;	// bit 27	TOE Queue 27 Full Interrupt
++		unsigned int toe_q28_full	: 1;	// bit 28	TOE Queue 28 Full Interrupt
++		unsigned int toe_q29_full	: 1;	// bit 29	TOE Queue 29 Full Interrupt
++		unsigned int toe_q30_full	: 1;	// bit 30	TOE Queue 30 Full Interrupt
++		unsigned int toe_q31_full	: 1;	// bit 31	TOE Queue 31 Full Interrupt
++#endif
++	} bits;
++} INTR_REG2_T;
++
++#define TOE_QL_FULL_INT_BIT(x)		BIT(x)
++
++/**********************************************************************
++ * Interrupt Status Register 3 	(offset 0x0050)
++ * Interrupt Mask Register 3 	(offset 0x0054)
++ * Interrupt Select Register 3 	(offset 0x0058)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0050
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int toe_q63_full	: 1;	// bit 63	TOE Queue 63 Full Interrupt
++		unsigned int toe_q62_full	: 1;	// bit 62	TOE Queue 62 Full Interrupt
++		unsigned int toe_q61_full	: 1;	// bit 61	TOE Queue 61 Full Interrupt
++		unsigned int toe_q60_full	: 1;	// bit 60	TOE Queue 60 Full Interrupt
++		unsigned int toe_q59_full	: 1;	// bit 59	TOE Queue 59 Full Interrupt
++		unsigned int toe_q58_full	: 1;	// bit 58	TOE Queue 58 Full Interrupt
++		unsigned int toe_q57_full	: 1;	// bit 57	TOE Queue 57 Full Interrupt
++		unsigned int toe_q56_full	: 1;	// bit 56	TOE Queue 56 Full Interrupt
++		unsigned int toe_q55_full	: 1;	// bit 55	TOE Queue 55 Full Interrupt
++		unsigned int toe_q54_full	: 1;	// bit 54	TOE Queue 54 Full Interrupt
++		unsigned int toe_q53_full	: 1;	// bit 53	TOE Queue 53 Full Interrupt
++		unsigned int toe_q52_full	: 1;	// bit 52	TOE Queue 52 Full Interrupt
++		unsigned int toe_q51_full	: 1;	// bit 51	TOE Queue 51 Full Interrupt
++		unsigned int toe_q50_full	: 1;	// bit 50	TOE Queue 50 Full Interrupt
++		unsigned int toe_q49_full	: 1;	// bit 49	TOE Queue 49 Full Interrupt
++		unsigned int toe_q48_full	: 1;	// bit 48	TOE Queue 48 Full Interrupt
++		unsigned int toe_q47_full	: 1;	// bit 47	TOE Queue 47 Full Interrupt
++		unsigned int toe_q46_full	: 1;	// bit 46	TOE Queue 46 Full Interrupt
++		unsigned int toe_q45_full	: 1;	// bit 45	TOE Queue 45 Full Interrupt
++		unsigned int toe_q44_full	: 1;	// bit 44	TOE Queue 44 Full Interrupt
++		unsigned int toe_q43_full	: 1;	// bit 43	TOE Queue 43 Full Interrupt
++		unsigned int toe_q42_full	: 1;	// bit 42	TOE Queue 42 Full Interrupt
++		unsigned int toe_q41_full	: 1;	// bit 41	TOE Queue 41 Full Interrupt
++		unsigned int toe_q40_full	: 1;	// bit 40	TOE Queue 40 Full Interrupt
++		unsigned int toe_q39_full	: 1;	// bit 39 	TOE Queue 39 Full Interrupt
++		unsigned int toe_q38_full	: 1;	// bit 38	TOE Queue 38 Full Interrupt
++		unsigned int toe_q37_full	: 1;	// bit 37	TOE Queue 37 Full Interrupt
++		unsigned int toe_q36_full	: 1;	// bit 36	TOE Queue 36 Full Interrupt
++		unsigned int toe_q35_full	: 1;	// bit 35	TOE Queue 35 Full Interrupt
++		unsigned int toe_q34_full	: 1;	// bit 34	TOE Queue 34 Full Interrupt
++		unsigned int toe_q33_full	: 1;	// bit 33	TOE Queue 33 Full Interrupt
++		unsigned int toe_q32_full	: 1;	// bit 32	TOE Queue 32 Full Interrupt
++#else
++		unsigned int toe_q32_full	: 1;	// bit 32	TOE Queue 32 Full Interrupt
++		unsigned int toe_q33_full	: 1;	// bit 33	TOE Queue 33 Full Interrupt
++		unsigned int toe_q34_full	: 1;	// bit 34	TOE Queue 34 Full Interrupt
++		unsigned int toe_q35_full	: 1;	// bit 35	TOE Queue 35 Full Interrupt
++		unsigned int toe_q36_full	: 1;	// bit 36	TOE Queue 36 Full Interrupt
++		unsigned int toe_q37_full	: 1;	// bit 37	TOE Queue 37 Full Interrupt
++		unsigned int toe_q38_full	: 1;	// bit 38	TOE Queue 38 Full Interrupt
++		unsigned int toe_q39_full	: 1;	// bit 39	TOE Queue 39 Full Interrupt
++		unsigned int toe_q40_full	: 1;	// bit 40	TOE Queue 40 Full Interrupt
++		unsigned int toe_q41_full	: 1;	// bit 41	TOE Queue 41 Full Interrupt
++		unsigned int toe_q42_full	: 1;	// bit 42	TOE Queue 42 Full Interrupt
++		unsigned int toe_q43_full	: 1;	// bit 43	TOE Queue 43 Full Interrupt
++		unsigned int toe_q44_full	: 1;	// bit 44	TOE Queue 44 Full Interrupt
++		unsigned int toe_q45_full	: 1;	// bit 45	TOE Queue 45 Full Interrupt
++		unsigned int toe_q46_full	: 1;	// bit 46	TOE Queue 46 Full Interrupt
++		unsigned int toe_q47_full	: 1;	// bit 47	TOE Queue 47 Full Interrupt
++		unsigned int toe_q48_full	: 1;	// bit 48	TOE Queue 48 Full Interrupt
++		unsigned int toe_q49_full	: 1;	// bit 49	TOE Queue 49 Full Interrupt
++		unsigned int toe_q50_full	: 1;	// bit 50	TOE Queue 50 Full Interrupt
++		unsigned int toe_q51_full	: 1;	// bit 51	TOE Queue 51 Full Interrupt
++		unsigned int toe_q52_full	: 1;	// bit 52	TOE Queue 52 Full Interrupt
++		unsigned int toe_q53_full	: 1;	// bit 53	TOE Queue 53 Full Interrupt
++		unsigned int toe_q54_full	: 1;	// bit 54	TOE Queue 54 Full Interrupt
++		unsigned int toe_q55_full	: 1;	// bit 55	TOE Queue 55 Full Interrupt
++		unsigned int toe_q56_full	: 1;	// bit 56	TOE Queue 56 Full Interrupt
++		unsigned int toe_q57_full	: 1;	// bit 57	TOE Queue 57 Full Interrupt
++		unsigned int toe_q58_full	: 1;	// bit 58	TOE Queue 58 Full Interrupt
++		unsigned int toe_q59_full	: 1;	// bit 59	TOE Queue 59 Full Interrupt
++		unsigned int toe_q60_full	: 1;	// bit 60	TOE Queue 60 Full Interrupt
++		unsigned int toe_q61_full	: 1;	// bit 61	TOE Queue 61 Full Interrupt
++		unsigned int toe_q62_full	: 1;	// bit 62	TOE Queue 62 Full Interrupt
++		unsigned int toe_q63_full	: 1;	// bit 63	TOE Queue 63 Full Interrupt
++#endif
++	} bits;
++} INTR_REG3_T;
++
++#define TOE_QH_FULL_INT_BIT(x)		BIT(x-32)
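++
++/*
++ * Illustrative sketch (not part of the original driver): testing whether a
++ * given TOE queue (32..63, the range covered by Interrupt Status Register 3)
++ * reports a "queue full" event in a raw register value. Assumes the same
++ * BIT() helper used by the rest of this header.
++ */
++static inline int toe_queue_full_from_status3(unsigned int int_status3, int queue)
++{
++	/* queues 32..63 map onto bits 0..31 of register 3 */
++	return (int_status3 & TOE_QH_FULL_INT_BIT(queue)) != 0;
++}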
++
++/**********************************************************************
++ * Interrupt Status Register 4 	(offset 0x0060)
++ * Interrupt Mask Register 4 	(offset 0x0064)
++ * Interrupt Select Register 4 	(offset 0x0068)
++ **********************************************************************/
++typedef union
++{
++	unsigned char byte;
++	struct bit_0060
++	{
++#if (BIG_ENDIAN==1)
++		unsigned char reserved		: 1;	//
++		unsigned char cnt_full 		: 1;	// MIB counters half full interrupt
++		unsigned char rx_pause_on	: 1;	// received pause on frame interrupt
++		unsigned char tx_pause_on	: 1;	// transmit pause on frame interrupt
++		unsigned char rx_pause_off  : 1;	// received pause off frame interrupt
++		unsigned char tx_pause_off	: 1;	// transmit pause off frame interrupt
++		unsigned char rx_overrun	: 1;    // GMAC Rx FIFO overrun interrupt
++		unsigned char status_changed: 1;	// Status Changed Intr for RGMII Mode
++#else
++		unsigned char status_changed: 1;	// Status Changed Intr for RGMII Mode
++		unsigned char rx_overrun	: 1;   // GMAC Rx FIFO overrun interrupt
++		unsigned char tx_pause_off	: 1;	// transmit pause off frame interrupt
++		unsigned char rx_pause_off  : 1;	// received pause off frame interrupt
++		unsigned char tx_pause_on	: 1;	// transmit pause on frame interrupt
++		unsigned char rx_pause_on	: 1;	// received pause on frame interrupt
++		unsigned char cnt_full 		: 1;	// MIB counters half full interrupt
++		unsigned char reserved		: 1;	//
++#endif
++	} _PACKED_ bits;
++} _PACKED_ GMAC_INTR_T;
++
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0060_2
++	{
++#if (BIG_ENDIAN==1)
++		GMAC_INTR_T		gmac1;
++		GMAC_INTR_T		gmac0;
++		unsigned int	class_qf_int: 14;	// bit 15:2 Classification Rx Queue13-0 Full Intr.
++		unsigned int    hwfq_empty	: 1;	// bit 1	Hardware Free Queue Empty Intr.
++		unsigned int    swfq_empty	: 1;	// bit 0	Software Free Queue Empty Intr.
++#else
++		unsigned int    swfq_empty	: 1;	// bit 0	Software Free Queue Empty Intr.
++		unsigned int    hwfq_empty	: 1;	// bit 1	Hardware Free Queue Empty Intr.
++		unsigned int	class_qf_int: 14;	// bit 15:2 Classification Rx Queue13-0 Full Intr.
++		GMAC_INTR_T		gmac0;
++		GMAC_INTR_T		gmac1;
++#endif
++	} bits;
++} INTR_REG4_T;
++
++#define GMAC1_RESERVED_INT_BIT		BIT(31)
++#define GMAC1_MIB_INT_BIT			BIT(30)
++#define GMAC1_RX_PAUSE_ON_INT_BIT	BIT(29)
++#define GMAC1_TX_PAUSE_ON_INT_BIT	BIT(28)
++#define GMAC1_RX_PAUSE_OFF_INT_BIT	BIT(27)
++#define GMAC1_TX_PAUSE_OFF_INT_BIT	BIT(26)
++#define GMAC1_RX_OVERRUN_INT_BIT	BIT(25)
++#define GMAC1_STATUS_CHANGE_INT_BIT	BIT(24)
++#define GMAC0_RESERVED_INT_BIT		BIT(23)
++#define GMAC0_MIB_INT_BIT			BIT(22)
++#define GMAC0_RX_PAUSE_ON_INT_BIT	BIT(21)
++#define GMAC0_TX_PAUSE_ON_INT_BIT	BIT(20)
++#define GMAC0_RX_PAUSE_OFF_INT_BIT	BIT(19)
++#define GMAC0_TX_PAUSE_OFF_INT_BIT	BIT(18)
++#define GMAC0_RX_OVERRUN_INT_BIT	BIT(17)
++#define GMAC0_STATUS_CHANGE_INT_BIT	BIT(16)
++#define CLASS_RX_FULL_INT_BIT(x)	BIT((x+2))
++#define HWFQ_EMPTY_INT_BIT			BIT(1)
++#define SWFQ_EMPTY_INT_BIT			BIT(0)
++
++#if 1
++#define GMAC0_INT_BITS				(GMAC0_MIB_INT_BIT)
++#define GMAC1_INT_BITS				(GMAC1_MIB_INT_BIT)
++#else
++#define GMAC0_INT_BITS				(GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
++									 GMAC0_RX_PAUSE_ON_INT_BIT | GMAC0_TX_PAUSE_ON_INT_BIT |	\
++									 GMAC0_RX_PAUSE_OFF_INT_BIT | GMAC0_TX_PAUSE_OFF_INT_BIT | 	\
++									 GMAC0_RX_OVERRUN_INT_BIT | GMAC0_STATUS_CHANGE_INT_BIT)
++#define GMAC1_INT_BITS				(GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
++									 GMAC1_RX_PAUSE_ON_INT_BIT | GMAC1_TX_PAUSE_ON_INT_BIT |	\
++									 GMAC1_RX_PAUSE_OFF_INT_BIT | GMAC1_TX_PAUSE_OFF_INT_BIT | 	\
++									 GMAC1_RX_OVERRUN_INT_BIT | GMAC1_STATUS_CHANGE_INT_BIT)
++#endif
++
++#define CLASS_RX_FULL_INT_BITS		0xfffc
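++
++/*
++ * Illustrative sketch (not part of the original driver): composing a value
++ * for Interrupt Mask/Select Register 4 from the bit groups defined above.
++ * The two enable flags are hypothetical parameters for this example only.
++ */
++static inline unsigned int intr4_build_mask(int want_freeq_int, int want_class_int)
++{
++	unsigned int mask = GMAC0_INT_BITS | GMAC1_INT_BITS;
++
++	if (want_freeq_int)
++		mask |= SWFQ_EMPTY_INT_BIT | HWFQ_EMPTY_INT_BIT;
++	if (want_class_int)
++		mask |= CLASS_RX_FULL_INT_BITS;		/* classification queues 0..13 */
++	return mask;
++}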
++
++/**********************************************************************
++ * GLOBAL_QUEUE_THRESHOLD_REG 	(offset 0x0070)
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_0070_2
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	toe_class	: 8;	// 31:24
++		unsigned int	intrq		: 8;	// 23:16
++		unsigned int    hwfq_empty	: 8;	// 15:8		Hardware Free Queue Empty Threshold
++		unsigned int    swfq_empty	: 8;	//  7:0  	Software Free Queue Empty Threshold
++#else
++		unsigned int    swfq_empty	: 8;	//  7:0  	Software Free Queue Empty Threshold
++		unsigned int    hwfq_empty	: 8;	// 15:8		Hardware Free Queue Empty Threshold
++		unsigned int	intrq		: 8;	// 23:16
++		unsigned int	toe_class	: 8;	// 31:24
++#endif
++	} bits;
++} QUEUE_THRESHOLD_T;
++
++
++/**********************************************************************
++ * GMAC DMA Control Register
++ * GMAC0 offset 0x8000
++ * GMAC1 offset 0xC000
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8000
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    rd_enable		: 1;	// bit 31	Rx DMA Enable
++		unsigned int    td_enable		: 1;	// bit 30	Tx DMA Enable
++		unsigned int    loopback		: 1;	// bit 29	Loopback TxDMA to RxDMA
++		unsigned int    drop_small_ack	: 1;	// bit 28	1: Drop, 0: Accept
++		unsigned int	reserved		: 10;	// bit 27:18
++		unsigned int	rd_insert_bytes	: 2;	// bit 17:16
++		unsigned int	rd_prot			: 4;	// bit 15:12 DMA Protection Control
++		unsigned int	rd_burst_size	: 2;	// bit 11:10 DMA max burst size for every AHB request
++		unsigned int	rd_bus		    : 2;	// bit 9:8 	Peripheral Bus Width
++		unsigned int	td_prot			: 4;	// bit 7:4  TxDMA protection control
++		unsigned int	td_burst_size	: 2;	// bit 3:2	TxDMA max burst size for every AHB request
++		unsigned int	td_bus		    : 2;	// bit 1:0  Peripheral Bus Width
++#else
++		unsigned int	td_bus		    : 2;	// bit 1:0  Peripheral Bus Width
++		unsigned int	td_burst_size	: 2;	// bit 3:2	TxDMA max burst size for every AHB request
++		unsigned int	td_prot			: 4;	// bit 7:4  TxDMA protection control
++		unsigned int	rd_bus		    : 2;	// bit 9:8 	Peripheral Bus Width
++		unsigned int	rd_burst_size	: 2;	// bit 11:10 DMA max burst size for every AHB request
++		unsigned int	rd_prot			: 4;	// bit 15:12 DMA Protection Control
++		unsigned int	rd_insert_bytes	: 2;	// bit 17:16
++		unsigned int	reserved		: 10;	// bit 27:18
++		unsigned int    drop_small_ack	: 1;	// bit 28	1: Drop, 0: Accept
++		unsigned int    loopback		: 1;	// bit 29	Loopback TxDMA to RxDMA
++		unsigned int    td_enable		: 1;	// bit 30	Tx DMA Enable
++		unsigned int    rd_enable		: 1;	// bit 31	Rx DMA Enable
++#endif
++	} bits;
++} GMAC_DMA_CTRL_T;
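++
++/*
++ * Illustrative sketch (not part of the original driver): turning on both DMA
++ * engines in a GMAC DMA Control Register value through the union above. The
++ * caller is assumed to read the register (offset 0x8000/0xC000), pass the raw
++ * value here and write the result back.
++ */
++static inline unsigned int gmac_dma_ctrl_enable(unsigned int raw)
++{
++	GMAC_DMA_CTRL_T ctrl;
++
++	ctrl.bits32 = raw;
++	ctrl.bits.rd_enable = 1;	/* enable RxDMA */
++	ctrl.bits.td_enable = 1;	/* enable TxDMA */
++	return ctrl.bits32;
++}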
++
++/**********************************************************************
++ * GMAC Tx Weighting Control Register 0
++ * GMAC0 offset 0x8004
++ * GMAC1 offset 0xC004
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8004
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    reserved		: 8;	// bit 31:24
++		unsigned int    hw_tq3			: 6;	// bit 23:18	HW TX Queue 3
++		unsigned int    hw_tq2			: 6;	// bit 17:12	HW TX Queue 2
++		unsigned int    hw_tq1			: 6;	// bit 11:6		HW TX Queue 1
++		unsigned int    hw_tq0			: 6;	// bit 5:0		HW TX Queue 0
++#else
++		unsigned int    hw_tq0			: 6;	// bit 5:0		HW TX Queue 0
++		unsigned int    hw_tq1			: 6;	// bit 11:6		HW TX Queue 1
++		unsigned int    hw_tq2			: 6;	// bit 17:12	HW TX Queue 2
++		unsigned int    hw_tq3			: 6;	// bit 23:18	HW TX Queue 3
++		unsigned int    reserved		: 8;	// bit 31:24
++#endif
++	} bits;
++} GMAC_TX_WCR0_T;	// Weighting Control Register 0
++
++/**********************************************************************
++ * GMAC Tx Weighting Control Register 1
++ * GMAC0 offset 0x8008
++ * GMAC1 offset 0xC008
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8008
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    reserved		: 2;	// bit 31:30
++		unsigned int    sw_tq5			: 5;	// bit 29:25	SW TX Queue 5
++		unsigned int    sw_tq4			: 5;	// bit 24:20	SW TX Queue 4
++		unsigned int    sw_tq3			: 5;	// bit 19:15	SW TX Queue 3
++		unsigned int    sw_tq2			: 5;	// bit 14:10	SW TX Queue 2
++		unsigned int    sw_tq1			: 5;	// bit 9:5		SW TX Queue 1
++		unsigned int    sw_tq0			: 5;	// bit 4:0		SW TX Queue 0
++#else
++		unsigned int    sw_tq0			: 5;	// bit 4:0		SW TX Queue 0
++		unsigned int    sw_tq1			: 5;	// bit 9:5		SW TX Queue 1
++		unsigned int    sw_tq2			: 5;	// bit 14:10	SW TX Queue 2
++		unsigned int    sw_tq3			: 5;	// bit 19:15	SW TX Queue 3
++		unsigned int    sw_tq4			: 5;	// bit 24:20	SW TX Queue 4
++		unsigned int    sw_tq5			: 5;	// bit 29:25	SW TX Queue 5
++		unsigned int    reserved		: 2;	// bit 31:30
++#endif
++	} bits;
++} GMAC_TX_WCR1_T;	// Weighting Control Register 1
++
++/**********************************************************************
++ * Queue Read/Write Pointer
++ * GMAC SW TX Queue 0~5 Read/Write Pointer register
++ * GMAC0 offset 0x800C ~ 0x8020
++ * GMAC1 offset 0xC00C ~ 0xC020
++ * GMAC HW TX Queue 0~3 Read/Write Pointer register
++ * GMAC0 offset 0x8024 ~ 0x8030
++ * GMAC1 offset 0xC024 ~ 0xC030
++ **********************************************************************/
++// see DMA_RWPTR_T structure
++
++/**********************************************************************
++ * GMAC DMA Tx First Description Address Register
++ * GMAC0 offset 0x8038
++ * GMAC1 offset 0xC038
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8038
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int td_first_des_ptr	: 28;	// bit 31:4	first descriptor address
++		unsigned int td_busy			:  1;	// bit 3	1: TxDMA busy; 0: TxDMA idle
++		unsigned int reserved			:  3;
++#else
++		unsigned int reserved			:  3;
++		unsigned int td_busy			:  1;	// bit 3	1: TxDMA busy; 0: TxDMA idle
++		unsigned int td_first_des_ptr	: 28;	// bit 31:4	first descriptor address
++#endif
++	} bits;
++} GMAC_TXDMA_FIRST_DESC_T;
++
++/**********************************************************************
++ * GMAC DMA Tx Current Description Address Register
++ * GMAC0 offset 0x803C
++ * GMAC1 offset 0xC03C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_803C
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int td_curr_desc_ptr	: 28;	// bit 31:4	current descriptor address
++		unsigned int reserved			:  4;
++#else
++		unsigned int reserved			:  4;
++		unsigned int td_curr_desc_ptr	: 28;	// bit 31:4	current descriptor address
++#endif
++	} bits;
++} GMAC_TXDMA_CURR_DESC_T;
++
++/**********************************************************************
++ * GMAC DMA Tx Description Word 0 Register
++ * GMAC0 offset 0x8040
++ * GMAC1 offset 0xC040
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8040
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int reserved		: 1;	// bit 31
++		unsigned int derr			: 1;	// bit 30	 data error during processing this descriptor
++		unsigned int perr			: 1;	// bit 29	 protocol error during processing this descriptor
++		unsigned int status_rvd		: 6;	// bit 28:23 Tx Status, Reserved bits
++		unsigned int status_tx_ok	: 1;	// bit 22    Tx Status, 1: Successful 0: Failed
++		unsigned int desc_count 	: 6;	// bit 21:16 number of descriptors used for the current frame
++		unsigned int buffer_size 	: 16;	// bit 15:0  Transfer size
++#else
++		unsigned int buffer_size 	: 16;	// bit 15:0  Transfer size
++		unsigned int desc_count 	: 6;	// bit 21:16 number of descriptors used for the current frame
++		unsigned int status_tx_ok	: 1;	// bit 22    Tx Status, 1: Successful 0: Failed
++		unsigned int status_rvd		: 6;	// bit 28:23 Tx Status, Reserved bits
++		unsigned int perr			: 1;	// bit 29	 protocol error during processing this descriptor
++		unsigned int derr			: 1;	// bit 30	 data error during processing this descriptor
++		unsigned int reserved		: 1;	// bit 31
++#endif
++	} bits;
++} GMAC_TXDESC_0_T;
++
++/**********************************************************************
++ * GMAC DMA Tx Description Word 1 Register
++ * GMAC0 offset 0x8044
++ * GMAC1 offset 0xC044
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct txdesc_word1
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	reserved	: 9;	// bit 31:23 	Tx Flag, Reserved
++		unsigned int	ip_fixed_len: 1;	// bit 22
++		unsigned int	bypass_tss	: 1;	// bit 21
++		unsigned int	udp_chksum	: 1;	// bit 20		UDP Checksum Enable
++		unsigned int	tcp_chksum	: 1;	// bit 19		TCP Checksum Enable
++		unsigned int	ipv6_enable	: 1;	// bit 18		IPV6 Tx Enable
++		unsigned int	ip_chksum	: 1;	// bit 17		IPV4 Header Checksum Enable
++		unsigned int	mtu_enable	: 1;	// bit 16		TSS segmentation use MTU setting
++		unsigned int	byte_count	: 16;	// bit 15: 0	Tx Frame Byte Count
++#else
++		unsigned int	byte_count	: 16;	// bit 15: 0	Tx Frame Byte Count
++		unsigned int	mtu_enable	: 1;	// bit 16		TSS segmentation use MTU setting
++		unsigned int	ip_chksum	: 1;	// bit 17		IPV4 Header Checksum Enable
++		unsigned int	ipv6_enable	: 1;	// bit 18		IPV6 Tx Enable
++		unsigned int	tcp_chksum	: 1;	// bit 19		TCP Checksum Enable
++		unsigned int	udp_chksum	: 1;	// bit 20		UDP Checksum Enable
++		unsigned int	bypass_tss	: 1;	// bit 21
++		unsigned int	ip_fixed_len: 1;	// bit 22
++		unsigned int	reserved	: 9;	// bit 31:23 	Tx Flag, Reserved
++#endif
++	} bits;
++} GMAC_TXDESC_1_T;
++
++#define TSS_IP_FIXED_LEN_BIT	BIT(22)
++#define TSS_UDP_CHKSUM_BIT		BIT(20)
++#define TSS_TCP_CHKSUM_BIT		BIT(19)
++#define TSS_IPV6_ENABLE_BIT		BIT(18)
++#define TSS_IP_CHKSUM_BIT		BIT(17)
++#define TSS_MTU_ENABLE_BIT		BIT(16)
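++
++/*
++ * Illustrative sketch (not part of the original driver): requesting IPv4
++ * header and TCP checksum offload for one outgoing frame by setting the
++ * TSS-related fields of TX descriptor word 1.
++ */
++static inline void gmac_txdesc_enable_tcp_csum(GMAC_TXDESC_1_T *word1)
++{
++	word1->bits.ip_chksum  = 1;	/* IPv4 header checksum */
++	word1->bits.tcp_chksum = 1;	/* TCP checksum */
++	word1->bits.mtu_enable = 0;	/* no TSS segmentation for this frame */
++}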
++
++/**********************************************************************
++ * GMAC DMA Tx Description Word 2 Register
++ * GMAC0 offset 0x8048
++ * GMAC1 offset 0xC048
++ **********************************************************************/
++typedef union
++{
++	unsigned int	bits32;
++	unsigned int 	buf_adr;
++} GMAC_TXDESC_2_T;
++
++/**********************************************************************
++ * GMAC DMA Tx Description Word 3 Register
++ * GMAC0 offset 0x804C
++ * GMAC1 offset 0xC04C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct txdesc_word3
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	sof_eof		: 2;	// bit 31:30 	11: only one, 10: first, 01: last, 00: linking
++		unsigned int	eofie		: 1;	// bit 29		End of frame interrupt enable
++		unsigned int	reserved	: 18;	// bit 28:11
++		unsigned int	mtu_size	: 11;	// bit 10: 0	Tx Frame Byte Count
++#else
++		unsigned int	mtu_size	: 11;	// bit 10: 0	Tx Frame Byte Count
++		unsigned int	reserved	: 18;	// bit 28:11
++		unsigned int	eofie		: 1;	// bit 29		End of frame interrupt enable
++		unsigned int	sof_eof		: 2;	// bit 31:30 	11: only one, 10: first, 01: last, 00: linking
++#endif
++	} bits;
++} GMAC_TXDESC_3_T;
++#define SOF_EOF_BIT_MASK	0x3fffffff
++#define SOF_BIT				0x80000000
++#define EOF_BIT				0x40000000
++#define EOFIE_BIT			BIT(29)
++#define MTU_SIZE_BIT_MASK	0x7ff
++
++/**********************************************************************
++ * GMAC Tx Descriptor
++ **********************************************************************/
++typedef struct
++{
++	GMAC_TXDESC_0_T	word0;
++	GMAC_TXDESC_1_T	word1;
++	GMAC_TXDESC_2_T	word2;
++	GMAC_TXDESC_3_T	word3;
++} GMAC_TXDESC_T;
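++
++/*
++ * Illustrative sketch (not part of the original driver): filling one TX
++ * descriptor for a frame that fits in a single buffer. 'dma_addr' and 'len'
++ * are caller-supplied; no checksum offload is requested here.
++ */
++static inline void gmac_fill_single_txdesc(GMAC_TXDESC_T *desc,
++                                           unsigned int dma_addr, unsigned int len)
++{
++	desc->word0.bits32 = 0;
++	desc->word0.bits.buffer_size = len;	/* bytes in this buffer */
++	desc->word1.bits32 = 0;
++	desc->word1.bits.byte_count = len;	/* total frame byte count */
++	desc->word2.buf_adr = dma_addr;		/* physical buffer address */
++	desc->word3.bits32 = 0;
++	desc->word3.bits.sof_eof = 0x3;		/* 11: first and last descriptor */
++	desc->word3.bits.eofie = 1;		/* raise end-of-frame interrupt */
++}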
++
++
++/**********************************************************************
++ * GMAC DMA Rx First Description Address Register
++ * GMAC0 offset 0x8058
++ * GMAC1 offset 0xC058
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8058
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int rd_first_des_ptr	: 28;	// bit 31:4 first descriptor address
++		unsigned int rd_busy			:  1;	// bit 3	1-RxDMA busy; 0-RxDMA idle
++		unsigned int reserved			:  3;	// bit 2:0
++#else
++		unsigned int reserved			:  3;	// bit 2:0
++		unsigned int rd_busy			:  1;	// bit 3	1-RxDMA busy; 0-RxDMA idle
++		unsigned int rd_first_des_ptr	: 28;	// bit 31:4 first descriptor address
++#endif
++	} bits;
++} GMAC_RXDMA_FIRST_DESC_T;
++
++/**********************************************************************
++ * GMAC DMA Rx Current Description Address Register
++ * GMAC0 offset 0x805C
++ * GMAC1 offset 0xC05C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_805C
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int rd_curr_des_ptr	: 28;	// bit 31:4 current descriptor address
++		unsigned int reserved			:  4;	// bit 3:0
++#else
++		unsigned int reserved			:  4;	// bit 3:0
++		unsigned int rd_curr_des_ptr	: 28;	// bit 31:4 current descriptor address
++#endif
++	} bits;
++} GMAC_RXDMA_CURR_DESC_T;
++
++/**********************************************************************
++ * GMAC DMA Rx Description Word 0 Register
++ * GMAC0 offset 0x8060
++ * GMAC1 offset 0xC060
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8060
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int drop			: 1;	// bit 31	 TOE/CIS Queue Full dropped packet to default queue
++		unsigned int derr			: 1;	// bit 30	 data error during processing this descriptor
++		unsigned int perr			: 1;	// bit 29	 protocol error during processing this descriptor
++		unsigned int chksum_status	: 3;	// bit 28:26 Check Sum Status
++		unsigned int status			: 4;	// bit 25:22 Status of rx frame
++		unsigned int desc_count 	: 6;	// bit 21:16 number of descriptors used for the current frame
++		unsigned int buffer_size 	: 16;	// bit 15:0  buffer size in bytes
++#else
++		unsigned int buffer_size 	: 16;	// bit 15:0  buffer size in bytes
++		unsigned int desc_count 	: 6;	// bit 21:16 number of descriptors used for the current frame
++		unsigned int status			: 4;	// bit 25:22 Status of rx frame
++		unsigned int chksum_status	: 3;	// bit 28:26 Check Sum Status
++		unsigned int perr			: 1;	// bit 29	 protocol error during processing this descriptor
++		unsigned int derr			: 1;	// bit 30	 data error during processing this descriptor
++		unsigned int drop			: 1;	// bit 31	 TOE/CIS Queue Full dropped packet to default queue
++#endif
++	} bits;
++} GMAC_RXDESC_0_T;
++
++#define		GMAC_RXDESC_0_T_derr				BIT(30)
++#define		GMAC_RXDESC_0_T_perr				BIT(29)
++#define		GMAC_RXDESC_0_T_chksum_status(x)	BIT((x+26))
++#define		GMAC_RXDESC_0_T_status(x)			BIT((x+22))
++#define		GMAC_RXDESC_0_T_desc_count(x)		BIT((x+16))
++
++#define	RX_CHKSUM_IP_UDP_TCP_OK			0
++#define	RX_CHKSUM_IP_OK_ONLY			1
++#define	RX_CHKSUM_NONE					2
++#define	RX_CHKSUM_IP_ERR_UNKNOWN		4
++#define	RX_CHKSUM_IP_ERR				5
++#define	RX_CHKSUM_TCP_UDP_ERR			6
++#define RX_CHKSUM_NUM					8
++
++#define RX_STATUS_GOOD_FRAME			0
++#define RX_STATUS_TOO_LONG_GOOD_CRC		1
++#define RX_STATUS_RUNT_FRAME			2
++#define RX_STATUS_SFD_NOT_FOUND			3
++#define RX_STATUS_CRC_ERROR				4
++#define RX_STATUS_TOO_LONG_BAD_CRC		5
++#define RX_STATUS_ALIGNMENT_ERROR		6
++#define RX_STATUS_TOO_LONG_BAD_ALIGN	7
++#define RX_STATUS_RX_ERR				8
++#define RX_STATUS_DA_FILTERED			9
++#define RX_STATUS_BUFFER_FULL			10
++#define RX_STATUS_NUM					16
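++
++/*
++ * Illustrative sketch (not part of the original driver): deciding from RX
++ * descriptor word 0 whether a received frame is usable, based on the status
++ * and checksum codes defined above. Returns non-zero for a good frame.
++ */
++static inline int gmac_rx_frame_ok(GMAC_RXDESC_0_T word0)
++{
++	if (word0.bits.derr || word0.bits.perr)
++		return 0;		/* data or protocol error */
++	if (word0.bits.status != RX_STATUS_GOOD_FRAME)
++		return 0;		/* MAC level receive error */
++	return (word0.bits.chksum_status == RX_CHKSUM_IP_UDP_TCP_OK ||
++	        word0.bits.chksum_status == RX_CHKSUM_IP_OK_ONLY ||
++	        word0.bits.chksum_status == RX_CHKSUM_NONE);
++}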
++
++
++/**********************************************************************
++ * GMAC DMA Rx Description Word 1 Register
++ * GMAC0 offset 0x8064
++ * GMAC1 offset 0xC064
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct rxdesc_word1
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	sw_id		: 16;	// bit 31:16	Software ID
++		unsigned int	byte_count	: 16;	// bit 15: 0	Rx Frame Byte Count
++#else
++		unsigned int	byte_count	: 16;	// bit 15: 0	Rx Frame Byte Count
++		unsigned int	sw_id		: 16;	// bit 31:16	Software ID
++#endif
++	} bits;
++} GMAC_RXDESC_1_T;
++
++/**********************************************************************
++ * GMAC DMA Rx Description Word 2 Register
++ * GMAC0 offset 0x8068
++ * GMAC1 offset 0xC068
++ **********************************************************************/
++typedef union
++{
++	unsigned int	bits32;
++	unsigned int	buf_adr;
++} GMAC_RXDESC_2_T;
++
++#define RX_INSERT_NONE		0
++#define RX_INSERT_1_BYTE	1
++#define RX_INSERT_2_BYTE	2
++#define RX_INSERT_3_BYTE	3
++
++#define RX_INSERT_BYTES		RX_INSERT_2_BYTE
++/**********************************************************************
++ * GMAC DMA Rx Description Word 3 Register
++ * GMAC0 offset 0x806C
++ * GMAC1 offset 0xC06C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct rxdesc_word3
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	sof_eof		: 2;	// bit 31:30 	11: only one, 10: first, 01: last, 00: linking
++		unsigned int	eofie		: 1;	// bit 29		End of frame interrupt enable
++		unsigned int	ctrl_flag	: 1;	// bit 28 		Control Flag is present
++		unsigned int	out_of_seq	: 1;	// bit 27		Out of Sequence packet
++		unsigned int	option		: 1;	// bit 26		IPV4 option or IPV6 extension header
++		unsigned int	abnormal	: 1;	// bit 25		abnormal case found
++		unsigned int	dup_ack		: 1;	// bit 24		Duplicated ACK detected
++		unsigned int	l7_offset	: 8;	// bit 23: 16	L7 data offset
++		unsigned int	l4_offset	: 8;	// bit 15: 8	L4 data offset
++		unsigned int	l3_offset	: 8;	// bit 7: 0		L3 data offset
++#else
++		unsigned int	l3_offset	: 8;	// bit 7: 0		L3 data offset
++		unsigned int	l4_offset	: 8;	// bit 15: 8	L4 data offset
++		unsigned int	l7_offset	: 8;	// bit 23: 16	L7 data offset
++		unsigned int	dup_ack		: 1;	// bit 24		Duplicated ACK detected
++		unsigned int	abnormal	: 1;	// bit 25		abnormal case found
++		unsigned int	option		: 1;	// bit 26		IPV4 option or IPV6 extension header
++		unsigned int	out_of_seq	: 1;	// bit 27		Out of Sequence packet
++		unsigned int	ctrl_flag	: 1;	// bit 28 		Control Flag is present
++		unsigned int	eofie		: 1;	// bit 29		End of frame interrupt enable
++		unsigned int	sof_eof		: 2;	// bit 31:30 	11: only one, 10: first, 01: last, 00: linking
++#endif
++	} bits;
++} GMAC_RXDESC_3_T;
++
++/**********************************************************************
++ * GMAC Rx Descriptor
++ **********************************************************************/
++typedef struct
++{
++	GMAC_RXDESC_0_T	word0;
++	GMAC_RXDESC_1_T	word1;
++	GMAC_RXDESC_2_T	word2;
++	GMAC_RXDESC_3_T	word3;
++} GMAC_RXDESC_T;
++
++/**********************************************************************
++ * GMAC Hash Engine Enable/Action Register 0 Offset Register
++ * GMAC0 offset 0x8070
++ * GMAC1 offset 0xC070
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8070
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	mr1en		: 1;	// bit 31		Enable Matching Rule 1
++		unsigned int	reserved1	: 1;	// bit 30
++		unsigned int	timing		: 3;	// bit 29:27
++		unsigned int	mr1_action	: 5;	// bit 26:22	Matching Rule 1 action offset
++		unsigned int	mr1hel		: 6;	// bit 21:16	match rule 1 hash entry size
++		unsigned int	mr0en		: 1;	// bit 15		Enable Matching Rule 0
++		unsigned int	reserved0	: 4;	// bit 14:11
++		unsigned int	mr0_action	: 5;	// bit 10:6		Matching Rule 0 action offset
++		unsigned int	mr0hel		: 6;	// bit 5:0		match rule 0 hash entry size
++#else
++		unsigned int	mr0hel		: 6;	// bit 5:0		match rule 0 hash entry size
++		unsigned int	mr0_action	: 5;	// bit 10:6		Matching Rule 0 action offset
++		unsigned int	reserved0	: 4;	// bit 14:11
++		unsigned int	mr0en		: 1;	// bit 15		Enable Matching Rule 0
++		unsigned int	mr1hel		: 6;	// bit 21:16	match rule 1 hash entry size
++		unsigned int	mr1_action	: 5;	// bit 26:22	Matching Rule 1 action offset
++		unsigned int	timing		: 3;	// bit 29:27
++		unsigned int	reserved1	: 1;	// bit 30
++		unsigned int	mr1en		: 1;	// bit 31		Enable Matching Rule 1
++#endif
++	} bits;
++} GMAC_HASH_ENABLE_REG0_T;
++
++/**********************************************************************
++ * GMAC Hash Engine Enable/Action Register 1 Offset Register
++ * GMAC0 offset 0x8074
++ * GMAC1 offset 0xC074
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8074
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	mr3en		: 1;	// bit 31		Enable Matching Rule 3
++		unsigned int	reserved3	: 4;	// bit 30:27
++		unsigned int	mr3_action	: 5;	// bit 26:22	Matching Rule 3 action offset
++		unsigned int	mr3hel		: 6;	// bit 21:16	match rule 3 hash entry size
++		unsigned int	mr2en		: 1;	// bit 15		Enable Matching Rule 2
++		unsigned int	reserved2	: 4;	// bit 14:11
++		unsigned int	mr2_action	: 5;	// bit 10:6		Matching Rule 2 action offset
++		unsigned int	mr2hel		: 6;	// bit 5:0		match rule 2 hash entry size
++#else
++		unsigned int	mr2hel		: 6;	// bit 5:0		match rule 2 hash entry size
++		unsigned int	mr2_action	: 5;	// bit 10:6		Matching Rule 2 action offset
++		unsigned int	reserved2	: 4;	// bit 14:11
++		unsigned int	mr2en		: 1;	// bit 15		Enable Matching Rule 2
++		unsigned int	mr3hel		: 6;	// bit 21:16	match rule 3 hash entry size
++		unsigned int	mr3_action	: 5;	// bit 26:22	Matching Rule 3 action offset
++		unsigned int	reserved1	: 4;	// bit 30:27
++		unsigned int	mr3en		: 1;	// bit 31		Enable Matching Rule 3
++#endif
++	} bits;
++} GMAC_HASH_ENABLE_REG1_T;
++
++
++/**********************************************************************
++ * GMAC Matching Rule Control Register 0
++ * GMAC0 offset 0x8078
++ * GMAC1 offset 0xC078
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_8078
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	l2			: 1;	// bit 31		L2 matching enable
++		unsigned int	l3			: 1;	// bit 30		L3 matching enable
++		unsigned int	l4			: 1;	// bit 29		L4 matching enable
++		unsigned int	l7			: 1;	// bit 28		L7 matching enable
++		unsigned int	port		: 1;	// bit 27		PORT ID matching enable
++		unsigned int	priority	: 3;	// bit 26:24	priority if multi-rules matched
++		unsigned int	da			: 1;	// bit 23		MAC DA enable
++		unsigned int	sa			: 1;	// bit 22		MAC SA enable
++		unsigned int	ether_type	: 1;	// bit 21		Ethernet type enable
++		unsigned int	vlan		: 1;	// bit 20		VLAN ID enable
++		unsigned int	pppoe		: 1;	// bit 19		PPPoE Session ID enable
++		unsigned int	reserved1	: 3;	// bit 18:16
++		unsigned int	ip_version	: 1;	// bit 15		0: IPV4, 1: IPV6
++		unsigned int	ip_hdr_len	: 1;	// bit 14		IPV4 Header length
++		unsigned int	flow_lable	: 1;	// bit 13		IPV6 Flow label
++		unsigned int	tos_traffic	: 1;	// bit 12		IPV4 TOS or IPV6 Traffic Class
++		unsigned int	reserved2	: 4;	// bit 11:8
++		unsigned int	sprx		: 8;	// bit 7:0		Support Protocol Register 7:0
++#else
++		unsigned int	sprx		: 8;	// bit 7:0		Support Protocol Register 7:0
++		unsigned int	reserved2	: 4;	// bit 11:8
++		unsigned int	tos_traffic	: 1;	// bit 12		IPV4 TOS or IPV6 Traffic Class
++		unsigned int	flow_lable	: 1;	// bit 13		IPV6 Flow label
++		unsigned int	ip_hdr_len	: 1;	// bit 14		IPV4 Header length
++		unsigned int	ip_version	: 1;	// bit 15		0: IPV4, 1: IPV6
++		unsigned int	reserved1	: 3;	// bit 18:16
++		unsigned int	pppoe		: 1;	// bit 19		PPPoE Session ID enable
++		unsigned int	vlan		: 1;	// bit 20		VLAN ID enable
++		unsigned int	ether_type	: 1;	// bit 21		Ethernet type enable
++		unsigned int	sa			: 1;	// bit 22		MAC SA enable
++		unsigned int	da			: 1;	// bit 23		MAC DA enable
++		unsigned int	priority	: 3;	// bit 26:24	priority if multi-rules matched
++		unsigned int	port		: 1;	// bit 27		PORT ID matching enable
++		unsigned int	l7			: 1;	// bit 28		L7 matching enable
++		unsigned int	l4			: 1;	// bit 29		L4 matching enable
++		unsigned int	l3			: 1;	// bit 30		L3 matching enable
++		unsigned int	l2			: 1;	// bit 31		L2 matching enable
++#endif
++	} bits;
++} GMAC_MRxCR0_T;
++
++#define MR_L2_BIT			BIT(31)
++#define MR_L3_BIT			BIT(30)
++#define MR_L4_BIT			BIT(29)
++#define MR_L7_BIT			BIT(28)
++#define MR_PORT_BIT			BIT(27)
++#define MR_PRIORITY_BIT		BIT(26)
++#define MR_DA_BIT			BIT(23)
++#define MR_SA_BIT			BIT(22)
++#define MR_ETHER_TYPE_BIT	BIT(21)
++#define MR_VLAN_BIT			BIT(20)
++#define MR_PPPOE_BIT		BIT(19)
++#define MR_IP_VER_BIT		BIT(15)
++#define MR_IP_HDR_LEN_BIT	BIT(14)
++#define MR_FLOW_LABLE_BIT	BIT(13)
++#define MR_TOS_TRAFFIC_BIT	BIT(12)
++#define MR_SPR_BIT(x)		BIT(x)
++#define MR_SPR_BITS		0xff
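++
++/*
++ * Illustrative sketch (not part of the original driver): building a matching
++ * rule control word 0 that enables L3 matching on IPv4 traffic. The field
++ * choices are examples only, not the configuration used by the driver.
++ */
++static inline unsigned int gmac_mrxcr0_ipv4_l3(void)
++{
++	GMAC_MRxCR0_T mrxcr0;
++
++	mrxcr0.bits32 = 0;
++	mrxcr0.bits.l3 = 1;		/* enable L3 matching */
++	mrxcr0.bits.ip_version = 0;	/* 0: IPv4 */
++	return mrxcr0.bits32;
++}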
++
++/**********************************************************************
++ * GMAC Matching Rule Control Register 1
++ * GMAC0 offset 0x807C
++ * GMAC1 offset 0xC07C
++ **********************************************************************/
++ typedef union
++{
++	unsigned int bits32;
++	struct bit_807C
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int	sip			: 1;	// bit 31		Srce IP
++		unsigned int	sip_netmask	: 7;	// bit 30:24	Srce IP net mask, number of mask bits
++		unsigned int	dip			: 1;	// bit 23		Dest IP
++		unsigned int	dip_netmask	: 7;	// bit 22:16	Dest IP net mask, number of mask bits
++		unsigned int    l4_byte0_15	: 16;	// bit 15: 0
++#else
++		unsigned int    l4_byte0_15	: 16;	// bit 15: 0
++		unsigned int	dip_netmask	: 7;	// bit 22:16	Dest IP net mask, number of mask bits
++		unsigned int	dip			: 1;	// bit 23		Dest IP
++		unsigned int	sip_netmask	: 7;	// bit 30:24	Srce IP net mask, number of mask bits
++		unsigned int	sip			: 1;	// bit 31		Srce IP
++#endif
++	} bits;
++} GMAC_MRxCR1_T;
++
++/**********************************************************************
++ * GMAC Matching Rule Control Register 2
++ * GMAC0 offset 0x8080
++ * GMAC1 offset 0xC080
++ **********************************************************************/
++ typedef union
++{
++	unsigned int bits32;
++	struct bit_8080
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    l4_byte16_24: 8;	// bit 31: 24
++		unsigned int    l7_byte0_23	: 24;	// bit 23:0
++#else
++		unsigned int    l7_byte0_23	: 24;	// bit 23:0
++		unsigned int    l4_byte16_24: 8;	// bit 31: 24
++#endif
++	} bits;
++} GMAC_MRxCR2_T;
++
++
++/**********************************************************************
++ * GMAC Support registers
++ * GMAC0 offset 0x80A8
++ * GMAC1 offset 0xC0A8
++ **********************************************************************/
++ typedef union
++{
++	unsigned int bits32;
++	struct bit_80A8
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    reserved: 21;	// bit 31:11
++		unsigned int    swap	: 3;	// bit 10:8		Swap
++		unsigned int    protocol: 8;	// bit 7:0		Supported protocol
++#else
++		unsigned int    protocol: 8;	// bit 7:0		Supported protocol
++		unsigned int    swap	: 3;	// bit 10:8		Swap
++		unsigned int    reserved: 21;	// bit 31:11
++#endif
++	} bits;
++} GMAC_SPR_T;
++
++/**********************************************************************
++ * GMAC_AHB_WEIGHT registers
++ * GMAC0 offset 0x80C8
++ * GMAC1 offset 0xC0C8
++ **********************************************************************/
++ typedef union
++{
++	unsigned int bits32;
++	struct bit_80C8
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int    reserved		: 7;	// 31:25
++		unsigned int    tqDV_threshold	: 5;	// 24:20 DMA TqCtrl to Start tqDV FIFO Threshold
++		unsigned int    pre_req			: 5;	// 19:15 Rx Data Pre Request FIFO Threshold
++		unsigned int    tx_weight		: 5;	// 14:10
++		unsigned int    rx_weight		: 5;	// 9:5
++		unsigned int    hash_weight		: 5;	// 4:0
++#else
++		unsigned int    hash_weight		: 5;	// 4:0
++		unsigned int    rx_weight		: 5;	// 9:5
++		unsigned int    tx_weight		: 5;	// 14:10
++		unsigned int    pre_req			: 5;	// 19:15 Rx Data Pre Request FIFO Threshold
++		unsigned int    tqDV_threshold	: 5;	// 24:20 DMA TqCtrl to Start tqDV FIFO Threshold
++		unsigned int    reserved		: 7;	// 31:25
++#endif
++	} bits;
++} GMAC_AHB_WEIGHT_T;
++/**********************************************************************
++ * the register structure of GMAC
++ **********************************************************************/
++
++/**********************************************************************
++ * GMAC RX FLTR
++ * GMAC0 Offset 0xA00C
++ * GMAC1 Offset 0xE00C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_000c
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int 				: 27;
++		unsigned int error			:  1;	/* enable receive of all error frames */
++		unsigned int promiscuous	:  1;   /* enable receive of all frames */
++		unsigned int broadcast		:  1;	/* enable receive of broadcast frames */
++		unsigned int multicast		:  1;	/* enable receive of multicast frames that pass multicast filter */
++		unsigned int unicast		:  1;	/* enable receive of unicast frames that are sent to STA address */
++#else
++		unsigned int unicast		:  1;	/* enable receive of unicast frames that are sent to STA address */
++		unsigned int multicast		:  1;	/* enable receive of multicast frames that pass multicast filter */
++		unsigned int broadcast		:  1;	/* enable receive of broadcast frames */
++		unsigned int promiscuous	:  1;   /* enable receive of all frames */
++		unsigned int error			:  1;	/* enable receive of all error frames */
++		unsigned int 				: 27;
++#endif
++	} bits;
++} GMAC_RX_FLTR_T;
++
++/**********************************************************************
++ * GMAC Configuration 0
++ * GMAC0 Offset 0xA018
++ * GMAC1 Offset 0xE018
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_0018
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int reserved		:  2;	// 31:30
++		unsigned int port1_chk_classq :  1;	// 29
++		unsigned int port0_chk_classq :  1;	// 28
++		unsigned int port1_chk_toeq	:  1;	// 27
++		unsigned int port0_chk_toeq	:  1;	// 26
++		unsigned int port1_chk_hwq	:  1;	// 25
++		unsigned int port0_chk_hwq	:  1;	// 24
++		unsigned int rx_err_detect  :  1;	// 23
++		unsigned int ipv6_exthdr_order: 1;	// 22
++		unsigned int rxc_inv		:  1;	// 21
++		unsigned int rgmm_edge		:  1;	// 20
++        unsigned int rx_tag_remove  :  1;   /* 19: Remove Rx VLAN tag */
++        unsigned int ipv6_rx_chksum :  1;   /* 18: IPv6 RX Checksum enable */
++        unsigned int ipv4_rx_chksum :  1;   /* 17: IPv4 RX Checksum enable */
++        unsigned int rgmii_en       :  1;   /* 16: RGMII in-band status enable */
++		unsigned int tx_fc_en		:  1;	/* 15: TX flow control enable */
++		unsigned int rx_fc_en		:  1;	/* 14: RX flow control enable */
++		unsigned int sim_test		:  1;	/* 13: speed up timers in simulation */
++		unsigned int dis_col		:  1;	/* 12: disable 16 collisions abort function */
++		unsigned int dis_bkoff		:  1;	/* 11: disable back-off function */
++		unsigned int max_len		:  3;	/* 8-10 maximum receive frame length allowed */
++		unsigned int adj_ifg		:  4;	/* 4-7: adjust IFG from 96+/-56 */
++        unsigned int flow_ctrl      :  1;   /* 3: flow control also triggered by Rx queues */
++		unsigned int loop_back		:  1;	/* 2: transmit data loopback enable */
++		unsigned int dis_rx			:  1;	/* 1: disable receive */
++		unsigned int dis_tx			:  1;	/* 0: disable transmit */
++#else
++		unsigned int dis_tx			:  1;	/* 0: disable transmit */
++		unsigned int dis_rx			:  1;	/* 1: disable receive */
++		unsigned int loop_back		:  1;	/* 2: transmit data loopback enable */
++        unsigned int flow_ctrl      :  1;   /* 3: flow control also triggered by Rx queues */
++		unsigned int adj_ifg		:  4;	/* 4-7: adjust IFG from 96+/-56 */
++		unsigned int max_len		:  3;	/* 8-10 maximum receive frame length allowed */
++		unsigned int dis_bkoff		:  1;	/* 11: disable back-off function */
++		unsigned int dis_col		:  1;	/* 12: disable 16 collisions abort function */
++		unsigned int sim_test		:  1;	/* 13: speed up timers in simulation */
++		unsigned int rx_fc_en		:  1;	/* 14: RX flow control enable */
++		unsigned int tx_fc_en		:  1;	/* 15: TX flow control enable */
++        unsigned int rgmii_en       :  1;   /* 16: RGMII in-band status enable */
++        unsigned int ipv4_rx_chksum :  1;   /* 17: IPv4 RX Checksum enable */
++        unsigned int ipv6_rx_chksum :  1;   /* 18: IPv6 RX Checksum enable */
++        unsigned int rx_tag_remove  :  1;   /* 19: Remove Rx VLAN tag */
++		unsigned int rgmm_edge		:  1;	// 20
++		unsigned int rxc_inv		:  1;	// 21
++		unsigned int ipv6_exthdr_order: 1;	// 22
++		unsigned int rx_err_detect  :  1;	// 23
++		unsigned int port0_chk_hwq	:  1;	// 24
++		unsigned int port1_chk_hwq	:  1;	// 25
++		unsigned int port0_chk_toeq	:  1;	// 26
++		unsigned int port1_chk_toeq	:  1;	// 27
++		unsigned int port0_chk_classq :  1;	// 28
++		unsigned int port1_chk_classq :  1;	// 29
++		unsigned int reserved		:  2;	// 31:30
++#endif
++	} bits;
++} GMAC_CONFIG0_T;
++
++/**********************************************************************
++ * GMAC Configuration 1
++ * GMAC0 Offset 0xA01C
++ * GMAC1 Offset 0xE01C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_001c
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int reserved		: 16;
++		unsigned int rel_threshold	: 8;	/* flow control release threshold */
++		unsigned int set_threshold	: 8; 	/* flow control set threshold */
++#else
++		unsigned int set_threshold	: 8; 	/* flow control set threshold */
++		unsigned int rel_threshold	: 8;	/* flow control release threshold */
++		unsigned int reserved		: 16;
++#endif
++	} bits;
++} GMAC_CONFIG1_T;
++
++#define GMAC_FLOWCTRL_SET_MAX		32
++#define GMAC_FLOWCTRL_SET_MIN		0
++#define GMAC_FLOWCTRL_RELEASE_MAX	32
++#define GMAC_FLOWCTRL_RELEASE_MIN	0
++
++/**********************************************************************
++ * GMAC Configuration 2
++ * GMAC0 Offset 0xA020
++ * GMAC1 Offset 0xE020
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_0020
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int rel_threshold	: 16;	/* flow control release threshold */
++		unsigned int set_threshold	: 16; 	/* flow control set threshold */
++#else
++		unsigned int set_threshold	: 16; 	/* flow control set threshold */
++		unsigned int rel_threshold	: 16;	/* flow control release threshold */
++#endif
++	} bits;
++} GMAC_CONFIG2_T;
++
++/**********************************************************************
++ * GMAC Configuration 3
++ * GMAC0 Offset 0xA024
++ * GMAC1 Offset 0xE024
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_0024
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int rel_threshold	: 16;	/* flow control release threshold */
++		unsigned int set_threshold	: 16; 	/* flow control set threshold */
++#else
++		unsigned int set_threshold	: 16; 	/* flow control set threshold */
++		unsigned int rel_threshold	: 16;	/* flow control release threshold */
++#endif
++	} bits;
++} GMAC_CONFIG3_T;
++
++
++/**********************************************************************
++ * GMAC STATUS
++ * GMAC0 Offset 0xA02C
++ * GMAC1 Offset 0xE02C
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit1_002c
++	{
++#if (BIG_ENDIAN==1)
++		unsigned int 				: 25;
++		unsigned int mii_rmii		:  2;   /* PHY interface type */
++		unsigned int reserved		:  1;
++		unsigned int duplex			:  1;	/* duplex mode */
++		unsigned int speed			:  2;	/* link speed (00: 10M, 01: 100M, 10: 1000M; clock 2.5/25/125 MHz) */
++		unsigned int link			:  1;	/* link status */
++#else
++		unsigned int link			:  1;	/* link status */
++		unsigned int speed			:  2;	/* link speed (00: 10M, 01: 100M, 10: 1000M; clock 2.5/25/125 MHz) */
++		unsigned int duplex			:  1;	/* duplex mode */
++		unsigned int reserved		:  1;
++		unsigned int mii_rmii		:  2;   /* PHY interface type */
++		unsigned int 				: 25;
++#endif
++	} bits;
++} GMAC_STATUS_T;
++
++#define GMAC_SPEED_10			0
++#define GMAC_SPEED_100			1
++#define GMAC_SPEED_1000			2
++
++#define GMAC_PHY_MII			0
++#define GMAC_PHY_GMII			1
++#define GMAC_PHY_RGMII_100		2
++#define GMAC_PHY_RGMII_1000		3
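++
++/*
++ * Illustrative sketch (not part of the original driver): translating a raw
++ * GMAC STATUS register value into link speed in Mb/s with the union and the
++ * GMAC_SPEED_* codes above. Returns 0 when the link is down.
++ */
++static inline int gmac_link_speed_mbps(unsigned int raw_status)
++{
++	GMAC_STATUS_T status;
++
++	status.bits32 = raw_status;
++	if (!status.bits.link)
++		return 0;			/* link down */
++	if (status.bits.speed == GMAC_SPEED_1000)
++		return 1000;
++	if (status.bits.speed == GMAC_SPEED_100)
++		return 100;
++	return 10;				/* GMAC_SPEED_10 */
++}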
++
++/**********************************************************************
++ * Queue Header
++ *	(1) TOE Queue Header
++ *	(2) Non-TOE Queue Header
++ *	(3) Interrupt Queue Header
++ *
++ * memory Layout
++ *	TOE Queue Header
++ *	 0x60003000 +---------------------------+ 0x0000
++ *				|     TOE Queue 0 Header	|
++ *				|         8 * 4 Bytes	    |
++ *				+---------------------------+ 0x0020
++ *				|     TOE Queue 1 Header  	|
++ *				|         8 * 4 Bytes		|
++ *				+---------------------------+ 0x0040
++ *				|   	......  			|
++ *				|      						|
++ *				+---------------------------+
++ *
++ *	Non TOE Queue Header
++ *	 0x60002000 +---------------------------+ 0x0000
++ *				|   Default Queue 0 Header  |
++ *				|         2 * 4 Bytes		|
++ *				+---------------------------+ 0x0008
++ *				|   Default Queue 1 Header	|
++ *				|         2 * 4 Bytes		|
++ *				+---------------------------+ 0x0010
++ *				|   Classification Queue 0	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+
++ *				|   Classification Queue 1	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+ (n * 8 + 0x10)
++ *				|   		...				|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+ (13 * 8 + 0x10)
++ *				|   Classification Queue 13	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+ 0x80
++ * 				|      Interrupt Queue 0	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+
++ * 				|      Interrupt Queue 1	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+
++ * 				|      Interrupt Queue 2	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+
++ * 				|      Interrupt Queue 3	|
++ *				|      	  2 * 4 Bytes		|
++ *				+---------------------------+
++ *
++ **********************************************************************/
++#define TOE_QUEUE_HDR_ADDR(n)		(TOE_TOE_QUE_HDR_BASE + n * 32)
++#define TOE_Q_HDR_AREA_END			(TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX+1))
++#define TOE_DEFAULT_Q0_HDR_BASE		(TOE_NONTOE_QUE_HDR_BASE + 0x00)
++#define TOE_DEFAULT_Q1_HDR_BASE		(TOE_NONTOE_QUE_HDR_BASE + 0x08)
++#define TOE_CLASS_Q_HDR_BASE		(TOE_NONTOE_QUE_HDR_BASE + 0x10)
++#define TOE_INTR_Q_HDR_BASE			(TOE_NONTOE_QUE_HDR_BASE + 0x80)
++#define INTERRUPT_QUEUE_HDR_ADDR(n)	(TOE_INTR_Q_HDR_BASE + n * 8)
++#define NONTOE_Q_HDR_AREA_END		(INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX+1))
++/**********************************************************************
++ * TOE Queue Header Word 0
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int base_size;
++} TOE_QHDR0_T;
++
++#define TOE_QHDR0_BASE_MASK 	(~0x0f)
++
++/**********************************************************************
++ * TOE Queue Header Word 1
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_qhdr1
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int wptr			: 16;	// bit 31:16
++		unsigned int rptr			: 16;	// bit 15:0
++#else
++		unsigned int rptr			: 16;	// bit 15:0
++		unsigned int wptr			: 16;	// bit 31:16
++#endif
++	} bits;
++} TOE_QHDR1_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 2
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_qhdr2
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int usd			: 1;	// bit 31		0: if no data assembled yet
++		unsigned int ctl			: 1;	// bit 30		1: have control flag bits (except ack)
++		unsigned int osq			: 1;	// bit 29		1: out of sequence
++		unsigned int sat			: 1;	// bit 28		1: SeqCnt > SeqThreshold, or AckCnt > AckThreshold
++		unsigned int ip_opt			: 1;	// bit 27		1: have IPV4 option or IPV6 Extension header
++		unsigned int tcp_opt		: 1;	// bit 26		1: Have TCP option
++		unsigned int abn			: 1;	// bit 25		1: Abnormal case Found
++		unsigned int dack			: 1;	// bit 24		1: Duplicated ACK
++		unsigned int reserved		: 7;	// bit 23:17
++		unsigned int TotalPktSize	: 17;	// bit 16: 0	Total packet size
++#else
++		unsigned int TotalPktSize	: 17;	// bit 16: 0	Total packet size
++		unsigned int reserved		: 7;	// bit 23:17
++		unsigned int dack			: 1;	// bit 24		1: Duplicated ACK
++		unsigned int abn			: 1;	// bit 25		1: Abnormal case Found
++		unsigned int tcp_opt		: 1;	// bit 26		1: Have TCP option
++		unsigned int ip_opt			: 1;	// bit 27		1: have IPV4 option or IPV6 Extension header
++		unsigned int sat			: 1;	// bit 28		1: SeqCnt > SeqThreshold, or AckCnt > AckThreshold
++		unsigned int osq			: 1;	// bit 29		1: out of sequence
++		unsigned int ctl			: 1;	// bit 30		1: have control flag bits (except ack)
++		unsigned int usd			: 1;	// bit 31		0: if no data assembled yet
++#endif
++	} bits;
++} TOE_QHDR2_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 3
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int seq_num;
++} TOE_QHDR3_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 4
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int ack_num;
++} TOE_QHDR4_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 5
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_qhdr5
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int SeqCnt		: 16;	// bit 31:16
++		unsigned int AckCnt		: 16;	// bit 15:0
++#else
++		unsigned int AckCnt		: 16;	// bit 15:0
++		unsigned int SeqCnt		: 16;	// bit 31:16
++#endif
++	} bits;
++} TOE_QHDR5_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 6
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_qhdr6
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int MaxPktSize	: 14;	// bit 31:18
++		unsigned int iq_num		: 2;	// bit 17:16
++		unsigned int WinSize	: 16;	// bit 15:0
++#else
++		unsigned int WinSize	: 16;	// bit 15:0
++		unsigned int iq_num		: 2;	// bit 17:16
++		unsigned int MaxPktSize	: 14;	// bit 31:18
++#endif
++	} bits;
++} TOE_QHDR6_T;
++
++/**********************************************************************
++ * TOE Queue Header Word 7
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_qhdr7
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int SeqThreshold	: 16;	// bit 31:16
++		unsigned int AckThreshold	: 16;	// bit 15:0
++#else
++		unsigned int AckThreshold	: 16;	// bit 15:0
++		unsigned int SeqThreshold	: 16;	// bit 31:16
++#endif
++	} bits;
++} TOE_QHDR7_T;
++
++/**********************************************************************
++ * TOE Queue Header
++ **********************************************************************/
++typedef struct
++{
++	TOE_QHDR0_T		word0;
++	TOE_QHDR1_T		word1;
++	TOE_QHDR2_T		word2;
++	TOE_QHDR3_T		word3;
++	TOE_QHDR4_T		word4;
++	TOE_QHDR5_T		word5;
++	TOE_QHDR6_T		word6;
++	TOE_QHDR7_T		word7;
++} TOE_QHDR_T;
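++
++/*
++ * Illustrative sketch (not part of the original driver): reading the hardware
++ * write pointer of TOE queue 'n' by overlaying TOE_QHDR_T on the queue header
++ * area laid out in the memory map above. Assumes TOE_TOE_QUE_HDR_BASE (used
++ * by TOE_QUEUE_HDR_ADDR) is already a CPU-visible address.
++ */
++static inline unsigned int toe_qhdr_read_wptr(int n)
++{
++	volatile TOE_QHDR_T *qhdr = (volatile TOE_QHDR_T *)TOE_QUEUE_HDR_ADDR(n);
++
++	return qhdr->word1.bits.wptr;	/* word 1 holds the read/write pointers */
++}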
++
++/**********************************************************************
++ * NONTOE Queue Header Word 0
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int base_size;
++} NONTOE_QHDR0_T;
++
++#define NONTOE_QHDR0_BASE_MASK 	(~0x0f)
++
++/**********************************************************************
++ * NONTOE Queue Header Word 1
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_nonqhdr1
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int wptr			: 16;	// bit 31:16
++		unsigned int rptr			: 16;	// bit 15:0
++#else
++		unsigned int rptr			: 16;	// bit 15:0
++		unsigned int wptr			: 16;	// bit 31:16
++#endif
++	} bits;
++} NONTOE_QHDR1_T;
++
++/**********************************************************************
++ * Non-TOE Queue Header
++ **********************************************************************/
++typedef struct
++{
++	NONTOE_QHDR0_T		word0;
++	NONTOE_QHDR1_T		word1;
++} NONTOE_QHDR_T;
++
++/**********************************************************************
++ * Interrupt Queue Header Word 0
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_intrqhdr0
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int wptr		: 16;	// bit 31:16	Write Pointer where hw stopped
++		unsigned int win_size	: 16;	// bit 15:0 	Descriptor Ring Size
++#else
++		unsigned int win_size	: 16;	// bit 15:0 	Descriptor Ring Size
++		unsigned int wptr		: 16;	// bit 31:16	Write Pointer where hw stopped
++#endif
++	} bits;
++} INTR_QHDR0_T;
++
++/**********************************************************************
++ * Interrupt Queue Header Word 1
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_intrqhdr1
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int ctl			: 1;	// bit 31		1: have control flag bits (except ack)
++		unsigned int osq			: 1;	// bit 30		1: out of sequence
++		unsigned int sat			: 1;	// bit 29		1: SeqCnt > SeqThreshold, or AckCnt > AckThreshold
++		unsigned int ip_opt			: 1;	// bit 28		1: have IPV4 option or IPV6 Extension header
++		unsigned int tcp_opt		: 1;	// bit 27		1: Have TCP option
++		unsigned int abn			: 1;	// bit 26		1: Abnormal case Found
++		unsigned int dack			: 1;	// bit 25		1: Duplicated ACK
++		unsigned int tcp_qid		: 8;	// bit 24:17	TCP Queue ID
++		unsigned int TotalPktSize	: 17;	// bit 16: 0	Total packet size
++#else
++		unsigned int TotalPktSize	: 17;	// bit 16: 0	Total packet size
++		unsigned int tcp_qid		: 8;	// bit 24:17	TCP Queue ID
++		unsigned int dack			: 1;	// bit 25		1: Duplicated ACK
++		unsigned int abn			: 1;	// bit 26		1: Abnormal case Found
++		unsigned int tcp_opt		: 1;	// bit 27		1: Have TCP option
++		unsigned int ip_opt			: 1;	// bit 28		1: have IPV4 option or IPV6 Extension header
++		unsigned int sat			: 1;	// bit 29		1: SeqCnt > SeqThreshold, or AckCnt > AckThreshold
++		unsigned int osq			: 1;	// bit 30		1: out of sequence
++		unsigned int ctl			: 1;	// bit 31		1: have control flag bits (except ack)
++#endif
++	} bits;
++} INTR_QHDR1_T;
++
++/**********************************************************************
++ * Interrupt Queue Header Word 2
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int seq_num;
++} INTR_QHDR2_T;
++
++/**********************************************************************
++ * Interrupt Queue Header Word 3
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	unsigned int ack_num;
++} INTR_QHDR3_T;
++
++/**********************************************************************
++ * Interrupt Queue Header Word 4
++ **********************************************************************/
++typedef union
++{
++	unsigned int bits32;
++	struct bit_intrqhdr4
++	{
++#if (BIG_ENDIAN==1)
++
++		unsigned int SeqCnt		: 16;	// bit 31:16	Seq# change since last seq# intr.
++		unsigned int AckCnt		: 16;	// bit 15:0     Ack# change since last ack# intr.
++#else
++		unsigned int AckCnt		: 16;	// bit 15:0		Ack# change since last ack# intr.
++		unsigned int SeqCnt		: 16;	// bit 31:16	Seq# change since last seq# intr.
++#endif
++	} bits;
++} INTR_QHDR4_T;
++
++/**********************************************************************
++ * Interrupt Queue Header
++ **********************************************************************/
++typedef struct
++{
++	INTR_QHDR0_T		word0;
++	INTR_QHDR1_T		word1;
++	INTR_QHDR2_T		word2;
++	INTR_QHDR3_T		word3;
++	INTR_QHDR4_T		word4;
++	unsigned int		word5;
++	unsigned int		word6;
++	unsigned int		word7;
++} INTR_QHDR_T;
++
++/**********************************************************************
++ * GMAC Conf
++ **********************************************************************/
++typedef struct gmac_conf {
++	struct net_device *dev;
++	int portmap;
++	int vid;
++	int flag;     /* 1: active  0: non-active */
++} sys_gmac_conf;
++
++/**********************************************************************
++ * GMAC private data
++ **********************************************************************/
++typedef struct {
++	unsigned int		rwptr_reg;
++	unsigned int		desc_base;
++	unsigned int		total_desc_num;
++	unsigned short		finished_idx;
++	GMAC_TXDESC_T		*curr_tx_desc;
++	GMAC_TXDESC_T		*curr_finished_desc;
++	struct sk_buff		*tx_skb[TX_DESC_NUM];
++	unsigned long		total_sent;
++	unsigned long		total_finished;
++	unsigned long		intr_cnt;
++} GMAC_SWTXQ_T;
++
++typedef struct {
++	unsigned int		desc_base;
++	unsigned long 		eof_cnt;
++} GMAC_HWTXQ_T;
++
++typedef struct gmac_private{
++	struct net_device	*dev;
++	unsigned int		existed;
++	unsigned int		port_id;	// 0 or 1
++	unsigned int		base_addr;
++	unsigned int		dma_base_addr;
++	unsigned char		*mac_addr1;
++	unsigned char		*mac_addr2;
++	unsigned int		swtxq_desc_base;
++	unsigned int		hwtxq_desc_base;
++	GMAC_SWTXQ_T		swtxq[TOE_SW_TXQ_NUM];
++	GMAC_HWTXQ_T		hwtxq[TOE_HW_TXQ_NUM];
++	NONTOE_QHDR_T		*default_qhdr;
++	unsigned int		default_desc_base;
++	unsigned int		default_desc_num;
++	unsigned int		rx_curr_desc;
++	DMA_RWPTR_T			rx_rwptr;
++	struct sk_buff		*curr_rx_skb;
++	dma_addr_t			default_desc_base_dma;
++	dma_addr_t			swtxq_desc_base_dma;
++	dma_addr_t			hwtxq_desc_base_dma;
++	unsigned int		irq;
++	unsigned int		flow_control_enable	;
++	unsigned int		pre_phy_status;
++	unsigned int		full_duplex_cfg;
++	unsigned int		speed_cfg;
++	unsigned int		auto_nego_cfg;
++	unsigned int		full_duplex_status;
++	unsigned int		speed_status;
++	unsigned int		phy_mode;	/* 0->MII 1->GMII 2->RGMII(10/100) 3->RGMII(1000) */
++	unsigned int		phy_addr;
++	unsigned int		intr0_enabled;	// 1: enabled
++	unsigned int		intr1_enabled;	// 1: enabled
++	unsigned int		intr2_enabled;	// 1: enabled
++	unsigned int		intr3_enabled;	// 1: enabled
++	unsigned int		intr4_enabled;	// 1: enabled
++//	unsigned int		intr4_enabled_1;	// 1: enabled
++	unsigned int		intr0_selected;	// 1: selected
++	unsigned int		intr1_selected;	// 1: selected
++	unsigned int		intr2_selected;	// 1: selected
++	unsigned int		intr3_selected;	// 1: selected
++	unsigned int		intr4_selected;	// 1: selected
++	// void 				(*gmac_rcv_handler)(struct sk_buff *, int);
++	struct net_device_stats ifStatics;
++	unsigned long		txDerr_cnt[GMAC_NUM];
++	unsigned long		txPerr_cnt[GMAC_NUM];
++	unsigned long		RxDerr_cnt[GMAC_NUM];
++	unsigned long		RxPerr_cnt[GMAC_NUM];
++	unsigned int		isr_rx_cnt;
++	unsigned int		isr_tx_cnt;
++	unsigned long		rx_discard;
++	unsigned long		rx_error;
++	unsigned long		rx_mcast;
++	unsigned long		rx_bcast;
++	unsigned long		rx_status_cnt[8];
++	unsigned long		rx_chksum_cnt[8];
++	unsigned long		rx_sta1_ucast;	// for STA 1 MAC Address
++	unsigned long		rx_sta2_ucast;	// for STA 2 MAC Address
++	unsigned long		mib_full_cnt;
++	unsigned long		rx_pause_on_cnt;
++	unsigned long		tx_pause_on_cnt;
++	unsigned long		rx_pause_off_cnt;
++	unsigned long		tx_pause_off_cnt;
++	unsigned long		rx_overrun_cnt;
++	unsigned long		status_changed_cnt;
++	unsigned long		default_q_cnt;
++	unsigned long		hw_fq_empty_cnt;
++	unsigned long		sw_fq_empty_cnt;
++	unsigned long		default_q_intr_cnt;
++	pid_t               thr_pid;
++	wait_queue_head_t   thr_wait;
++	struct completion   thr_exited;
++	spinlock_t          lock;
++	int                 time_to_die;
++	int                 operation;
++#ifdef SL351x_GMAC_WORKAROUND
++    unsigned long		short_frames_cnt;
++#endif
++}GMAC_INFO_T ;
++
++typedef struct toe_private {
++	unsigned int	swfq_desc_base;
++	unsigned int	hwfq_desc_base;
++	unsigned int	hwfq_buf_base;
++//	unsigned int	toe_desc_base[TOE_TOE_QUEUE_NUM];
++//	unsigned int	toe_desc_num;
++//	unsigned int	class_desc_base;
++//	unsigned int	class_desc_num;
++//	unsigned int	intr_desc_base;
++//	unsigned int	intr_desc_num;
++//	unsigned int	intr_buf_base;
++	DMA_RWPTR_T		fq_rx_rwptr;
++	GMAC_INFO_T		gmac[GMAC_NUM];
++	dma_addr_t		sw_freeq_desc_base_dma;
++	dma_addr_t		hw_freeq_desc_base_dma;
++	dma_addr_t		hwfq_buf_base_dma;
++	dma_addr_t		hwfq_buf_end_dma;
++//	dma_addr_t		toe_desc_base_dma[TOE_TOE_QUEUE_NUM];
++//	dma_addr_t		class_desc_base_dma;
++//	dma_addr_t		intr_desc_base_dma;
++//	dma_addr_t		intr_buf_base_dma;
++//	unsigned long	toe_iq_intr_full_cnt[TOE_INTR_QUEUE_NUM];
++//	unsigned long	toe_iq_intr_cnt[TOE_INTR_QUEUE_NUM];
++//	unsigned long	toe_q_intr_full_cnt[TOE_TOE_QUEUE_NUM];
++//	unsigned long	class_q_intr_full_cnt[TOE_CLASS_QUEUE_NUM];
++//	unsigned long	class_q_intr_cnt[TOE_CLASS_QUEUE_NUM];
++} TOE_INFO_T;
++
++extern TOE_INFO_T toe_private_data;
++
++#define GMAC_PORT0	0
++#define GMAC_PORT1	1
++/**********************************************************************
++ * PHY Definition
++ **********************************************************************/
++#define HPHY_ADDR   			0x01
++#define GPHY_ADDR   			0x02
++
++enum phy_state
++{
++    LINK_DOWN   = 0,
++    LINK_UP     = 1
++};
++
++/* transmit timeout value */
++
++#endif //_GMAC_SL351x_H
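
All per-port driver state hangs off the single TOE_INFO_T instance (toe_private_data) declared above, one GMAC_INFO_T per port. A rough sketch of how a routine could map a net_device back to its per-port info; the helper name is illustrative only and assumes the usual kernel headers:

static GMAC_INFO_T *gmac_info_from_dev(struct net_device *dev)
{
	int i;

	for (i = 0; i < GMAC_NUM; i++) {
		GMAC_INFO_T *tp = &toe_private_data.gmac[i];

		if (tp->existed && tp->dev == dev)
			return tp;
	}
	return NULL;
}
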
+Index: linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_hash_cfg.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_hash_cfg.h	2008-03-15 16:57:25.854761029 +0200
+@@ -0,0 +1,365 @@
++/*-----------------------------------------------------------------------------------
++*	sl351x_hash_cfg.h
++*
++*	Description:
++*	
++*	History:
++*
++*	9/14/2005	Gary Chen	Create
++*
++*-------------------------------------------------------------------------------------*/
++#ifndef _SL351x_HASH_CFG_H_
++#define _SL351x_HASH_CFG_H_	1
++
++// #define NAT_DEBUG_MSG	1
++// #define DEBUG_NAT_MIXED_HW_SW_TX	1
++#ifdef DEBUG_NAT_MIXED_HW_SW_TX
++	// #define NAT_DEBUG_LAN_HASH_TIMEOUT	1
++	// #define NAT_DEBUG_WAN_HASH_TIMEOUT	1
++#endif
++
++#define IPIV(a,b,c,d)		(((a)<<24)+((b)<<16)+((c)<<8)+(d))
++#define IPIV1(a)			(((a)>>24)&0xff)
++#define IPIV2(a)			(((a)>>16)&0xff)
++#define IPIV3(a)			(((a)>>8)&0xff)
++#define IPIV4(a)			((a)&0xff)
++
++#define HASH_MAX_BYTES			64	// 128
++#define HASH_ACTION_DWORDS		9
++#define HASH_MAX_DWORDS			(HASH_MAX_BYTES / sizeof(u32))
++#define HASH_MAX_KEY_DWORD		(HASH_MAX_DWORDS - HASH_ACTION_DWORDS)
++#define HASH_INIT_KEY			0x534C4F52
++#define HASH_BITS				12	// 12 : Normal, 7: Simulation
++#define HASH_TOTAL_ENTRIES		(1 << HASH_BITS)
++#define HASH_MAX_ENTRIES		(1 << 12)
++#define HASH_TOE_ENTRIES		(HASH_TOTAL_ENTRIES >> 5)
++#define HASH_BITS_MASK			((1 << HASH_BITS) - 1)
++
++#define hash_lock(lock)			// spin_lock_bh(lock)
++#define hash_unlock(lock)		// spin_unlock_bh(lock)
++
++/*----------------------------------------------------------------------
++ *  special macro
++ ----------------------------------------------------------------------*/
++#define HASH_PUSH_WORD(cp, data)	{*cp++ = (((u16)(data))     ) & 0xff; 	\
++							 		*cp++ = (((u16)(data)) >> 8) & 0xff;} 
++#define HASH_PUSH_DWORD(cp, data)	{*cp++ = (u8)(((u32)(data))      ) & 0xff;	\
++							  		*cp++ = (u8)(((u32)(data)) >>  8) & 0xff;	\
++							  		*cp++ = (u8)(((u32)(data)) >> 16) & 0xff;	\
++							  		*cp++ = (u8)(((u32)(data)) >> 24) & 0xff;}
++#define HASH_PUSH_BYTE(cp, data)	{*cp++ = ((u8)(data)) & 0xff;}
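
The push macros emit values byte by byte in little-endian order and advance the cursor as a side effect, so a caller only threads one u8 pointer through a sequence of pushes. A minimal sketch with made-up field values:

static void example_build_key(void)
{
	u8 key_buf[HASH_MAX_BYTES];
	u8 *cp = key_buf;

	HASH_PUSH_BYTE(cp, 0x01);			/* e.g. a port id       */
	HASH_PUSH_WORD(cp, 0x0800);			/* e.g. an Ethertype    */
	HASH_PUSH_DWORD(cp, 0xc0a80101);	/* e.g. an IPv4 address */
	/* cp now points 7 bytes past key_buf, ready for the next field */
}
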
++
++/*----------------------------------------------------------------------
++ *  key
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u8		port;
++	u16		Ethertype;
++	u8		da[6];
++	u8		sa[6];
++	u16		pppoe_sid;	
++	u16		vlan_id;	
++	u8		ipv4_hdrlen;	
++	u8		ip_tos;	
++	u8		ip_protocol;	
++	u32		ipv6_flow_label;
++	u8		sip[16];
++	u8		dip[16];
++	//__u32			sip[4];
++	//__u32			dip[4];
++	u8		l4_bytes[24];
++	u8		l7_bytes[24];
++	u8		ipv6;	// 1: IPv6, 0: IPV4
++} ENTRY_KEY_T;
++
++/*----------------------------------------------------------------------
++ *  key for NAT
++ *	Note: packed
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u16		Ethertype;		// not used
++	u8		port_id;
++	u8		rule_id;
++	u8		ip_protocol;
++	u8		reserved1;		// ip_tos, not used
++	u16		reserved2;		// not used
++	u32		sip;
++	u32		dip;
++	u16		sport;
++	u16		dport;
++} NAT_KEY_T;
++
++#define NAT_KEY_DWORD_SIZE	(sizeof(NAT_KEY_T)/sizeof(u32))
++#define NAT_KEY_SIZE		(sizeof(NAT_KEY_T))
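
A NAT key is this structure zeroed and then filled field by field. The sketch below uses made-up addresses and ports, assumes IPPROTO_TCP and memset() from the usual kernel headers, and leaves the byte order of sip/dip/sport/dport to whatever the driver's key-building code expects:

static void example_fill_nat_key(NAT_KEY_T *key)
{
	memset(key, 0, NAT_KEY_SIZE);
	key->port_id     = 0;					/* e.g. the LAN port            */
	key->rule_id     = 1;					/* rule id chosen by the driver */
	key->ip_protocol = IPPROTO_TCP;
	key->sip         = IPIV(192,168,1,100);	/* made-up addresses and ports  */
	key->dip         = IPIV(10,0,0,1);
	key->sport       = 1024;
	key->dport       = 80;
}
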
++
++/*----------------------------------------------------------------------
++ *  key for GRE (PPTP)
++ *	Note: packed
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u16		Ethertype;		// not used
++	u8		port_id;
++	u8		rule_id;
++	u8		ip_protocol;
++	u8		reserved1;		// ip_tos, not used
++	u16		reserved2;		// not used
++	u32		sip;
++	u32		dip;
++	u16		reserved3;
++	u16		protocol;
++	u16		reserved4;
++	u16		call_id;
++} GRE_KEY_T;
++
++#define GRE_KEY_DWORD_SIZE	(sizeof(GRE_KEY_T)/sizeof(u32))
++#define GRE_KEY_SIZE		(sizeof(GRE_KEY_T))
++/*----------------------------------------------------------------------
++ *  key present or not
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u32		port			: 1;
++	u32		Ethertype		: 1;
++	u32		da				: 1;
++	u32		sa				: 1;
++	u32		pppoe_sid		: 1;	
++	u32		vlan_id			: 1;	
++	u32		ipv4_hdrlen		: 1;	
++	u32		ip_tos			: 1;
++	u32		ip_protocol		: 1;	
++	u32		ipv6_flow_label	: 1;
++	u32		sip				: 1;
++	u32		dip				: 1;
++	u32		l4_bytes_0_3	: 1;
++	u32		l4_bytes_4_7	: 1;
++	u32		l4_bytes_8_11	: 1;
++	u32		l4_bytes_12_15	: 1;
++	u32		l4_bytes_16_19	: 1;
++	u32		l4_bytes_20_23	: 1;
++	u32		l7_bytes_0_3	: 1;
++	u32		l7_bytes_4_7	: 1;
++	u32		l7_bytes_8_11	: 1;
++	u32		l7_bytes_12_15	: 1;
++	u32		l7_bytes_16_19	: 1;
++	u32		l7_bytes_20_23	: 1;
++	u32		reserved		: 8;
++} KEY_FIELD_T;
++
++/*----------------------------------------------------------------------
++ *  action
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u32		reserved0	: 5;	// bit 0:4
++	u32		pppoe		: 2;	// bit 5:6
++	u32		vlan		: 2;	// bit 7:8
++	u32		sa			: 1;	// bit 9
++	u32		da			: 1;	// bit 10
++	u32		Dport		: 1;	// bit 11
++	u32		Sport		: 1;	// bit 12
++	u32		Dip			: 1;	// bit 13
++	u32		Sip			: 1;	// bit 14
++	u32		sw_id		: 1;	// bit 15
++	u32		frag		: 1;	// bit 16
++	u32		option		: 1;	// bit 17
++	u32		ttl_0		: 1;	// bit 18
++	u32		ttl_1		: 1;	// bit 19
++	u32		mtu			: 1;	// bit 20
++	u32		exception	: 1;	// bit 21
++	u32		srce_qid	: 1;	// bit 22
++	u32		discard		: 1;	// bit 23
++	u32		dest_qid	: 8;	// bit 24:31
++} ENTRY_ACTION_T;
++
++#define ACTION_DISCARD_BIT		BIT(23)
++#define ACTION_SRCE_QID_BIT		BIT(22)
++#define ACTION_EXCEPTION_BIT	BIT(21)
++#define ACTION_MTU_BIT			BIT(20)
++#define ACTION_TTL_1_BIT		BIT(19)
++#define ACTION_TTL_0_BIT		BIT(18)
++#define ACTION_IP_OPTION		BIT(17)
++#define ACTION_FRAG_BIT			BIT(16)
++#define ACTION_SWID_BIT			BIT(15)
++#define ACTION_SIP_BIT			BIT(14)
++#define ACTION_DIP_BIT			BIT(13)
++#define ACTION_SPORT_BIT		BIT(12)
++#define ACTION_DPORT_BIT		BIT(11)
++#define ACTION_DA_BIT			BIT(10)
++#define ACTION_SA_BIT			BIT(9)
++#define ACTION_VLAN_DEL_BIT		BIT(8)
++#define ACTION_VLAN_INS_BIT		BIT(7)
++#define ACTION_PPPOE_DEL_BIT	BIT(6)
++#define ACTION_PPPOE_INS_BIT	BIT(5)
++#define ACTION_L4_THIRD_BIT		BIT(4)
++#define ACTION_L4_FOURTH_BIT	BIT(3)
++
++#define NAT_ACTION_BITS			(ACTION_SRCE_QID_BIT  | ACTION_EXCEPTION_BIT |	\
++								ACTION_TTL_1_BIT | ACTION_TTL_0_BIT | 			\
++								ACTION_IP_OPTION | ACTION_FRAG_BIT |			\
++								ACTION_DA_BIT | ACTION_SA_BIT)
++#define NAT_LAN2WAN_ACTIONS		(NAT_ACTION_BITS | ACTION_SIP_BIT | ACTION_SPORT_BIT)
++#define NAT_WAN2LAN_ACTIONS		(NAT_ACTION_BITS | ACTION_DIP_BIT | ACTION_DPORT_BIT)
++#define NAT_PPPOE_LAN2WAN_ACTIONS	(NAT_LAN2WAN_ACTIONS | ACTION_PPPOE_INS_BIT)
++#define NAT_PPPOE_WAN2LAN_ACTIONS	(NAT_WAN2LAN_ACTIONS | ACTION_PPPOE_DEL_BIT)
++#define NAT_PPTP_LAN2WAN_ACTIONS	(NAT_ACTION_BITS | ACTION_SIP_BIT | ACTION_L4_FOURTH_BIT)
++#define NAT_PPTP_WAN2LAN_ACTIONS	(NAT_ACTION_BITS | ACTION_DIP_BIT | ACTION_L4_FOURTH_BIT)
++#define NAT_PPPOE_PPTP_LAN2WAN_ACTIONS	(NAT_PPTP_LAN2WAN_ACTIONS | ACTION_PPPOE_INS_BIT)
++#define NAT_PPPOE_PPTP_WAN2LAN_ACTIONS	(NAT_PPTP_WAN2LAN_ACTIONS | ACTION_PPPOE_DEL_BIT)
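
The per-direction macros are plain OR-combinations of the ACTION_* bits; the destination queue id still has to be merged into bits 24:31 of the action word (see ENTRY_ACTION_T above). A hedged sketch with an arbitrary queue id:

static u32 example_lan2wan_action(u8 dest_qid)
{
	u32 action = NAT_LAN2WAN_ACTIONS;

	/* dest_qid occupies bits 24:31 of the action word (see ENTRY_ACTION_T) */
	action |= ((u32)dest_qid) << 24;
	return action;
}
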
++								
++/*----------------------------------------------------------------------
++ *  parameter
++ ----------------------------------------------------------------------*/
++typedef struct {
++	u8		da[6];
++	u8		sa[6];
++	u16		vlan;	
++	u16  	pppoe;	
++	u32		Sip;
++	u32		Dip;
++	u16  	Sport;	
++	u16  	Dport;	
++	u16  	sw_id;	
++	u16  	mtu;	
++} ENTRY_PARAM_T;
++
++/*----------------------------------------------------------------------
++ *  Hash Entry
++ ----------------------------------------------------------------------*/
++typedef struct {
++	char			rule;
++	ENTRY_KEY_T		key;
++	KEY_FIELD_T		key_present;
++	ENTRY_ACTION_T	action;
++	ENTRY_PARAM_T	param;
++	int				index;
++	int				total_dwords;
++} HASH_ENTRY_T;
++
++/*----------------------------------------------------------------------
++ *  Hash Entry Timeout
++ ----------------------------------------------------------------------*/
++typedef struct {
++	short	counter;
++	short	interval;
++} HASH_TIMEOUT_T;
++
++/*----------------------------------------------------------------------
++ *  NAT Hash Entry for TCP/UDP protocol
++ ----------------------------------------------------------------------*/
++typedef struct {
++	NAT_KEY_T			key;
++	union {
++		u32				dword;
++		ENTRY_ACTION_T	bits;
++	} action;
++	ENTRY_PARAM_T		param;
++	HASH_TIMEOUT_T		tmo;	// used by software only, to use memory space efficiently
++} NAT_HASH_ENTRY_T;
++
++#define NAT_HASH_ENTRY_SIZE		(sizeof(NAT_HASH_ENTRY_T))
++
++/*----------------------------------------------------------------------
++ *  GRE Hash Entry for PPTP/GRE protocol
++ ----------------------------------------------------------------------*/
++typedef struct {
++	GRE_KEY_T			key;
++	union {
++		u32				dword;
++		ENTRY_ACTION_T	bits;
++	} action;
++	ENTRY_PARAM_T		param;
++	HASH_TIMEOUT_T		tmo;	// used by software only, to use memory space efficiently
++} GRE_HASH_ENTRY_T;
++
++#define GRE_HASH_ENTRY_SIZE		(sizeof(GRE_HASH_ENTRY_T))
++
++/*----------------------------------------------------------------------
++ *  External Variables
++ ----------------------------------------------------------------------*/
++extern char				hash_tables[HASH_TOTAL_ENTRIES][HASH_MAX_BYTES] __attribute__ ((aligned(16)));
++extern u32				hash_nat_owner_bits[HASH_TOTAL_ENTRIES/32];
++/*----------------------------------------------------------------------
++* hash_get_valid_flag
++*----------------------------------------------------------------------*/
++static inline int hash_get_valid_flag(int index)
++{
++	volatile u32 *hash_valid_bits_ptr = (volatile u32 *)TOE_V_BIT_BASE;
++
++#ifdef SL351x_GMAC_WORKAROUND
++	if (index >= (0x80 * 8) && index < (0x8c * 8))
++		return 1;
++#endif	
++	return (hash_valid_bits_ptr[index/32] & (1 << (index %32)));
++}
++
++/*----------------------------------------------------------------------
++* hash_get_nat_owner_flag
++*----------------------------------------------------------------------*/
++static inline int hash_get_nat_owner_flag(int index)
++{
++	return (hash_nat_owner_bits[index/32] & (1 << (index %32)));
++}
++
++/*----------------------------------------------------------------------
++* hash_validate_entry
++*----------------------------------------------------------------------*/
++static inline void hash_validate_entry(int index)
++{
++	volatile u32	*hash_valid_bits_ptr = (volatile u32 *)TOE_V_BIT_BASE;
++	register int	ptr = index/32, bits = 1 << (index %32);
++	
++	hash_valid_bits_ptr[ptr] |= bits;
++}
++
++/*----------------------------------------------------------------------
++* hash_invalidate_entry
++*----------------------------------------------------------------------*/
++static inline void hash_invalidate_entry(int index)
++{
++	volatile u32 *hash_valid_bits_ptr = (volatile u32 *)TOE_V_BIT_BASE;
++	register int	ptr = index/32, bits = 1 << (index %32);
++	
++	hash_valid_bits_ptr[ptr] &= ~(bits);
++}
++
++/*----------------------------------------------------------------------
++* hash_nat_enable_owner
++*----------------------------------------------------------------------*/
++static inline void hash_nat_enable_owner(int index)
++{
++	hash_nat_owner_bits[index/32] |= (1 << (index % 32));
++}
++
++/*----------------------------------------------------------------------
++* hash_nat_disable_owner
++*----------------------------------------------------------------------*/
++static inline void hash_nat_disable_owner(int index)
++{
++	hash_nat_owner_bits[index/32] &= ~(1 << (index % 32));
++}
++
++/*----------------------------------------------------------------------
++* hash_get_entry
++*----------------------------------------------------------------------*/
++static inline void *hash_get_entry(int index)
++{
++	return (void*) &hash_tables[index][0];
++}
++
++/*----------------------------------------------------------------------
++* Functions
++*----------------------------------------------------------------------*/
++extern int hash_add_entry(HASH_ENTRY_T *entry);
++extern void sl351x_hash_init(void);
++extern void hash_set_valid_flag(int index, int valid);
++extern void hash_set_nat_owner_flag(int index, int valid);
++extern int hash_build_keys(u32 *destp, HASH_ENTRY_T *entry);
++extern void hash_build_nat_keys(u32 *destp, HASH_ENTRY_T *entry);
++extern int hash_write_entry(HASH_ENTRY_T *entry, u8 *key);
++extern	u16 hash_crc16(u16 crc, u8 *datap, u32 len);
++extern	u16 hash_gen_crc16(u8 *datap, u32 len);
++
++#endif // _SL351x_HASH_CFG_H_
++
++
++
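
The inline helpers above maintain two parallel bitmaps: the hardware valid bits at TOE_V_BIT_BASE and the software-only hash_nat_owner_bits array. A rough usage sketch; the index is arbitrary, the entry layout comes from NAT_HASH_ENTRY_T, and the exact ordering relative to hash_write_entry() is left to the driver:

static void example_install_entry(int index)
{
	NAT_HASH_ENTRY_T *entry = (NAT_HASH_ENTRY_T *)hash_get_entry(index);

	/* fill entry->key, entry->action and entry->param here, then write the
	 * entry out to the hardware hash table before validating it */
	(void)entry;

	hash_nat_enable_owner(index);	/* bookkeeping: NAT code owns this index */
	hash_validate_entry(index);		/* set the hardware valid bit            */

	if (!hash_get_valid_flag(index))
		printk("hash entry %d did not validate\n", index);
}
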
+Index: linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_nat_cfg.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_nat_cfg.h	2008-03-15 16:57:25.854761029 +0200
+@@ -0,0 +1,211 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.                
++*--------------------------------------------------------------------------
++*	sl351x_nat_cfg.h
++*
++*	Description:
++*		- Define the Device Control Commands for NAT Configuration
++*	
++*	History:
++*
++*	4/28/2006	Gary Chen	Create
++*
++*-----------------------------------------------------------------------------*/
++#ifndef _SL351x_NAT_CFG_H_
++#define _SL351x_NAT_CFG_H_	1
++
++/*----------------------------------------------------------------------
++* Configuration
++*----------------------------------------------------------------------*/
++#ifdef CONFIG_NETFILTER
++#define CONFIG_SL351x_NAT			1
++#undef CONFIG_SL351x_NAT
++#undef CONFIG_SL351x_SYSCTL
++#endif
++#define CONFIG_NAT_MAX_IP_NUM		4	// per device (eth0 or eth1)
++#define CONFIG_NAT_MAX_XPORT		64
++#define CONFIG_NAT_MAX_WRULE		16	// per Queue
++#define CONFIG_NAT_TXQ_NUM			4
++/*----------------------------------------------------------------------
++* Command set
++*----------------------------------------------------------------------*/
++#define SIOCDEVSL351x	SIOCDEVPRIVATE	// 0x89F0
++#define NATSSTATUS		0
++#define NATGSTATUS		1
++#define NATSETPORT		2
++#define NATGETPORT		3
++#define NATADDIP		4
++#define NATDELIP		5
++#define NATGETIP		6
++#define NATAXPORT		7
++#define NATDXPORT		8
++#define NATGXPORT		9
++#define NATSWEIGHT		10
++#define NATGWEIGHT		11
++#define NATAWRULE		12
++#define NATDWRULE		13
++#define NATGWRULE		14
++#define NATSDEFQ		15
++#define NATGDEFQ		16
++#define NATRMIPCFG		17		// remove IP config
++#define NATTESTENTRY	18
++#define NATSETMEM		19
++#define NATSHOWMEM		20
++/*----------------------------------------------------------------------
++* Command Structure
++*----------------------------------------------------------------------*/
++// Common Header
++typedef struct {
++	unsigned short		cmd;	// command ID
++	unsigned short		len;	// data length, excluding this header
++} NATCMD_HDR_T;
++
++// NATSSTATUS & NATGSTATUS commands
++typedef struct {
++	unsigned char		enable;
++} NAT_STATUS_T;	
++
++// NATSETPORT & NATGETPORT commands
++typedef struct {
++	unsigned char		portmap;
++} NAT_PORTCFG_T;
++
++typedef struct {
++	unsigned int		ipaddr;
++	unsigned int		netmask;
++} NAT_IP_ENTRY_T;
++
++// NATADDIP & NATDELIP commands
++typedef struct {
++	NAT_IP_ENTRY_T	entry;
++} NAT_IPCFG_T;
++
++// NATGETIP command
++typedef struct {
++	unsigned int	total;
++	NAT_IP_ENTRY_T	entry[CONFIG_NAT_MAX_IP_NUM];
++} NAT_IPCFG_ALL_T;
++
++typedef struct {
++	unsigned int		protocol;
++	unsigned short		sport_start;
++	unsigned short		sport_end;
++	unsigned short		dport_start;
++	unsigned short		dport_end;
++} NAT_XPORT_ENTRY_T;
++
++// NATAXPORT & NATDXPORT Commands
++typedef struct {
++	NAT_XPORT_ENTRY_T	entry;
++} NAT_XPORT_T;
++
++// NATGXPORT Command
++typedef struct {
++	unsigned int		total;
++	NAT_XPORT_ENTRY_T	entry[CONFIG_NAT_MAX_XPORT];
++} NAT_XPORT_ALL_T;
++
++// NATSWEIGHT & NATGWEIGHT Commands
++typedef struct {
++	unsigned char		weight[CONFIG_NAT_TXQ_NUM];
++} NAT_WEIGHT_T;
++
++typedef struct {
++	unsigned int		protocol;
++	unsigned int		sip_start;
++	unsigned int		sip_end;
++	unsigned int		dip_start;
++	unsigned int		dip_end;
++	unsigned short		sport_start;
++	unsigned short		sport_end;
++	unsigned short		dport_start;
++	unsigned short		dport_end;
++} NAT_WRULE_ENTRY_T;	
++
++// NATAWRULE & NATDWRULE Commands
++typedef struct {
++	unsigned int		qid;
++	NAT_WRULE_ENTRY_T	entry;
++} NAT_WRULE_T;
++
++// NATGWRULE Command
++typedef struct {
++	unsigned int		total;
++	NAT_WRULE_ENTRY_T	entry[CONFIG_NAT_MAX_WRULE];
++} NAT_WRULE_ALL_T;
++
++// NATSDEFQ & NATGDEFQ commands
++typedef struct {
++	unsigned int		qid;
++} NAT_QUEUE_T;	
++
++// NATTESTENTRY 
++typedef struct {
++	u_int16_t		cmd;	// command ID
++	u_int16_t		len;	// data length, excluding this header
++	u_int8_t		init_enable;
++} NAT_TESTENTRY_T;	
++	
++typedef union
++{
++	NAT_STATUS_T		status;
++	NAT_PORTCFG_T		portcfg;
++	NAT_IPCFG_T			ipcfg;
++	NAT_XPORT_T			xport;
++	NAT_WEIGHT_T		weight;
++	NAT_WRULE_T			wrule;
++	NAT_QUEUE_T			queue;
++	NAT_TESTENTRY_T		init_entry;
++} NAT_REQ_E;
++	
++/*----------------------------------------------------------------------
++* NAT Configuration
++*	- Used by driver only
++*----------------------------------------------------------------------*/
++typedef struct {
++	unsigned int		enabled;
++	unsigned int		init_enabled;
++	unsigned int		tcp_udp_rule_id;
++	unsigned int		gre_rule_id;
++	unsigned int		lan_port;
++	unsigned int		wan_port;
++	unsigned int		default_hw_txq;
++	short				tcp_tmo_interval;
++	short				udp_tmo_interval;
++	short				gre_tmo_interval;
++	NAT_IPCFG_ALL_T		ipcfg[2];	// LAN/WAN port
++	NAT_XPORT_ALL_T		xport;
++	NAT_WEIGHT_T		weight;
++	NAT_WRULE_ALL_T		wrule[CONFIG_NAT_TXQ_NUM];
++} NAT_CFG_T;
++
++/*----------------------------------------------------------------------
++* NAT Control Block
++*	- Used by driver only
++*	- Stores LAN-IN or WAN-IN information
++*	- The WAN-OUT and LAN-OUT paths use it to build up a hash entry
++*	- NOTES: When updating this data structure, take care of alignment, and
++*	         make sure that the size of the skbuff structure is larger
++*	         than (40 + sizeof(NAT_CB_T))
++*----------------------------------------------------------------------*/
++typedef struct {
++	unsigned short		tag;
++	unsigned char		sa[6];
++	unsigned int		sip;
++	unsigned int		dip;
++	unsigned short		sport;
++	unsigned short		dport;
++	unsigned char		pppoe_frame;
++	unsigned char		state;			// same as enum tcp_conntrack
++	unsigned char		reserved[2];
++} NAT_CB_T;
++
++#define NAT_CB_TAG		0x4C53	// "SL"
++#define NAT_CB_SIZE		sizeof(NAT_CB_T)
++// #define NAT_SKB_CB(skb)	(NAT_CB_T *)(((unsigned int)&((skb)->cb[40]) + 3) & ~3)  // for align 4
++#define NAT_SKB_CB(skb)	(NAT_CB_T *)&((skb)->cb[40])  // for align 4
++
++#endif // _SL351x_NAT_CFG_H_
++
++
++
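
The command set is reached through the private ioctl SIOCDEVSL351x. A userspace sketch under the assumption that ifr_data points at a NATCMD_HDR_T immediately followed by the command payload; this header does not spell out the marshalling, so treat the layout as illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
/* plus the NAT command definitions from this header */

int main(void)
{
	struct {
		NATCMD_HDR_T	hdr;
		NAT_STATUS_T	status;
	} req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = NATGSTATUS;		/* query whether hardware NAT is enabled */
	req.hdr.len = sizeof(NAT_STATUS_T);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
	ifr.ifr_data = (char *)&req;

	if (fd >= 0 && ioctl(fd, SIOCDEVSL351x, &ifr) == 0)
		printf("NAT enabled: %d\n", req.status.enable);
	return 0;
}
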
+Index: linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_toe.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.23.16/include/asm-arm/arch-sl2312/sl351x_toe.h	2008-03-15 16:57:25.854761029 +0200
+@@ -0,0 +1,88 @@
++/**************************************************************************
++* Copyright 2006 StorLink Semiconductors, Inc.  All rights reserved.
++*--------------------------------------------------------------------------
++* Name			: sl351x_toe.h
++* Description	:
++*		Define for TOE driver of Storlink SL351x
++*
++* History
++*
++*	Date		Writer		Description
++*----------------------------------------------------------------------------
++*				Xiaochong	Create
++*
++****************************************************************************/
++#ifndef __SL351x_TOE_H
++#define __SL351x_TOE_H	1
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++/*
++ * TOE_CONN_T is the data structure for TCP connection info, used at both
++ * the device layer and the kernel TCP layer
++ * skb is the jumbo frame
++ */
++
++struct toe_conn{
++	__u8	qid;		// connection qid 0~63.
++	__u8	ip_ver;		// 0: not used; 4: ipv4; 6: ipv6.
++	/* hash key of the connection */
++	__u16	source;
++	__u16	dest;
++	__u32	saddr[4];
++	__u32	daddr[4];
++
++	__u32	seq;
++	__u32	ack_seq;
++
++	/* these fields are used to set TOE QHDR */
++	__u32	ack_threshold;
++	__u32	seq_threshold;
++	__u16	max_pktsize;
++
++	/* used by sw toe, accumulated ack_seq of ack frames */
++	__u16	ack_cnt;
++	/* used by sw toe, accumulated data frames held at driver */
++	__u16	cur_pktsize;
++
++	__u8	status;
++#define	TCP_CONN_UNDEFINE		0X00
++#define	TCP_CONN_CREATION		0X01
++#define	TCP_CONN_CONNECTING		0X02
++#define	TCP_CONN_ESTABLISHED	0X04
++#define	TCP_CONN_RESET			0X08	// used for out-of-order segments or
++                      			    	// when the congestion window is small
++#define	TCP_CONN_CLOSING		0X10
++#define	TCP_CONN_CLOSED			0x11
++
++	__u16	hash_entry_index;	/* associated hash entry */
++
++	// One timer per connection. Otherwise all connections would have to be
++	// scanned on each timeout interrupt, which fires whether or not a
++	// connection has actually timed out.
++	struct timer_list	rx_timer;
++	unsigned long		last_rx_jiffies;
++	GMAC_INFO_T			*gmac;
++	struct net_device	*dev;
++
++	//	for generating pure ack frame.
++	struct ethhdr		l2_hdr;
++	struct iphdr		l3_hdr;
++
++	spinlock_t			conn_lock;
++	DMA_RWPTR_T			toeq_rwptr;
++	GMAC_RXDESC_T		*curr_desc;
++	struct sk_buff		*curr_rx_skb;
++};
++
++struct jumbo_frame {
++	struct sk_buff	*skb0;		// the head of jumbo frame
++	struct sk_buff	*tail;		// the tail of jumbo frame
++	struct iphdr	*iphdr0;	// the ip hdr of skb0.
++	struct tcphdr	*tcphdr0;	// the tcp hdr of skb0.
++};
++
++#endif // __SL351x_TOE_H
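
A jumbo frame is tracked by the head/tail skb pair plus cached header pointers of the head segment. A rough sketch of seeding the structure from the first segment of a flow, assuming the standard ip_hdr()/tcp_hdr() helpers; how later segments are chained onto tail is left to the driver:

static void jumbo_frame_start(struct jumbo_frame *jumbo, struct sk_buff *skb)
{
	jumbo->skb0    = skb;			/* head of the jumbo frame        */
	jumbo->tail    = skb;			/* last segment appended so far   */
	jumbo->iphdr0  = ip_hdr(skb);	/* IP header of the head segment  */
	jumbo->tcphdr0 = tcp_hdr(skb);	/* TCP header of the head segment */
}
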
-- 