movl %esp, %ebp
subl $os_regs, %ebp
-- /* Load the stack pointer */
++ /* Load the stack pointer and convert it to physical address */
movl 52(%esp), %esp
++ addl %ebp, %esp
/* Enable the virtual addresses */
leal _phys_to_virt(%ebp), %eax
/*** Global variables ***/
static struct
{
++ unsigned int is3c556;
unsigned char isBrev;
unsigned char CurrentWindow;
unsigned int IOAddr;
while((1<<15) & inw(ioaddr + regEepromCommand_0_w));
/** Read the value. **/
-- outw(address + ((0x02)<<6), ioaddr + regEepromCommand_0_w);
++ if (INF_3C90X.is3c556)
++ {
++ outw(address + (0x230), ioaddr + regEepromCommand_0_w);
++ }
++ else
++ {
++ outw(address + ((0x02)<<6), ioaddr + regEepromCommand_0_w);
++ }
++
while((1<<15) & inw(ioaddr + regEepromCommand_0_w));
val = inw(ioaddr + regEepromData_0_w);
nic->ioaddr = pci->ioaddr;
nic->irqno = 0;
++ INF_3C90X.is3c556 = (pci->dev_id == 0x6055);
INF_3C90X.IOAddr = pci->ioaddr & ~3;
INF_3C90X.CurrentWindow = 255;
switch (a3c90x_internal_ReadEeprom(INF_3C90X.IOAddr, 0x03))
INF_3C90X.HWAddr[5] = eeprom[HWADDR_OFFSET + 2]&0xFF;
printf("MAC Address = %!\n", INF_3C90X.HWAddr);
++ /** 3C556: Invert MII power **/
++ if (INF_3C90X.is3c556) {
++ unsigned int tmp;
++ a3c90x_internal_SetWindow(INF_3C90X.IOAddr, winAddressing2);
++ tmp = inw(INF_3C90X.IOAddr + regResetOptions_2_w);
++ tmp |= 0x4000;
++ outw(tmp, INF_3C90X.IOAddr + regResetOptions_2_w);
++ }
++
/* Test if the link is good, if not continue */
a3c90x_internal_SetWindow(INF_3C90X.IOAddr, winDiagnostics4);
mstat = inw(INF_3C90X.IOAddr + regMediaStatus_4_w);
static struct pci_id a3c90x_nics[] = {
/* Original 90x revisions: */
++PCI_ROM(0x10b7, 0x6055, "3c556", "3C556"), /* Hurricane */
PCI_ROM(0x10b7, 0x9000, "3c905-tpo", "3Com900-TPO"), /* 10 Base TPO */
PCI_ROM(0x10b7, 0x9001, "3c905-t4", "3Com900-Combo"), /* 10/100 T4 */
PCI_ROM(0x10b7, 0x9050, "3c905-tpo100", "3Com905-TX"), /* 100 Base TX / 10/100 TPO */
--- /dev/null
--- /dev/null
++/**************************************************************************
++ *
++ * Etherboot driver for Level 5 Etherfabric network cards
++ *
++ * Written by Michael Brown <mbrown@fensystems.co.uk>
++ *
++ * Copyright Fen Systems Ltd. 2005
++ * Copyright Level 5 Networks Inc. 2005
++ *
++ * This software may be used and distributed according to the terms of
++ * the GNU General Public License (GPL), incorporated herein by
++ * reference. Drivers based on or derived from this code fall under
++ * the GPL and must retain the authorship, copyright and license
++ * notice.
++ *
++ **************************************************************************
++ */
++
++#include "etherboot.h"
++#include "nic.h"
++#include "pci.h"
++#include "timer.h"
++#define dma_addr_t unsigned long
++#include "etherfabric.h"
++
++/**************************************************************************
++ *
++ * Constants and macros
++ *
++ **************************************************************************
++ */
++
++#define DBG(...)
++
++#define EFAB_ASSERT(x) \
++ do { \
++ if ( ! (x) ) { \
++ DBG ( "ASSERT(%s) failed at %s line %d [%s]\n", #x, \
++ __FILE__, __LINE__, __FUNCTION__ ); \
++ } \
++ } while (0)
++
++#define EFAB_TRACE(...)
++
++#define EFAB_REGDUMP(...)
++
++#define FALCON_USE_IO_BAR 1
++
++/*
++ * EtherFabric constants
++ *
++ */
++
++/* PCI Definitions */
++#define EFAB_VENDID_LEVEL5 0x1924
++#define FALCON_P_DEVID 0x0703 /* Temporary PCI ID */
++#define EF1002_DEVID 0xC101
++
++/**************************************************************************
++ *
++ * Data structures
++ *
++ **************************************************************************
++ */
++
++/*
++ * Buffers used for TX, RX and event queue
++ *
++ */
++#define EFAB_BUF_ALIGN 4096
++#define EFAB_DATA_BUF_SIZE 2048
++#define EFAB_RX_BUFS 16
++#define EFAB_RXD_SIZE 512
++#define EFAB_TXD_SIZE 512
++#define EFAB_EVQ_SIZE 512
++/* Statically allocated pool holding the event queue, the RX/TX
++ * descriptor rings and the packet data buffers.  NOTE(review): the
++ * (EFAB_BUF_ALIGN-1)-byte padding presumably allows the pool to be
++ * aligned up to EFAB_BUF_ALIGN at run time -- confirm against the
++ * code that assigns efab->eventq/rxd/txd. */
++struct efab_buffers {
++ uint8_t eventq[4096];
++ uint8_t rxd[4096];
++ uint8_t txd[4096];
++ uint8_t tx_buf[EFAB_DATA_BUF_SIZE];
++ uint8_t rx_buf[EFAB_RX_BUFS][EFAB_DATA_BUF_SIZE];
++ uint8_t padding[EFAB_BUF_ALIGN-1];
++};
++static struct efab_buffers efab_buffers;
++
++/** An RX buffer */
++struct efab_rx_buf {
++ uint8_t *addr;
++ unsigned int len;
++ int id;
++};
++
++/** A TX buffer */
++struct efab_tx_buf {
++ uint8_t *addr;
++ unsigned int len;
++ int id;
++};
++
++/** Etherfabric event type */
++enum efab_event_type {
++ EFAB_EV_NONE = 0,
++ EFAB_EV_TX,
++ EFAB_EV_RX,
++};
++
++/** Etherfabric event */
++struct efab_event {
++ /** Event type */
++ enum efab_event_type type;
++ /** RX buffer ID (valid only for EFAB_EV_RX) */
++ int rx_id;
++ /** RX length (valid only for EFAB_EV_RX) */
++ unsigned int rx_len;
++};
++
++/*
++ * Etherfabric abstraction layer
++ *
++ * Each supported controller (EF1002, Falcon) supplies one instance
++ * of this operations table; the generic driver code touches the
++ * hardware only through these hooks.
++ */
++struct efab_nic;
++struct efab_operations {
++ /* Device bring-up */
++ void ( * get_membase ) ( struct efab_nic *efab );
++ int ( * reset ) ( struct efab_nic *efab );
++ int ( * init_nic ) ( struct efab_nic *efab );
++ int ( * read_eeprom ) ( struct efab_nic *efab );
++ /* Datapath: descriptor construction and doorbell rings */
++ void ( * build_rx_desc ) ( struct efab_nic *efab,
++ struct efab_rx_buf *rx_buf );
++ void ( * notify_rx_desc ) ( struct efab_nic *efab );
++ void ( * build_tx_desc ) ( struct efab_nic *efab,
++ struct efab_tx_buf *tx_buf );
++ void ( * notify_tx_desc ) ( struct efab_nic *efab );
++ int ( * fetch_event ) ( struct efab_nic *efab,
++ struct efab_event *event );
++ /* Interrupt control */
++ void ( * mask_irq ) ( struct efab_nic *efab, int enabled );
++ void ( * generate_irq ) ( struct efab_nic *efab );
++ /* MAC register and MII management access */
++ void ( * mac_writel ) ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int mac_reg );
++ void ( * mac_readl ) ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int mac_reg );
++ int ( * init_mac ) ( struct efab_nic *efab );
++ void ( * mdio_write ) ( struct efab_nic *efab, int location,
++ int value );
++ int ( * mdio_read ) ( struct efab_nic *efab, int location );
++};
++
++/*
++ * Driver private data structure
++ *
++ */
++struct efab_nic {
++
++ /** PCI device */
++ struct pci_device *pci;
++
++ /** Operations table */
++ struct efab_operations *op;
++
++ /** Memory base */
++ void *membase;
++
++ /** I/O base */
++ unsigned int iobase;
++
++ /** Buffers */
++ uint8_t *eventq; /* Falcon only */
++ uint8_t *txd; /* Falcon only */
++ uint8_t *rxd; /* Falcon only */
++ struct efab_tx_buf tx_buf;
++ struct efab_rx_buf rx_bufs[EFAB_RX_BUFS];
++
++ /** Buffer pointers */
++ unsigned int eventq_read_ptr; /* Falcon only */
++ unsigned int tx_write_ptr;
++ unsigned int rx_write_ptr;
++ int tx_in_progress;
++
++ /** Port 0/1 on the NIC */
++ int port;
++
++ /** MAC address */
++ uint8_t mac_addr[ETH_ALEN];
++ /** GMII link options */
++ unsigned int link_options;
++ /** Link status */
++ int link_up;
++
++ /** INT_REG_KER for Falcon */
++ efab_oword_t int_ker __attribute__ (( aligned ( 16 ) ));
++};
++
++/**************************************************************************
++ *
++ * EEPROM access
++ *
++ **************************************************************************
++ */
++
++#define EFAB_EEPROM_SDA 0x80000000u
++#define EFAB_EEPROM_SCL 0x40000000u
++#define ARIZONA_24xx00_SLAVE 0xa0
++#define EFAB_EEPROM_READ_SELECT ( ARIZONA_24xx00_SLAVE | 1 )
++#define EFAB_EEPROM_WRITE_SELECT ( ARIZONA_24xx00_SLAVE | 0 )
++
++/* Release the two-wire (I2C-style) EEPROM bus by floating both the
++ * SDA and SCL lines high. */
++static void eeprom_release ( uint32_t *eeprom_reg ) {
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ writel ( dev | ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL ),
++ eeprom_reg );
++ udelay ( 10 );
++}
++
++/* Generate a start condition on the EEPROM bus: make sure both
++ * lines are high first, then take them low again (SCL is pulsed
++ * once on the way down). */
++static void eeprom_start ( uint32_t *eeprom_reg ) {
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++
++ if ( ( dev & ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL ) ) !=
++ ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL ) ) {
++ udelay ( 10 );
++ writel ( dev | ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL ),
++ eeprom_reg );
++ udelay ( 1 );
++ }
++ dev &=~ ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL );
++
++ udelay ( 10 );
++ writel ( dev | EFAB_EEPROM_SCL, eeprom_reg) ;
++ udelay ( 1) ;
++
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++}
++
++/* Generate a stop condition: with SCL low, drop SDA, raise SCL,
++ * then raise SDA while SCL is high. */
++static void eeprom_stop ( uint32_t *eeprom_reg ) {
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ EFAB_ASSERT ( ! ( dev & EFAB_EEPROM_SCL ) );
++
++ if ( dev & ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL ) ) {
++ dev &=~ ( EFAB_EEPROM_SDA | EFAB_EEPROM_SCL );
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++ }
++
++ udelay ( 10 );
++ dev |= EFAB_EEPROM_SCL;
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ dev |= EFAB_EEPROM_SDA;
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++}
++
++/* Clock one byte out to the EEPROM, MSB first, pulsing SCL for each
++ * bit; on exit SDA is released (high) so the device can drive its
++ * acknowledge bit. */
++static void eeprom_write ( uint32_t *eeprom_reg, unsigned char data ) {
++ int i;
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ udelay ( 10 );
++ EFAB_ASSERT ( ! ( dev & EFAB_EEPROM_SCL ) );
++
++ for ( i = 0 ; i < 8 ; i++, data <<= 1 ) {
++ if ( data & 0x80 ) {
++ dev |= EFAB_EEPROM_SDA;
++ } else {
++ dev &=~ EFAB_EEPROM_SDA;
++ }
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ writel ( dev | EFAB_EEPROM_SCL, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++ }
++
++ if( ! ( dev & EFAB_EEPROM_SDA ) ) {
++ udelay ( 10 );
++ writel ( dev | EFAB_EEPROM_SDA, eeprom_reg );
++ udelay ( 10 );
++ }
++}
++
++/* Clock one byte in from the EEPROM, MSB first, with SDA released
++ * so the device can drive the data line. */
++static unsigned char eeprom_read ( uint32_t *eeprom_reg ) {
++ unsigned int i, dev, rd;
++ unsigned char val = 0;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ udelay ( 10 );
++ EFAB_ASSERT ( ! ( dev & EFAB_EEPROM_SCL ) );
++
++ if( ! ( dev & EFAB_EEPROM_SDA ) ) {
++ dev |= EFAB_EEPROM_SDA;
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++ }
++
++ for( i = 0 ; i < 8 ; i++ ) {
++ udelay ( 10 );
++ writel ( dev | EFAB_EEPROM_SCL, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ rd = readl ( eeprom_reg );
++ udelay ( 10 );
++ val = ( val << 1 ) | ( ( rd & EFAB_EEPROM_SDA ) != 0 );
++
++ udelay ( 10 );
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++ }
++
++ return val;
++}
++
++/* Pulse SCL and sample SDA: the device pulls SDA low to acknowledge
++ * the previous byte.  Returns non-zero if the byte was ACKed. */
++static int eeprom_check_ack ( uint32_t *eeprom_reg ) {
++ int ack;
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ EFAB_ASSERT ( ! ( dev & EFAB_EEPROM_SCL ) );
++
++ writel ( dev | EFAB_EEPROM_SCL, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ ack = readl ( eeprom_reg ) & EFAB_EEPROM_SDA;
++
++ udelay ( 10 );
++ /* NOTE(review): this writes back "ack" (only the sampled SDA
++ * bit) rather than "dev", discarding all other register bits
++ * -- looks suspicious; confirm intended behaviour against the
++ * hardware documentation before changing. */
++ writel ( ack & ~EFAB_EEPROM_SCL, eeprom_reg );
++ udelay ( 10 );
++
++ return ( ack == 0 );
++}
++
++/* Drive an acknowledge bit back to the EEPROM after reading a byte:
++ * SDA low, pulse SCL, then release SDA with SCL low. */
++static void eeprom_send_ack ( uint32_t *eeprom_reg ) {
++ unsigned int dev;
++
++ udelay ( 10 );
++ dev = readl ( eeprom_reg );
++ EFAB_ASSERT ( ! ( dev & EFAB_EEPROM_SCL ) );
++
++ udelay ( 10 );
++ dev &= ~EFAB_EEPROM_SDA;
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ dev |= EFAB_EEPROM_SCL;
++ writel ( dev, eeprom_reg );
++ udelay ( 10 );
++
++ udelay ( 10 );
++ dev |= EFAB_EEPROM_SDA;
++ writel ( dev & ~EFAB_EEPROM_SCL, eeprom_reg );
++ udelay ( 10 );
++}
++
++/* Read the station MAC address from the serial EEPROM using a
++ * random-read sequence (select for write, set word address 0,
++ * re-start, then ETH_ALEN sequential reads).  Returns 1 on success,
++ * 0 if any byte goes unacknowledged.  NOTE(review): on the failure
++ * paths the bus is left without a stop/release -- confirm callers
++ * tolerate this. */
++static int efab_eeprom_read_mac ( uint32_t *eeprom_reg, uint8_t *mac_addr ) {
++ int i;
++
++ eeprom_start ( eeprom_reg );
++
++ eeprom_write ( eeprom_reg, EFAB_EEPROM_WRITE_SELECT );
++ if ( ! eeprom_check_ack ( eeprom_reg ) )
++ return 0;
++
++ eeprom_write ( eeprom_reg, 0 );
++ if ( ! eeprom_check_ack ( eeprom_reg ) )
++ return 0;
++
++ eeprom_stop ( eeprom_reg );
++ eeprom_start ( eeprom_reg );
++
++ eeprom_write ( eeprom_reg, EFAB_EEPROM_READ_SELECT );
++ if ( ! eeprom_check_ack ( eeprom_reg ) )
++ return 0;
++
++ for ( i = 0 ; i < ETH_ALEN ; i++ ) {
++ mac_addr[i] = eeprom_read ( eeprom_reg );
++ eeprom_send_ack ( eeprom_reg );
++ }
++
++ eeprom_stop ( eeprom_reg );
++
++ eeprom_release ( eeprom_reg );
++
++ return 1;
++}
++
++/**************************************************************************
++ *
++ * GMII routines
++ *
++ **************************************************************************
++ */
++
++/* GMII registers */
++#define MII_BMSR 0x01 /* Basic mode status register */
++#define MII_ADVERTISE 0x04 /* Advertisement control register */
++#define MII_LPA 0x05 /* Link partner ability register*/
++#define GMII_GTCR 0x09 /* 1000BASE-T control register */
++#define GMII_GTSR 0x0a /* 1000BASE-T status register */
++#define GMII_PSSR 0x11 /* PHY-specific status register */
++
++/* Basic mode status register. */
++#define BMSR_LSTATUS 0x0004 /* Link status */
++
++/* Link partner ability register. */
++#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
++#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
++#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
++#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
++#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */
++#define LPA_PAUSE 0x0400 /* Bit 10 - MAC pause */
++
++/* Pseudo extensions to the link partner ability register */
++#define LPA_1000FULL 0x00020000
++#define LPA_1000HALF 0x00010000
++
++#define LPA_100 (LPA_100FULL | LPA_100HALF | LPA_100BASE4)
++#define LPA_1000 ( LPA_1000FULL | LPA_1000HALF )
++#define LPA_DUPLEX ( LPA_10FULL | LPA_100FULL | LPA_1000FULL )
++
++/* Mask of bits not associated with speed or duplexity. */
++#define LPA_OTHER ~( LPA_10FULL | LPA_10HALF | LPA_100FULL | \
++ LPA_100HALF | LPA_1000FULL | LPA_1000HALF )
++
++/* PHY-specific status register */
++#define PSSR_LSTATUS 0x0400 /* Bit 10 - link status */
++
++/**
++ * Retrieve GMII autonegotiation advertised abilities
++ *
++ * Combines MII_ADVERTISE with the 1000BASE-T control bits from
++ * GMII_GTCR into the pseudo-extended LPA_* format (1000BASE-T
++ * abilities in bits 16-17).
++ */
++static unsigned int gmii_autoneg_advertised ( struct efab_nic *efab ) {
++ unsigned int mii_advertise;
++ unsigned int gmii_advertise;
++
++ /* Extended bits are in bits 8 and 9 of GMII_GTCR */
++ mii_advertise = efab->op->mdio_read ( efab, MII_ADVERTISE );
++ gmii_advertise = ( ( efab->op->mdio_read ( efab, GMII_GTCR ) >> 8 )
++ & 0x03 );
++ return ( ( gmii_advertise << 16 ) | mii_advertise );
++}
++
++/**
++ * Retrieve GMII autonegotiation link partner abilities
++ *
++ * Combines MII_LPA with the 1000BASE-T status bits from GMII_GTSR
++ * into the same pseudo-extended LPA_* format as
++ * gmii_autoneg_advertised().
++ */
++static unsigned int gmii_autoneg_lpa ( struct efab_nic *efab ) {
++ unsigned int mii_lpa;
++ unsigned int gmii_lpa;
++
++ /* Extended bits are in bits 10 and 11 of GMII_GTSR */
++ mii_lpa = efab->op->mdio_read ( efab, MII_LPA );
++ gmii_lpa = ( efab->op->mdio_read ( efab, GMII_GTSR ) >> 10 ) & 0x03;
++ return ( ( gmii_lpa << 16 ) | mii_lpa );
++}
++
++/**
++ * Calculate GMII autonegotiated link technology
++ *
++ * Picks the highest-priority technology common to both link
++ * partners, preserving any bits unrelated to speed and duplex.
++ */
++static unsigned int gmii_nway_result ( unsigned int negotiated ) {
++ /* Candidate technologies, best first */
++ static const unsigned int tech[] = {
++ LPA_1000FULL, LPA_1000HALF, LPA_100FULL,
++ LPA_100BASE4, LPA_100HALF, LPA_10FULL,
++ };
++ unsigned int other_bits = negotiated & LPA_OTHER;
++ unsigned int i;
++
++ for ( i = 0 ; i < ( sizeof ( tech ) / sizeof ( tech[0] ) ) ; i++ ) {
++ if ( negotiated & tech[i] )
++ return ( other_bits | tech[i] );
++ }
++
++ /* Fall back to 10 Mbps half-duplex */
++ return ( other_bits | LPA_10HALF );
++}
++
++/**
++ * Check GMII PHY link status
++ *
++ * Returns non-zero only if both the (latching) BMSR link bit and
++ * the PHY-specific status register report link up.
++ */
++static int gmii_link_ok ( struct efab_nic *efab ) {
++ int status;
++ int phy_status;
++
++ /* BMSR is latching - it returns "link down" if the link has
++ * been down at any point since the last read. To get a
++ * real-time status, we therefore read the register twice and
++ * use the result of the second read.
++ */
++ efab->op->mdio_read ( efab, MII_BMSR );
++ status = efab->op->mdio_read ( efab, MII_BMSR );
++
++ /* Read the PHY-specific Status Register. This is
++ * non-latching, so we need do only a single read.
++ */
++ phy_status = efab->op->mdio_read ( efab, GMII_PSSR );
++
++ return ( ( status & BMSR_LSTATUS ) && ( phy_status & PSSR_LSTATUS ) );
++}
++
++/**************************************************************************
++ *
++ * Alaska PHY
++ *
++ **************************************************************************
++ */
++
++/**
++ * Initialise Alaska PHY
++ *
++ * Records link state in efab->link_up; when the link is up, also
++ * derives efab->link_options from the autonegotiation result and
++ * prints a one-line summary.  efab->link_options is left untouched
++ * when the link is down.
++ */
++static void alaska_init ( struct efab_nic *efab ) {
++ unsigned int advertised, lpa;
++
++ /* Read link up status */
++ efab->link_up = gmii_link_ok ( efab );
++
++ if ( ! efab->link_up )
++ return;
++
++ /* Determine link options from PHY. */
++ advertised = gmii_autoneg_advertised ( efab );
++ lpa = gmii_autoneg_lpa ( efab );
++ efab->link_options = gmii_nway_result ( advertised & lpa );
++
++ printf ( "%dMbps %s-duplex (%04x,%04x)\n",
++ ( efab->link_options & LPA_1000 ? 1000 :
++ ( efab->link_options & LPA_100 ? 100 : 10 ) ),
++ ( efab->link_options & LPA_DUPLEX ? "full" : "half" ),
++ advertised, lpa );
++}
++
++/**************************************************************************
++ *
++ * Mentor MAC
++ *
++ **************************************************************************
++ */
++
++/* GMAC configuration register 1 */
++#define GM_CFG1_REG_MAC 0x00
++#define GM_SW_RST_LBN 31
++#define GM_SW_RST_WIDTH 1
++#define GM_RX_FC_EN_LBN 5
++#define GM_RX_FC_EN_WIDTH 1
++#define GM_TX_FC_EN_LBN 4
++#define GM_TX_FC_EN_WIDTH 1
++#define GM_RX_EN_LBN 2
++#define GM_RX_EN_WIDTH 1
++#define GM_TX_EN_LBN 0
++#define GM_TX_EN_WIDTH 1
++
++/* GMAC configuration register 2 */
++#define GM_CFG2_REG_MAC 0x01
++#define GM_PAMBL_LEN_LBN 12
++#define GM_PAMBL_LEN_WIDTH 4
++#define GM_IF_MODE_LBN 8
++#define GM_IF_MODE_WIDTH 2
++#define GM_PAD_CRC_EN_LBN 2
++#define GM_PAD_CRC_EN_WIDTH 1
++#define GM_FD_LBN 0
++#define GM_FD_WIDTH 1
++
++/* GMAC maximum frame length register */
++#define GM_MAX_FLEN_REG_MAC 0x04
++#define GM_MAX_FLEN_LBN 0
++#define GM_MAX_FLEN_WIDTH 16
++
++/* GMAC MII management configuration register */
++#define GM_MII_MGMT_CFG_REG_MAC 0x08
++#define GM_MGMT_CLK_SEL_LBN 0
++#define GM_MGMT_CLK_SEL_WIDTH 3
++
++/* GMAC MII management command register */
++#define GM_MII_MGMT_CMD_REG_MAC 0x09
++#define GM_MGMT_SCAN_CYC_LBN 1
++#define GM_MGMT_SCAN_CYC_WIDTH 1
++#define GM_MGMT_RD_CYC_LBN 0
++#define GM_MGMT_RD_CYC_WIDTH 1
++
++/* GMAC MII management address register */
++#define GM_MII_MGMT_ADR_REG_MAC 0x0a
++#define GM_MGMT_PHY_ADDR_LBN 8
++#define GM_MGMT_PHY_ADDR_WIDTH 5
++#define GM_MGMT_REG_ADDR_LBN 0
++#define GM_MGMT_REG_ADDR_WIDTH 5
++
++/* GMAC MII management control register */
++#define GM_MII_MGMT_CTL_REG_MAC 0x0b
++#define GM_MGMT_CTL_LBN 0
++#define GM_MGMT_CTL_WIDTH 16
++
++/* GMAC MII management status register */
++#define GM_MII_MGMT_STAT_REG_MAC 0x0c
++#define GM_MGMT_STAT_LBN 0
++#define GM_MGMT_STAT_WIDTH 16
++
++/* GMAC MII management indicators register */
++#define GM_MII_MGMT_IND_REG_MAC 0x0d
++#define GM_MGMT_BUSY_LBN 0
++#define GM_MGMT_BUSY_WIDTH 1
++
++/* GMAC station address register 1 */
++#define GM_ADR1_REG_MAC 0x10
++#define GM_HWADDR_5_LBN 24
++#define GM_HWADDR_5_WIDTH 8
++#define GM_HWADDR_4_LBN 16
++#define GM_HWADDR_4_WIDTH 8
++#define GM_HWADDR_3_LBN 8
++#define GM_HWADDR_3_WIDTH 8
++#define GM_HWADDR_2_LBN 0
++#define GM_HWADDR_2_WIDTH 8
++
++/* GMAC station address register 2 */
++#define GM_ADR2_REG_MAC 0x11
++#define GM_HWADDR_1_LBN 24
++#define GM_HWADDR_1_WIDTH 8
++#define GM_HWADDR_0_LBN 16
++#define GM_HWADDR_0_WIDTH 8
++
++/* GMAC FIFO configuration register 0 */
++#define GMF_CFG0_REG_MAC 0x12
++#define GMF_FTFENREQ_LBN 12
++#define GMF_FTFENREQ_WIDTH 1
++#define GMF_STFENREQ_LBN 11
++#define GMF_STFENREQ_WIDTH 1
++#define GMF_FRFENREQ_LBN 10
++#define GMF_FRFENREQ_WIDTH 1
++#define GMF_SRFENREQ_LBN 9
++#define GMF_SRFENREQ_WIDTH 1
++#define GMF_WTMENREQ_LBN 8
++#define GMF_WTMENREQ_WIDTH 1
++
++/* GMAC FIFO configuration register 1 */
++#define GMF_CFG1_REG_MAC 0x13
++#define GMF_CFGFRTH_LBN 16
++#define GMF_CFGFRTH_WIDTH 5
++#define GMF_CFGXOFFRTX_LBN 0
++#define GMF_CFGXOFFRTX_WIDTH 16
++
++/* GMAC FIFO configuration register 2 */
++#define GMF_CFG2_REG_MAC 0x14
++#define GMF_CFGHWM_LBN 16
++#define GMF_CFGHWM_WIDTH 6
++#define GMF_CFGLWM_LBN 0
++#define GMF_CFGLWM_WIDTH 6
++
++/* GMAC FIFO configuration register 3 */
++#define GMF_CFG3_REG_MAC 0x15
++#define GMF_CFGHWMFT_LBN 16
++#define GMF_CFGHWMFT_WIDTH 6
++#define GMF_CFGFTTH_LBN 0
++#define GMF_CFGFTTH_WIDTH 6
++
++/* GMAC FIFO configuration register 4 */
++#define GMF_CFG4_REG_MAC 0x16
++#define GMF_HSTFLTRFRM_PAUSE_LBN 12
++#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
++
++/* GMAC FIFO configuration register 5 */
++#define GMF_CFG5_REG_MAC 0x17
++#define GMF_CFGHDPLX_LBN 22
++#define GMF_CFGHDPLX_WIDTH 1
++#define GMF_CFGBYTMODE_LBN 19
++#define GMF_CFGBYTMODE_WIDTH 1
++#define GMF_HSTDRPLT64_LBN 18
++#define GMF_HSTDRPLT64_WIDTH 1
++#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
++#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
++
++struct efab_mentormac_parameters {
++ int gmf_cfgfrth;
++ int gmf_cfgftth;
++ int gmf_cfghwmft;
++ int gmf_cfghwm;
++ int gmf_cfglwm;
++};
++
++/**
++ * Reset Mentor MAC
++ *
++ * Asserts ( reset != 0 ) or deasserts ( reset == 0 ) the MAC
++ * software-reset bit.  When deasserting on port 0, also programs
++ * the MII management clock divider so the PHY stays accessible.
++ *
++ * Fix: "&reg" had been corrupted to the "(R)" character by HTML
++ * entity decoding; restored the address-of expressions.
++ */
++static void mentormac_reset ( struct efab_nic *efab, int reset ) {
++ efab_dword_t reg;
++
++ EFAB_POPULATE_DWORD_1 ( reg, GM_SW_RST, reset );
++ efab->op->mac_writel ( efab, &reg, GM_CFG1_REG_MAC );
++ udelay ( 1000 );
++
++ if ( ( ! reset ) && ( efab->port == 0 ) ) {
++ /* Configure GMII interface so PHY is accessible.
++ * Note that GMII interface is connected only to port
++ * 0
++ */
++ EFAB_POPULATE_DWORD_1 ( reg, GM_MGMT_CLK_SEL, 0x4 );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_CFG_REG_MAC );
++ udelay ( 10 );
++ }
++}
++
++/**
++ * Initialise Mentor MAC
++ *
++ * Programs the MAC and its FIFOs according to the negotiated link
++ * options (speed, duplex, pause) and the chip-specific FIFO
++ * watermarks supplied in @params, then loads the station MAC
++ * address.  Assumes efab->link_options and efab->mac_addr are
++ * already valid.
++ *
++ * Fix: every "&reg" argument had been corrupted to the "(R)"
++ * character by HTML entity decoding; restored them all.
++ */
++static void mentormac_init ( struct efab_nic *efab,
++ struct efab_mentormac_parameters *params ) {
++ int pause, if_mode, full_duplex, bytemode, half_duplex;
++ efab_dword_t reg;
++
++ /* Configuration register 1 */
++ pause = ( efab->link_options & LPA_PAUSE ) ? 1 : 0;
++ if ( ! ( efab->link_options & LPA_DUPLEX ) ) {
++ /* Half-duplex operation requires TX flow control */
++ pause = 1;
++ }
++ EFAB_POPULATE_DWORD_4 ( reg,
++ GM_TX_EN, 1,
++ GM_TX_FC_EN, pause,
++ GM_RX_EN, 1,
++ GM_RX_FC_EN, 1 );
++ efab->op->mac_writel ( efab, &reg, GM_CFG1_REG_MAC );
++ udelay ( 10 );
++
++ /* Configuration register 2 */
++ if_mode = ( efab->link_options & LPA_1000 ) ? 2 : 1;
++ full_duplex = ( efab->link_options & LPA_DUPLEX ) ? 1 : 0;
++ EFAB_POPULATE_DWORD_4 ( reg,
++ GM_IF_MODE, if_mode,
++ GM_PAD_CRC_EN, 1,
++ GM_FD, full_duplex,
++ GM_PAMBL_LEN, 0x7 /* ? */ );
++ efab->op->mac_writel ( efab, &reg, GM_CFG2_REG_MAC );
++ udelay ( 10 );
++
++ /* Max frame len register */
++ EFAB_POPULATE_DWORD_1 ( reg, GM_MAX_FLEN, ETH_FRAME_LEN );
++ efab->op->mac_writel ( efab, &reg, GM_MAX_FLEN_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 0 */
++ EFAB_POPULATE_DWORD_5 ( reg,
++ GMF_FTFENREQ, 1,
++ GMF_STFENREQ, 1,
++ GMF_FRFENREQ, 1,
++ GMF_SRFENREQ, 1,
++ GMF_WTMENREQ, 1 );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG0_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 1 */
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GMF_CFGFRTH, params->gmf_cfgfrth,
++ GMF_CFGXOFFRTX, 0xffff );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG1_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 2 */
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GMF_CFGHWM, params->gmf_cfghwm,
++ GMF_CFGLWM, params->gmf_cfglwm );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG2_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 3 */
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GMF_CFGHWMFT, params->gmf_cfghwmft,
++ GMF_CFGFTTH, params->gmf_cfgftth );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG3_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 4 */
++ EFAB_POPULATE_DWORD_1 ( reg, GMF_HSTFLTRFRM_PAUSE, 1 );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG4_REG_MAC );
++ udelay ( 10 );
++
++ /* FIFO configuration register 5 */
++ bytemode = ( efab->link_options & LPA_1000 ) ? 1 : 0;
++ half_duplex = ( efab->link_options & LPA_DUPLEX ) ? 0 : 1;
++ efab->op->mac_readl ( efab, &reg, GMF_CFG5_REG_MAC );
++ EFAB_SET_DWORD_FIELD ( reg, GMF_CFGBYTMODE, bytemode );
++ EFAB_SET_DWORD_FIELD ( reg, GMF_CFGHDPLX, half_duplex );
++ EFAB_SET_DWORD_FIELD ( reg, GMF_HSTDRPLT64, half_duplex );
++ EFAB_SET_DWORD_FIELD ( reg, GMF_HSTFLTRFRMDC_PAUSE, 0 );
++ efab->op->mac_writel ( efab, &reg, GMF_CFG5_REG_MAC );
++ udelay ( 10 );
++
++ /* MAC address */
++ EFAB_POPULATE_DWORD_4 ( reg,
++ GM_HWADDR_5, efab->mac_addr[5],
++ GM_HWADDR_4, efab->mac_addr[4],
++ GM_HWADDR_3, efab->mac_addr[3],
++ GM_HWADDR_2, efab->mac_addr[2] );
++ efab->op->mac_writel ( efab, &reg, GM_ADR1_REG_MAC );
++ udelay ( 10 );
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GM_HWADDR_1, efab->mac_addr[1],
++ GM_HWADDR_0, efab->mac_addr[0] );
++ efab->op->mac_writel ( efab, &reg, GM_ADR2_REG_MAC );
++ udelay ( 10 );
++}
++
++/**
++ * Wait for GMII access to complete
++ *
++ * Polls the MII management "busy" indicator for up to 1000
++ * iterations of udelay(10) (~10ms).  Returns 1 on completion,
++ * 0 on timeout.
++ */
++static int mentormac_gmii_wait ( struct efab_nic *efab ) {
++ int count;
++ efab_dword_t indicator;
++
++ for ( count = 0 ; count < 1000 ; count++ ) {
++ udelay ( 10 );
++ efab->op->mac_readl ( efab, &indicator,
++ GM_MII_MGMT_IND_REG_MAC );
++ if ( EFAB_DWORD_FIELD ( indicator, GM_MGMT_BUSY ) == 0 )
++ return 1;
++ }
++ printf ( "Timed out waiting for GMII\n" );
++ return 0;
++}
++
++/**
++ * Write a GMII register
++ *
++ * Writes @value to PHY @phy_id register @location via the MII
++ * management interface of MAC 0 (both PHYs hang off MAC 0), then
++ * waits for the write cycle to finish.  efab->port is temporarily
++ * forced to 0 and restored on exit.
++ *
++ * Fix: "&reg" had been corrupted to the "(R)" character by HTML
++ * entity decoding; restored the address-of expressions.
++ */
++static void mentormac_mdio_write ( struct efab_nic *efab, int phy_id,
++ int location, int value ) {
++ efab_dword_t reg;
++ int save_port;
++
++ EFAB_TRACE ( "Writing GMII %d register %02x with %04x\n", phy_id,
++ location, value );
++
++ /* Mentor MAC connects both PHYs to MAC 0 */
++ save_port = efab->port;
++ efab->port = 0;
++
++ /* Check MII not currently being accessed */
++ if ( ! mentormac_gmii_wait ( efab ) )
++ goto out;
++
++ /* Write the address register */
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GM_MGMT_PHY_ADDR, phy_id,
++ GM_MGMT_REG_ADDR, location );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_ADR_REG_MAC );
++ udelay ( 10 );
++
++ /* Write data */
++ EFAB_POPULATE_DWORD_1 ( reg, GM_MGMT_CTL, value );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_CTL_REG_MAC );
++
++ /* Wait for data to be written */
++ mentormac_gmii_wait ( efab );
++
++ out:
++ /* Restore efab->port */
++ efab->port = save_port;
++}
++
++/**
++ * Read a GMII register
++ *
++ * Reads PHY @phy_id register @location via the MII management
++ * interface of MAC 0.  Returns the register value, or 0xffff if
++ * the management interface times out.  efab->port is temporarily
++ * forced to 0 and restored on exit.
++ *
++ * Fix: "&reg" had been corrupted to the "(R)" character by HTML
++ * entity decoding; restored the address-of expressions.
++ */
++static int mentormac_mdio_read ( struct efab_nic *efab, int phy_id,
++ int location ) {
++ efab_dword_t reg;
++ int value = 0xffff;
++ int save_port;
++
++ /* Mentor MAC connects both PHYs to MAC 0 */
++ save_port = efab->port;
++ efab->port = 0;
++
++ /* Check MII not currently being accessed */
++ if ( ! mentormac_gmii_wait ( efab ) )
++ goto out;
++
++ /* Write the address register */
++ EFAB_POPULATE_DWORD_2 ( reg,
++ GM_MGMT_PHY_ADDR, phy_id,
++ GM_MGMT_REG_ADDR, location );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_ADR_REG_MAC );
++ udelay ( 10 );
++
++ /* Request data to be read */
++ EFAB_POPULATE_DWORD_1 ( reg, GM_MGMT_RD_CYC, 1 );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_CMD_REG_MAC );
++
++ /* Wait for data to be become available */
++ if ( mentormac_gmii_wait ( efab ) ) {
++ /* Read data */
++ efab->op->mac_readl ( efab, &reg, GM_MII_MGMT_STAT_REG_MAC );
++ value = EFAB_DWORD_FIELD ( reg, GM_MGMT_STAT );
++ EFAB_TRACE ( "Read from GMII %d register %02x, got %04x\n",
++ phy_id, location, value );
++ }
++
++ /* Signal completion */
++ EFAB_ZERO_DWORD ( reg );
++ efab->op->mac_writel ( efab, &reg, GM_MII_MGMT_CMD_REG_MAC );
++ udelay ( 10 );
++
++ out:
++ /* Restore efab->port */
++ efab->port = save_port;
++
++ return value;
++}
++
++/**************************************************************************
++ *
++ * EF1002 routines
++ *
++ **************************************************************************
++ */
++
++/** Control and General Status */
++#define EF1_CTR_GEN_STATUS0_REG 0x0
++#define EF1_MASTER_EVENTS_LBN 12
++#define EF1_MASTER_EVENTS_WIDTH 1
++#define EF1_TX_ENGINE_EN_LBN 19
++#define EF1_TX_ENGINE_EN_WIDTH 1
++#define EF1_RX_ENGINE_EN_LBN 18
++#define EF1_RX_ENGINE_EN_WIDTH 1
++#define EF1_LB_RESET_LBN 3
++#define EF1_LB_RESET_WIDTH 1
++#define EF1_MAC_RESET_LBN 2
++#define EF1_MAC_RESET_WIDTH 1
++#define EF1_CAM_ENABLE_LBN 1
++#define EF1_CAM_ENABLE_WIDTH 1
++
++/** IRQ sources */
++#define EF1_IRQ_SRC_REG 0x0008
++
++/** IRQ mask */
++#define EF1_IRQ_MASK_REG 0x000c
++#define EF1_IRQ_PHY1_LBN 11
++#define EF1_IRQ_PHY1_WIDTH 1
++#define EF1_IRQ_PHY0_LBN 10
++#define EF1_IRQ_PHY0_WIDTH 1
++#define EF1_IRQ_SERR_LBN 7
++#define EF1_IRQ_SERR_WIDTH 1
++#define EF1_IRQ_EVQ_LBN 3
++#define EF1_IRQ_EVQ_WIDTH 1
++
++/** Event generation */
++#define EF1_EVT3_REG 0x38
++
++/** EEPROM access */
++#define EF1_EEPROM_REG 0x0040
++
++/** Control register 2 */
++#define EF1_CTL2_REG 0x4c
++#define EF1_MEM_MAP_4MB_LBN 11
++#define EF1_MEM_MAP_4MB_WIDTH 1
++#define EF1_EV_INTR_CLR_WRITE_LBN 6
++#define EF1_EV_INTR_CLR_WRITE_WIDTH 1
++#define EF1_SW_RESET_LBN 2
++#define EF1_SW_RESET_WIDTH 1
++#define EF1_INTR_AFTER_EVENT_LBN 1
++#define EF1_INTR_AFTER_EVENT_WIDTH 1
++
++/** Event FIFO */
++#define EF1_EVENT_FIFO_REG 0x50
++
++/** Event FIFO count */
++#define EF1_EVENT_FIFO_COUNT_REG 0x5c
++#define EF1_EV_COUNT_LBN 0
++#define EF1_EV_COUNT_WIDTH 16
++
++/** TX DMA control and status */
++#define EF1_DMA_TX_CSR_REG 0x80
++#define EF1_DMA_TX_CSR_CHAIN_EN_LBN 8
++#define EF1_DMA_TX_CSR_CHAIN_EN_WIDTH 1
++#define EF1_DMA_TX_CSR_ENABLE_LBN 4
++#define EF1_DMA_TX_CSR_ENABLE_WIDTH 1
++#define EF1_DMA_TX_CSR_INT_EN_LBN 0
++#define EF1_DMA_TX_CSR_INT_EN_WIDTH 1
++
++/** RX DMA control and status */
++#define EF1_DMA_RX_CSR_REG 0xa0
++#define EF1_DMA_RX_ABOVE_1GB_EN_LBN 6
++#define EF1_DMA_RX_ABOVE_1GB_EN_WIDTH 1
++#define EF1_DMA_RX_BELOW_1MB_EN_LBN 5
++#define EF1_DMA_RX_BELOW_1MB_EN_WIDTH 1
++#define EF1_DMA_RX_CSR_ENABLE_LBN 0
++#define EF1_DMA_RX_CSR_ENABLE_WIDTH 1
++
++/** Level 5 watermark register (in MAC space) */
++#define EF1_GMF_L5WM_REG_MAC 0x20
++#define EF1_L5WM_LBN 0
++#define EF1_L5WM_WIDTH 32
++
++/** MAC clock */
++#define EF1_GM_MAC_CLK_REG 0x112000
++#define EF1_GM_PORT0_MAC_CLK_LBN 0
++#define EF1_GM_PORT0_MAC_CLK_WIDTH 1
++#define EF1_GM_PORT1_MAC_CLK_LBN 1
++#define EF1_GM_PORT1_MAC_CLK_WIDTH 1
++
++/** TX descriptor FIFO */
++#define EF1_TX_DESC_FIFO 0x141000
++#define EF1_TX_KER_EVQ_LBN 80
++#define EF1_TX_KER_EVQ_WIDTH 12
++#define EF1_TX_KER_IDX_LBN 64
++#define EF1_TX_KER_IDX_WIDTH 16
++#define EF1_TX_KER_MODE_LBN 63
++#define EF1_TX_KER_MODE_WIDTH 1
++#define EF1_TX_KER_PORT_LBN 60
++#define EF1_TX_KER_PORT_WIDTH 1
++#define EF1_TX_KER_CONT_LBN 56
++#define EF1_TX_KER_CONT_WIDTH 1
++#define EF1_TX_KER_BYTE_CNT_LBN 32
++#define EF1_TX_KER_BYTE_CNT_WIDTH 24
++#define EF1_TX_KER_BUF_ADR_LBN 0
++#define EF1_TX_KER_BUF_ADR_WIDTH 32
++
++/** TX descriptor FIFO flush */
++#define EF1_TX_DESC_FIFO_FLUSH 0x141ffc
++
++/** RX descriptor FIFO */
++#define EF1_RX_DESC_FIFO 0x145000
++#define EF1_RX_KER_EVQ_LBN 48
++#define EF1_RX_KER_EVQ_WIDTH 12
++#define EF1_RX_KER_IDX_LBN 32
++#define EF1_RX_KER_IDX_WIDTH 16
++#define EF1_RX_KER_BUF_ADR_LBN 0
++#define EF1_RX_KER_BUF_ADR_WIDTH 32
++
++/** RX descriptor FIFO flush */
++#define EF1_RX_DESC_FIFO_FLUSH 0x145ffc
++
++/** CAM */
++#define EF1_CAM_BASE 0x1c0000
++#define EF1_CAM_WTF_DOES_THIS_DO_LBN 0
++#define EF1_CAM_WTF_DOES_THIS_DO_WIDTH 32
++
++/** Event queue pointers */
++#define EF1_EVQ_PTR_BASE 0x260000
++#define EF1_EVQ_SIZE_LBN 29
++#define EF1_EVQ_SIZE_WIDTH 2
++#define EF1_EVQ_SIZE_4K 3
++#define EF1_EVQ_SIZE_2K 2
++#define EF1_EVQ_SIZE_1K 1
++#define EF1_EVQ_SIZE_512 0
++#define EF1_EVQ_BUF_BASE_ID_LBN 0
++#define EF1_EVQ_BUF_BASE_ID_WIDTH 29
++
++/* MAC registers */
++#define EF1002_MAC_REGBANK 0x110000
++#define EF1002_MAC_REGBANK_SIZE 0x1000
++#define EF1002_MAC_REG_SIZE 0x08
++
++/** Offset of a MAC register within EF1002 */
++#define EF1002_MAC_REG( efab, mac_reg ) \
++ ( EF1002_MAC_REGBANK + \
++ ( (efab)->port * EF1002_MAC_REGBANK_SIZE ) + \
++ ( (mac_reg) * EF1002_MAC_REG_SIZE ) )
++
++/* Event queue entries */
++#define EF1_EV_CODE_LBN 20
++#define EF1_EV_CODE_WIDTH 8
++#define EF1_RX_EV_DECODE 0x01
++#define EF1_TX_EV_DECODE 0x02
++#define EF1_DRV_GEN_EV_DECODE 0x0f
++
++/* Receive events */
++#define EF1_RX_EV_LEN_LBN 48
++#define EF1_RX_EV_LEN_WIDTH 16
++#define EF1_RX_EV_PORT_LBN 17
++#define EF1_RX_EV_PORT_WIDTH 3
++#define EF1_RX_EV_OK_LBN 16
++#define EF1_RX_EV_OK_WIDTH 1
++#define EF1_RX_EV_IDX_LBN 0
++#define EF1_RX_EV_IDX_WIDTH 16
++
++/* Transmit events */
++#define EF1_TX_EV_PORT_LBN 17
++#define EF1_TX_EV_PORT_WIDTH 3
++#define EF1_TX_EV_OK_LBN 16
++#define EF1_TX_EV_OK_WIDTH 1
++#define EF1_TX_EV_IDX_LBN 0
++#define EF1_TX_EV_IDX_WIDTH 16
++
++/**
++ * Write dword to EF1002 register
++ *
++ * Logs the access via EFAB_REGDUMP, then writes the low 32 bits of
++ * @value to the memory-mapped register at byte offset @reg.
++ */
++static inline void ef1002_writel ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int reg ) {
++ EFAB_REGDUMP ( "Writing register %x with " EFAB_DWORD_FMT "\n",
++ reg, EFAB_DWORD_VAL ( *value ) );
++ writel ( value->u32[0], efab->membase + reg );
++}
++
++/**
++ * Read dword from an EF1002 register
++ *
++ * Reads the register at byte offset @reg into the low 32 bits of
++ * @value, and logs the access via EFAB_REGDUMP.
++ */
++static inline void ef1002_readl ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int reg ) {
++ value->u32[0] = readl ( efab->membase + reg );
++ EFAB_REGDUMP ( "Read from register %x, got " EFAB_DWORD_FMT "\n",
++ reg, EFAB_DWORD_VAL ( *value ) );
++}
++
++/**
++ * Read dword from an EF1002 register, silently
++ *
++ * As ef1002_readl(), but without the EFAB_REGDUMP trace — used for
++ * registers that are polled frequently (e.g. the event FIFO depth),
++ * where logging every read would flood the debug output.
++ */
++static inline void ef1002_readl_silent ( struct efab_nic *efab,
++ efab_dword_t *value,
++ unsigned int reg ) {
++ value->u32[0] = readl ( efab->membase + reg );
++}
++
++/**
++ * Get memory base
++ *
++ * Maps the EF1002's BAR0 (8MB register window) and records the
++ * virtual address in efab->membase.
++ */
++static void ef1002_get_membase ( struct efab_nic *efab ) {
++ unsigned long membase_phys;
++
++ membase_phys = pci_bar_start ( efab->pci, PCI_BASE_ADDRESS_0 );
++ efab->membase = ioremap ( membase_phys, 0x800000 );
++}
++
++/** PCI registers to backup/restore over a device reset */
++static const unsigned int efab_pci_reg_addr[] = {
++ PCI_COMMAND, 0x0c /* PCI_CACHE_LINE_SIZE */,
++ PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2,
++ PCI_BASE_ADDRESS_3, PCI_ROM_ADDRESS, PCI_INTERRUPT_LINE,
++};
++/** Number of registers in efab_pci_reg_addr */
++#define EFAB_NUM_PCI_REG \
++ ( sizeof ( efab_pci_reg_addr ) / sizeof ( efab_pci_reg_addr[0] ) )
++/** PCI configuration space backup */
++struct efab_pci_reg {
++ uint32_t reg[EFAB_NUM_PCI_REG];
++};
++
++/**
++ * Reset device
++ *
++ * Performs a whole-chip software reset.  The reset clears PCI
++ * configuration space, so the relevant registers are backed up
++ * beforehand and restored (and verified) afterwards.
++ *
++ * Returns 1 on success, 0 on failure.
++ */
++static int ef1002_reset ( struct efab_nic *efab ) {
++ struct efab_pci_reg pci_reg;
++ struct pci_device *pci_dev = efab->pci;
++ efab_dword_t reg;
++ unsigned int i;
++ uint32_t tmp;
++
++ /* Back up PCI configuration registers */
++ for ( i = 0 ; i < EFAB_NUM_PCI_REG ; i++ ) {
++ pci_read_config_dword ( pci_dev, efab_pci_reg_addr[i],
++ &pci_reg.reg[i] );
++ }
++
++ /* Reset the whole device. */
++ EFAB_POPULATE_DWORD_1 ( reg, EF1_SW_RESET, 1 );
++ ef1002_writel ( efab, &reg, EF1_CTL2_REG );
++ mdelay ( 200 );
++
++ /* Restore PCI configuration space */
++ for ( i = 0 ; i < EFAB_NUM_PCI_REG ; i++ ) {
++ pci_write_config_dword ( pci_dev, efab_pci_reg_addr[i],
++ pci_reg.reg[i] );
++ }
++
++ /* Verify PCI configuration space */
++ for ( i = 0 ; i < EFAB_NUM_PCI_REG ; i++ ) {
++ pci_read_config_dword ( pci_dev, efab_pci_reg_addr[i], &tmp );
++ if ( tmp != pci_reg.reg[i] ) {
++ printf ( "PCI restore failed on register %02x "
++ "(is %08x, should be %08x); reboot\n",
++ i, tmp, pci_reg.reg[i] );
++ return 0;
++ }
++ }
++
++ /* Verify device reset complete: an all-ones read indicates the
++ * device is not responding on the bus.
++ */
++ ef1002_readl ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ if ( EFAB_DWORD_IS_ALL_ONES ( reg ) ) {
++ printf ( "Reset failed\n" );
++ return 0;
++ }
++
++ return 1;
++}
++
++/**
++ * Initialise NIC
++ *
++ * Programs the CAM, general control, and DMA control registers, flushes
++ * the descriptor FIFOs, then cycles both MACs and PHYs through reset.
++ * The udelay() calls between register writes follow the documented
++ * bring-up sequence.  Returns 1 on success.
++ */
++static int ef1002_init_nic ( struct efab_nic *efab ) {
++ efab_dword_t reg;
++ int save_port;
++
++ /* No idea what CAM is, but the 'datasheet' says that we have
++ * to write these values in at start of day
++ */
++ EFAB_POPULATE_DWORD_1 ( reg, EF1_CAM_WTF_DOES_THIS_DO, 0x6 );
++ ef1002_writel ( efab, &reg, EF1_CAM_BASE + 0x20018 );
++ udelay ( 1000 );
++ EFAB_POPULATE_DWORD_1 ( reg, EF1_CAM_WTF_DOES_THIS_DO, 0x01000000 );
++ ef1002_writel ( efab, &reg, EF1_CAM_BASE + 0x00018 );
++ udelay ( 1000 );
++
++ /* General control register 0 */
++ ef1002_readl ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_MASTER_EVENTS, 0 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_CAM_ENABLE, 1 );
++ ef1002_writel ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ udelay ( 1000 );
++
++ /* General control register 2 */
++ ef1002_readl ( efab, &reg, EF1_CTL2_REG );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_INTR_AFTER_EVENT, 1 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_EV_INTR_CLR_WRITE, 0 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_MEM_MAP_4MB, 0 );
++ ef1002_writel ( efab, &reg, EF1_CTL2_REG );
++ udelay ( 1000 );
++
++ /* Enable RX DMA */
++ ef1002_readl ( efab, &reg, EF1_DMA_RX_CSR_REG );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_RX_CSR_ENABLE, 1 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_RX_BELOW_1MB_EN, 1 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_RX_ABOVE_1GB_EN, 1 );
++ ef1002_writel ( efab, &reg, EF1_DMA_RX_CSR_REG );
++ udelay ( 1000 );
++
++ /* Enable TX DMA */
++ ef1002_readl ( efab, &reg, EF1_DMA_TX_CSR_REG );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_TX_CSR_CHAIN_EN, 1 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_TX_CSR_ENABLE, 0 /* ?? */ );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_DMA_TX_CSR_INT_EN, 0 /* ?? */ );
++ ef1002_writel ( efab, &reg, EF1_DMA_TX_CSR_REG );
++ udelay ( 1000 );
++
++ /* Flush descriptor queues */
++ EFAB_ZERO_DWORD ( reg );
++ ef1002_writel ( efab, &reg, EF1_RX_DESC_FIFO_FLUSH );
++ ef1002_writel ( efab, &reg, EF1_TX_DESC_FIFO_FLUSH );
++ wmb();
++ udelay ( 10000 );
++
++ /* Reset both MACs; mentormac_reset() acts on efab->port, so the
++ * current port is saved and restored around the sequence.
++ */
++ save_port = efab->port;
++ efab->port = 0;
++ mentormac_reset ( efab, 1 );
++ efab->port = 1;
++ mentormac_reset ( efab, 1 );
++
++ /* Reset both PHYs */
++ ef1002_readl ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_MAC_RESET, 1 );
++ ef1002_writel ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ udelay ( 10000 );
++ EFAB_SET_DWORD_FIELD ( reg, EF1_MAC_RESET, 0 );
++ ef1002_writel ( efab, &reg, EF1_CTR_GEN_STATUS0_REG );
++ udelay ( 10000 );
++
++ /* Take MACs out of reset */
++ efab->port = 0;
++ mentormac_reset ( efab, 0 );
++ efab->port = 1;
++ mentormac_reset ( efab, 0 );
++ efab->port = save_port;
++
++ /* Give PHY time to wake up. It takes a while. */
++ sleep ( 2 );
++
++ return 1;
++}
++
++/**
++ * Read MAC address from EEPROM
++ *
++ * Delegates to the shared efab EEPROM reader, pointing it at the
++ * EF1002's EEPROM register window.  Fills in efab->mac_addr and
++ * returns the reader's success/failure status.
++ */
++static int ef1002_read_eeprom ( struct efab_nic *efab ) {
++ return efab_eeprom_read_mac ( efab->membase + EF1_EEPROM_REG,
++ efab->mac_addr );
++}
++
++/** RX descriptor */
++typedef efab_qword_t ef1002_rx_desc_t;
++
++/**
++ * Build RX descriptor
++ *
++ * Constructs a 64-bit RX descriptor for @rx_buf (event queue 0, the
++ * buffer's ID, and its bus address) and pushes it directly into the
++ * RX descriptor FIFO as two dword writes.
++ */
++static void ef1002_build_rx_desc ( struct efab_nic *efab,
++ struct efab_rx_buf *rx_buf ) {
++ ef1002_rx_desc_t rxd;
++
++ EFAB_POPULATE_QWORD_3 ( rxd,
++ EF1_RX_KER_EVQ, 0,
++ EF1_RX_KER_IDX, rx_buf->id,
++ EF1_RX_KER_BUF_ADR,
++ virt_to_bus ( rx_buf->addr ) );
++ ef1002_writel ( efab, &rxd.dword[0], EF1_RX_DESC_FIFO + 0 );
++ ef1002_writel ( efab, &rxd.dword[1], EF1_RX_DESC_FIFO + 4 );
++ /* presumably lets the FIFO latch the descriptor — TODO confirm */
++ udelay ( 10 );
++}
++
++/**
++ * Update RX descriptor write pointer
++ *
++ * No-op on the EF1002: descriptors are pushed straight into the
++ * hardware FIFO by ef1002_build_rx_desc(), so there is no separate
++ * write pointer to advance.
++ */
++static void ef1002_notify_rx_desc ( struct efab_nic *efab __unused ) {
++ /* Nothing to do */
++}
++
++/** TX descriptor */
++typedef efab_oword_t ef1002_tx_desc_t;
++
++/**
++ * Build TX descriptor
++ *
++ * Constructs a TX descriptor for @tx_buf (event queue 0, buffer ID,
++ * IP mode, current port, byte count and bus address) and pushes it
++ * into the TX descriptor FIFO.  Only the low three dwords are
++ * written; the descriptor fields all lie below bit 96.
++ */
++static void ef1002_build_tx_desc ( struct efab_nic *efab,
++ struct efab_tx_buf *tx_buf ) {
++ ef1002_tx_desc_t txd;
++
++ EFAB_POPULATE_OWORD_7 ( txd,
++ EF1_TX_KER_EVQ, 0,
++ EF1_TX_KER_IDX, tx_buf->id,
++ EF1_TX_KER_MODE, 0 /* IP mode */,
++ EF1_TX_KER_PORT, efab->port,
++ EF1_TX_KER_CONT, 0,
++ EF1_TX_KER_BYTE_CNT, tx_buf->len,
++ EF1_TX_KER_BUF_ADR,
++ virt_to_bus ( tx_buf->addr ) );
++
++ ef1002_writel ( efab, &txd.dword[0], EF1_TX_DESC_FIFO + 0 );
++ ef1002_writel ( efab, &txd.dword[1], EF1_TX_DESC_FIFO + 4 );
++ ef1002_writel ( efab, &txd.dword[2], EF1_TX_DESC_FIFO + 8 );
++ /* presumably lets the FIFO latch the descriptor — TODO confirm */
++ udelay ( 10 );
++}
++
++/**
++ * Update TX descriptor write pointer
++ *
++ * No-op on the EF1002: descriptors are pushed straight into the
++ * hardware FIFO by ef1002_build_tx_desc(), so there is no separate
++ * write pointer to advance.
++ */
++static void ef1002_notify_tx_desc ( struct efab_nic *efab __unused ) {
++	/* Nothing to do */
++}
++
++/** An event */
++typedef efab_qword_t ef1002_event_t;
++
++/**
++ * Retrieve event from event queue
++ *
++ * Polls the event FIFO depth register (silently, to avoid log spam),
++ * and if an event is present reads and decodes it into @event.
++ * Returns 0 if no event was pending, 1 otherwise (including for
++ * unrecognised event codes, which are reported as EFAB_EV_NONE).
++ */
++static int ef1002_fetch_event ( struct efab_nic *efab,
++ struct efab_event *event ) {
++ efab_dword_t reg;
++ int ev_code;
++ int words;
++
++ /* Check event FIFO depth */
++ ef1002_readl_silent ( efab, &reg, EF1_EVENT_FIFO_COUNT_REG );
++ words = EFAB_DWORD_FIELD ( reg, EF1_EV_COUNT );
++ if ( ! words )
++ return 0;
++
++ /* Read event data */
++ ef1002_readl ( efab, &reg, EF1_EVENT_FIFO_REG );
++ DBG ( "Event is " EFAB_DWORD_FMT "\n", EFAB_DWORD_VAL ( reg ) );
++
++ /* Decode event */
++ ev_code = EFAB_DWORD_FIELD ( reg, EF1_EV_CODE );
++ switch ( ev_code ) {
++ case EF1_TX_EV_DECODE:
++ event->type = EFAB_EV_TX;
++ break;
++ case EF1_RX_EV_DECODE:
++ event->type = EFAB_EV_RX;
++ event->rx_id = EFAB_DWORD_FIELD ( reg, EF1_RX_EV_IDX );
++ /* RX len not available via event FIFO */
++ event->rx_len = ETH_FRAME_LEN;
++ break;
++ default:
++ printf ( "Unknown event type %d\n", ev_code );
++ event->type = EFAB_EV_NONE;
++ break;
++ }
++
++ /* Clear any pending interrupts */
++ ef1002_readl ( efab, &reg, EF1_IRQ_SRC_REG );
++
++ return 1;
++}
++
++/**
++ * Enable/disable interrupts
++ *
++ * Writes the interrupt mask register, enabling or disabling both the
++ * system-error and event-queue interrupt sources according to
++ * @enabled (non-zero = enabled).
++ */
++static void ef1002_mask_irq ( struct efab_nic *efab, int enabled ) {
++ efab_dword_t irq_mask;
++
++ EFAB_POPULATE_DWORD_2 ( irq_mask,
++ EF1_IRQ_SERR, enabled,
++ EF1_IRQ_EVQ, enabled );
++ ef1002_writel ( efab, &irq_mask, EF1_IRQ_MASK_REG );
++}
++
++/**
++ * Generate interrupt
++ *
++ * Injects a driver-generated test event into the event queue; the
++ * resulting event should raise an interrupt.  Only the low dword of
++ * the event is written, which carries the event code field.
++ */
++static void ef1002_generate_irq ( struct efab_nic *efab ) {
++ ef1002_event_t test_event;
++
++ EFAB_POPULATE_QWORD_1 ( test_event,
++ EF1_EV_CODE, EF1_DRV_GEN_EV_DECODE );
++ ef1002_writel ( efab, &test_event.dword[0], EF1_EVT3_REG );
++}
++
++/**
++ * Write dword to an EF1002 MAC register
++ *
++ * Translates @mac_reg through EF1002_MAC_REG() (which selects the
++ * register bank for the current port) and performs the write.
++ */
++static void ef1002_mac_writel ( struct efab_nic *efab,
++ efab_dword_t *value, unsigned int mac_reg ) {
++ ef1002_writel ( efab, value, EF1002_MAC_REG ( efab, mac_reg ) );
++}
++
++/**
++ * Read dword from an EF1002 MAC register
++ *
++ * Translates @mac_reg through EF1002_MAC_REG() (which selects the
++ * register bank for the current port) and performs the read.
++ */
++static void ef1002_mac_readl ( struct efab_nic *efab,
++ efab_dword_t *value, unsigned int mac_reg ) {
++ ef1002_readl ( efab, value, EF1002_MAC_REG ( efab, mac_reg ) );
++}
++
++/**
++ * Initialise MAC
++ *
++ * Brings up the Alaska PHY and the Mentor MAC with EF1002-specific
++ * FIFO watermark parameters, then programs the Level 5 watermark and
++ * selects the MAC clock for the current port based on the negotiated
++ * link speed (gigabit vs. 10/100).  Returns 1 on success.
++ */
++static int ef1002_init_mac ( struct efab_nic *efab ) {
++ static struct efab_mentormac_parameters ef1002_mentormac_params = {
++ .gmf_cfgfrth = 0x13,
++ .gmf_cfgftth = 0x10,
++ .gmf_cfghwmft = 0x555,
++ .gmf_cfghwm = 0x2a,
++ .gmf_cfglwm = 0x15,
++ };
++ efab_dword_t reg;
++ unsigned int mac_clk;
++
++ /* Initialise PHY */
++ alaska_init ( efab );
++
++ /* Initialise MAC */
++ mentormac_init ( efab, &ef1002_mentormac_params );
++
++ /* Write Level 5 watermark register */
++ EFAB_POPULATE_DWORD_1 ( reg, EF1_L5WM, 0x10040000 );
++ efab->op->mac_writel ( efab, &reg, EF1_GMF_L5WM_REG_MAC );
++ udelay ( 10 );
++
++ /* Set MAC clock speed: 0 selects the gigabit clock, 1 the
++ * 10/100 clock.
++ */
++ ef1002_readl ( efab, &reg, EF1_GM_MAC_CLK_REG );
++ mac_clk = ( efab->link_options & LPA_1000 ) ? 0 : 1;
++ if ( efab->port == 0 ) {
++ EFAB_SET_DWORD_FIELD ( reg, EF1_GM_PORT0_MAC_CLK, mac_clk );
++ } else {
++ EFAB_SET_DWORD_FIELD ( reg, EF1_GM_PORT1_MAC_CLK, mac_clk );
++ }
++ ef1002_writel ( efab, &reg, EF1_GM_MAC_CLK_REG );
++ udelay ( 10 );
++
++ return 1;
++}
++
++/** MDIO write
++ *
++ * Writes @value to PHY register @location.  The PHY for port N sits
++ * at MDIO address N+2 on this board.
++ */
++static void ef1002_mdio_write ( struct efab_nic *efab, int location,
++ int value ) {
++ mentormac_mdio_write ( efab, efab->port + 2, location, value );
++}
++
++/** MDIO read
++ *
++ * Reads PHY register @location.  The PHY for port N sits at MDIO
++ * address N+2 on this board.
++ */
++static int ef1002_mdio_read ( struct efab_nic *efab, int location ) {
++ return mentormac_mdio_read ( efab, efab->port + 2, location );
++}
++
++/** EF1002 hardware abstraction operations table */
++static struct efab_operations ef1002_operations = {
++ .get_membase = ef1002_get_membase,
++ .reset = ef1002_reset,
++ .init_nic = ef1002_init_nic,
++ .read_eeprom = ef1002_read_eeprom,
++ .build_rx_desc = ef1002_build_rx_desc,
++ .notify_rx_desc = ef1002_notify_rx_desc,
++ .build_tx_desc = ef1002_build_tx_desc,
++ .notify_tx_desc = ef1002_notify_tx_desc,
++ .fetch_event = ef1002_fetch_event,
++ .mask_irq = ef1002_mask_irq,
++ .generate_irq = ef1002_generate_irq,
++ .mac_writel = ef1002_mac_writel,
++ .mac_readl = ef1002_mac_readl,
++ .init_mac = ef1002_init_mac,
++ .mdio_write = ef1002_mdio_write,
++ .mdio_read = ef1002_mdio_read,
++};
++
++/**************************************************************************
++ *
++ * Falcon routines
++ *
++ **************************************************************************
++ */
++
++/* I/O BAR address register */
++#define FCN_IOM_IND_ADR_REG 0x0
++
++/* I/O BAR data register */
++#define FCN_IOM_IND_DAT_REG 0x4
++
++/* Interrupt enable register */
++#define FCN_INT_EN_REG_KER 0x0010
++#define FCN_MEM_PERR_INT_EN_KER_LBN 5
++#define FCN_MEM_PERR_INT_EN_KER_WIDTH 1
++#define FCN_KER_INT_CHAR_LBN 4
++#define FCN_KER_INT_CHAR_WIDTH 1
++#define FCN_KER_INT_KER_LBN 3
++#define FCN_KER_INT_KER_WIDTH 1
++#define FCN_ILL_ADR_ERR_INT_EN_KER_LBN 2
++#define FCN_ILL_ADR_ERR_INT_EN_KER_WIDTH 1
++#define FCN_SRM_PERR_INT_EN_KER_LBN 1
++#define FCN_SRM_PERR_INT_EN_KER_WIDTH 1
++#define FCN_DRV_INT_EN_KER_LBN 0
++#define FCN_DRV_INT_EN_KER_WIDTH 1
++
++/* Interrupt status register */
++#define FCN_INT_ADR_REG_KER 0x0030
++#define FCN_INT_ADR_KER_LBN 0
++#define FCN_INT_ADR_KER_WIDTH EFAB_DMA_TYPE_WIDTH ( 64 )
++
++/* Interrupt acknowledge register */
++#define FCN_INT_ACK_KER_REG 0x0050
++
++/* SPI host command register */
++#define FCN_EE_SPI_HCMD_REG_KER 0x0100
++#define FCN_EE_SPI_HCMD_CMD_EN_LBN 31
++#define FCN_EE_SPI_HCMD_CMD_EN_WIDTH 1
++#define FCN_EE_WR_TIMER_ACTIVE_LBN 28
++#define FCN_EE_WR_TIMER_ACTIVE_WIDTH 1
++#define FCN_EE_SPI_HCMD_SF_SEL_LBN 24
++#define FCN_EE_SPI_HCMD_SF_SEL_WIDTH 1
++#define FCN_EE_SPI_EEPROM 0
++#define FCN_EE_SPI_FLASH 1
++#define FCN_EE_SPI_HCMD_DABCNT_LBN 16
++#define FCN_EE_SPI_HCMD_DABCNT_WIDTH 5
++#define FCN_EE_SPI_HCMD_READ_LBN 15
++#define FCN_EE_SPI_HCMD_READ_WIDTH 1
++#define FCN_EE_SPI_READ 1
++#define FCN_EE_SPI_WRITE 0
++#define FCN_EE_SPI_HCMD_DUBCNT_LBN 12
++#define FCN_EE_SPI_HCMD_DUBCNT_WIDTH 2
++#define FCN_EE_SPI_HCMD_ADBCNT_LBN 8
++#define FCN_EE_SPI_HCMD_ADBCNT_WIDTH 2
++#define FCN_EE_SPI_HCMD_ENC_LBN 0
++#define FCN_EE_SPI_HCMD_ENC_WIDTH 8
++
++/* SPI host address register */
++#define FCN_EE_SPI_HADR_REG_KER 0x0110
++#define FCN_EE_SPI_HADR_DUBYTE_LBN 24
++#define FCN_EE_SPI_HADR_DUBYTE_WIDTH 8
++#define FCN_EE_SPI_HADR_ADR_LBN 0
++#define FCN_EE_SPI_HADR_ADR_WIDTH 24
++
++/* SPI host data register */
++#define FCN_EE_SPI_HDATA_REG_KER 0x0120
++#define FCN_EE_SPI_HDATA3_LBN 96
++#define FCN_EE_SPI_HDATA3_WIDTH 32
++#define FCN_EE_SPI_HDATA2_LBN 64
++#define FCN_EE_SPI_HDATA2_WIDTH 32
++#define FCN_EE_SPI_HDATA1_LBN 32
++#define FCN_EE_SPI_HDATA1_WIDTH 32
++#define FCN_EE_SPI_HDATA0_LBN 0
++#define FCN_EE_SPI_HDATA0_WIDTH 32
++
++/* GPIO control register */
++#define FCN_GPIO_CTL_REG_KER 0x0210
++#define FCN_FLASH_PRESENT_LBN 7
++#define FCN_FLASH_PRESENT_WIDTH 1
++#define FCN_EEPROM_PRESENT_LBN 6
++#define FCN_EEPROM_PRESENT_WIDTH 1
++
++/* Global control register */
++#define FCN_GLB_CTL_REG_KER 0x0220
++#define FCN_EXT_PHY_RST_CTL_LBN 63
++#define FCN_EXT_PHY_RST_CTL_WIDTH 1
++#define FCN_PCIE_SD_RST_CTL_LBN 61
++#define FCN_PCIE_SD_RST_CTL_WIDTH 1
++#define FCN_PCIX_RST_CTL_LBN 60
++#define FCN_PCIX_RST_CTL_WIDTH 1
++#define FCN_RST_EXT_PHY_LBN 31
++#define FCN_RST_EXT_PHY_WIDTH 1
++#define FCN_INT_RST_DUR_LBN 4
++#define FCN_INT_RST_DUR_WIDTH 3
++#define FCN_EXT_PHY_RST_DUR_LBN 1
++#define FCN_EXT_PHY_RST_DUR_WIDTH 3
++#define FCN_SWRST_LBN 0
++#define FCN_SWRST_WIDTH 1
++#define FCN_INCLUDE_IN_RESET 0
++#define FCN_EXCLUDE_FROM_RESET 1
++
++/* Timer table for kernel access */
++#define FCN_TIMER_CMD_REG_KER 0x420
++#define FCN_TIMER_MODE_LBN 12
++#define FCN_TIMER_MODE_WIDTH 2
++#define FCN_TIMER_MODE_DIS 0
++#define FCN_TIMER_MODE_INT_HLDOFF 1
++#define FCN_TIMER_VAL_LBN 0
++#define FCN_TIMER_VAL_WIDTH 12
++
++/* SRAM receive descriptor cache configuration register */
++#define FCN_SRM_RX_DC_CFG_REG_KER 0x610
++#define FCN_SRM_RX_DC_BASE_ADR_LBN 0
++#define FCN_SRM_RX_DC_BASE_ADR_WIDTH 21
++
++/* SRAM transmit descriptor cache configuration register */
++#define FCN_SRM_TX_DC_CFG_REG_KER 0x620
++#define FCN_SRM_TX_DC_BASE_ADR_LBN 0
++#define FCN_SRM_TX_DC_BASE_ADR_WIDTH 21
++
++/* Receive filter control register */
++#define FCN_RX_FILTER_CTL_REG_KER 0x810
++#define FCN_NUM_KER_LBN 24
++#define FCN_NUM_KER_WIDTH 2
++
++/* Receive descriptor update register */
++#define FCN_RX_DESC_UPD_REG_KER 0x0830
++#define FCN_RX_DESC_WPTR_LBN 96
++#define FCN_RX_DESC_WPTR_WIDTH 12
++#define FCN_RX_DESC_UPD_REG_KER_DWORD ( FCN_RX_DESC_UPD_REG_KER + 12 )
++#define FCN_RX_DESC_WPTR_DWORD_LBN 0
++#define FCN_RX_DESC_WPTR_DWORD_WIDTH 12
++
++/* Receive descriptor cache configuration register */
++#define FCN_RX_DC_CFG_REG_KER 0x840
++#define FCN_RX_DC_SIZE_LBN 0
++#define FCN_RX_DC_SIZE_WIDTH 2
++
++/* Transmit descriptor update register */
++#define FCN_TX_DESC_UPD_REG_KER 0x0a10
++#define FCN_TX_DESC_WPTR_LBN 96
++#define FCN_TX_DESC_WPTR_WIDTH 12
++#define FCN_TX_DESC_UPD_REG_KER_DWORD ( FCN_TX_DESC_UPD_REG_KER + 12 )
++#define FCN_TX_DESC_WPTR_DWORD_LBN 0
++#define FCN_TX_DESC_WPTR_DWORD_WIDTH 12
++
++/* Transmit descriptor cache configuration register */
++#define FCN_TX_DC_CFG_REG_KER 0xa20
++#define FCN_TX_DC_SIZE_LBN 0
++#define FCN_TX_DC_SIZE_WIDTH 2
++
++/* PHY management transmit data register */
++#define FCN_MD_TXD_REG_KER 0xc00
++#define FCN_MD_TXD_LBN 0
++#define FCN_MD_TXD_WIDTH 16
++
++/* PHY management receive data register */
++#define FCN_MD_RXD_REG_KER 0xc10
++#define FCN_MD_RXD_LBN 0
++#define FCN_MD_RXD_WIDTH 16
++
++/* PHY management configuration & status register */
++#define FCN_MD_CS_REG_KER 0xc20
++#define FCN_MD_GC_LBN 4
++#define FCN_MD_GC_WIDTH 1
++#define FCN_MD_RIC_LBN 2
++#define FCN_MD_RIC_WIDTH 1
++#define FCN_MD_WRC_LBN 0
++#define FCN_MD_WRC_WIDTH 1
++
++/* PHY management PHY address register */
++#define FCN_MD_PHY_ADR_REG_KER 0xc30
++#define FCN_MD_PHY_ADR_LBN 0
++#define FCN_MD_PHY_ADR_WIDTH 16
++
++/* PHY management ID register */
++#define FCN_MD_ID_REG_KER 0xc40
++#define FCN_MD_PRT_ADR_LBN 11
++#define FCN_MD_PRT_ADR_WIDTH 5
++#define FCN_MD_DEV_ADR_LBN 6
++#define FCN_MD_DEV_ADR_WIDTH 5
++
++/* PHY management status & mask register */
++#define FCN_MD_STAT_REG_KER 0xc50
++#define FCN_MD_BSY_LBN 0
++#define FCN_MD_BSY_WIDTH 1
++
++/* Port 0 and 1 MAC control registers */
++#define FCN_MAC0_CTRL_REG_KER 0xc80
++#define FCN_MAC1_CTRL_REG_KER 0xc90
++#define FCN_MAC_XOFF_VAL_LBN 16
++#define FCN_MAC_XOFF_VAL_WIDTH 16
++#define FCN_MAC_BCAD_ACPT_LBN 4
++#define FCN_MAC_BCAD_ACPT_WIDTH 1
++#define FCN_MAC_UC_PROM_LBN 3
++#define FCN_MAC_UC_PROM_WIDTH 1
++#define FCN_MAC_LINK_STATUS_LBN 2
++#define FCN_MAC_LINK_STATUS_WIDTH 1
++#define FCN_MAC_SPEED_LBN 0
++#define FCN_MAC_SPEED_WIDTH 2
++
++/* XGMAC global configuration - port 0*/
++#define FCN_XM_GLB_CFG_REG_P0_KER 0x1220
++#define FCN_XM_RX_STAT_EN_LBN 11
++#define FCN_XM_RX_STAT_EN_WIDTH 1
++#define FCN_XM_TX_STAT_EN_LBN 10
++#define FCN_XM_TX_STAT_EN_WIDTH 1
++#define FCN_XM_CUT_THRU_MODE_LBN 7
++#define FCN_XM_CUT_THRU_MODE_WIDTH 1
++#define FCN_XM_RX_JUMBO_MODE_LBN 6
++#define FCN_XM_RX_JUMBO_MODE_WIDTH 1
++
++/* XGMAC transmit configuration - port 0 */
++#define FCN_XM_TX_CFG_REG_P0_KER 0x1230
++#define FCN_XM_IPG_LBN 16
++#define FCN_XM_IPG_WIDTH 4
++#define FCN_XM_WTF_DOES_THIS_DO_LBN 9
++#define FCN_XM_WTF_DOES_THIS_DO_WIDTH 1
++#define FCN_XM_TXCRC_LBN 8
++#define FCN_XM_TXCRC_WIDTH 1
++#define FCN_XM_AUTO_PAD_LBN 5
++#define FCN_XM_AUTO_PAD_WIDTH 1
++#define FCN_XM_TX_PRMBL_LBN 2
++#define FCN_XM_TX_PRMBL_WIDTH 1
++#define FCN_XM_TXEN_LBN 1
++#define FCN_XM_TXEN_WIDTH 1
++
++/* XGMAC receive configuration - port 0 */
++#define FCN_XM_RX_CFG_REG_P0_KER 0x1240
++#define FCN_XM_PASS_CRC_ERR_LBN 25
++#define FCN_XM_PASS_CRC_ERR_WIDTH 1
++#define FCN_XM_AUTO_DEPAD_LBN 8
++#define FCN_XM_AUTO_DEPAD_WIDTH 1
++#define FCN_XM_RXEN_LBN 1
++#define FCN_XM_RXEN_WIDTH 1
++
++/* Receive descriptor pointer table */
++#define FCN_RX_DESC_PTR_TBL_KER 0x11800
++#define FCN_RX_DESCQ_BUF_BASE_ID_LBN 36
++#define FCN_RX_DESCQ_BUF_BASE_ID_WIDTH 20
++#define FCN_RX_DESCQ_EVQ_ID_LBN 24
++#define FCN_RX_DESCQ_EVQ_ID_WIDTH 12
++#define FCN_RX_DESCQ_OWNER_ID_LBN 10
++#define FCN_RX_DESCQ_OWNER_ID_WIDTH 14
++#define FCN_RX_DESCQ_SIZE_LBN 3
++#define FCN_RX_DESCQ_SIZE_WIDTH 2
++#define FCN_RX_DESCQ_SIZE_4K 3
++#define FCN_RX_DESCQ_SIZE_2K 2
++#define FCN_RX_DESCQ_SIZE_1K 1
++#define FCN_RX_DESCQ_SIZE_512 0
++#define FCN_RX_DESCQ_TYPE_LBN 2
++#define FCN_RX_DESCQ_TYPE_WIDTH 1
++#define FCN_RX_DESCQ_JUMBO_LBN 1
++#define FCN_RX_DESCQ_JUMBO_WIDTH 1
++#define FCN_RX_DESCQ_EN_LBN 0
++#define FCN_RX_DESCQ_EN_WIDTH 1
++
++/* Transmit descriptor pointer table */
++#define FCN_TX_DESC_PTR_TBL_KER 0x11900
++#define FCN_TX_DESCQ_EN_LBN 88
++#define FCN_TX_DESCQ_EN_WIDTH 1
++#define FCN_TX_DESCQ_BUF_BASE_ID_LBN 36
++#define FCN_TX_DESCQ_BUF_BASE_ID_WIDTH 20
++#define FCN_TX_DESCQ_EVQ_ID_LBN 24
++#define FCN_TX_DESCQ_EVQ_ID_WIDTH 12
++#define FCN_TX_DESCQ_OWNER_ID_LBN 10
++#define FCN_TX_DESCQ_OWNER_ID_WIDTH 14
++#define FCN_TX_DESCQ_SIZE_LBN 3
++#define FCN_TX_DESCQ_SIZE_WIDTH 2
++#define FCN_TX_DESCQ_SIZE_4K 3
++#define FCN_TX_DESCQ_SIZE_2K 2
++#define FCN_TX_DESCQ_SIZE_1K 1
++#define FCN_TX_DESCQ_SIZE_512 0
++#define FCN_TX_DESCQ_TYPE_LBN 1
++#define FCN_TX_DESCQ_TYPE_WIDTH 2
++#define FCN_TX_DESCQ_FLUSH_LBN 0
++#define FCN_TX_DESCQ_FLUSH_WIDTH 1
++
++/* Event queue pointer */
++#define FCN_EVQ_PTR_TBL_KER 0x11a00
++#define FCN_EVQ_EN_LBN 23
++#define FCN_EVQ_EN_WIDTH 1
++#define FCN_EVQ_SIZE_LBN 20
++#define FCN_EVQ_SIZE_WIDTH 3
++#define FCN_EVQ_SIZE_32K 6
++#define FCN_EVQ_SIZE_16K 5
++#define FCN_EVQ_SIZE_8K 4
++#define FCN_EVQ_SIZE_4K 3
++#define FCN_EVQ_SIZE_2K 2
++#define FCN_EVQ_SIZE_1K 1
++#define FCN_EVQ_SIZE_512 0
++#define FCN_EVQ_BUF_BASE_ID_LBN 0
++#define FCN_EVQ_BUF_BASE_ID_WIDTH 20
++
++/* Event queue read pointer */
++#define FCN_EVQ_RPTR_REG_KER 0x11b00
++#define FCN_EVQ_RPTR_LBN 0
++#define FCN_EVQ_RPTR_WIDTH 14
++#define FCN_EVQ_RPTR_REG_KER_DWORD ( FCN_EVQ_RPTR_REG_KER + 0 )
++#define FCN_EVQ_RPTR_DWORD_LBN 0
++#define FCN_EVQ_RPTR_DWORD_WIDTH 14
++
++/* Special buffer descriptors */
++#define FCN_BUF_FULL_TBL_KER 0x18000
++#define FCN_IP_DAT_BUF_SIZE_LBN 50
++#define FCN_IP_DAT_BUF_SIZE_WIDTH 1
++#define FCN_IP_DAT_BUF_SIZE_8K 1
++#define FCN_IP_DAT_BUF_SIZE_4K 0
++#define FCN_BUF_ADR_FBUF_LBN 14
++#define FCN_BUF_ADR_FBUF_WIDTH 34
++#define FCN_BUF_OWNER_ID_FBUF_LBN 0
++#define FCN_BUF_OWNER_ID_FBUF_WIDTH 14
++
++/* MAC registers */
++#define FALCON_MAC_REGBANK 0xe00
++#define FALCON_MAC_REGBANK_SIZE 0x200
++#define FALCON_MAC_REG_SIZE 0x10
++
++/** Offset of a MAC register within Falcon */
++#define FALCON_MAC_REG( efab, mac_reg ) \
++ ( FALCON_MAC_REGBANK + \
++ ( (efab)->port * FALCON_MAC_REGBANK_SIZE ) + \
++ ( (mac_reg) * FALCON_MAC_REG_SIZE ) )
++#define FCN_MAC_DATA_LBN 0
++#define FCN_MAC_DATA_WIDTH 32
++
++/* Transmit descriptor */
++#define FCN_TX_KER_PORT_LBN 63
++#define FCN_TX_KER_PORT_WIDTH 1
++#define FCN_TX_KER_BYTE_CNT_LBN 48
++#define FCN_TX_KER_BYTE_CNT_WIDTH 14
++#define FCN_TX_KER_BUF_ADR_LBN 0
++#define FCN_TX_KER_BUF_ADR_WIDTH EFAB_DMA_TYPE_WIDTH ( 46 )
++
++/* Receive descriptor */
++#define FCN_RX_KER_BUF_SIZE_LBN 48
++#define FCN_RX_KER_BUF_SIZE_WIDTH 14
++#define FCN_RX_KER_BUF_ADR_LBN 0
++#define FCN_RX_KER_BUF_ADR_WIDTH EFAB_DMA_TYPE_WIDTH ( 46 )
++
++/* Event queue entries */
++#define FCN_EV_CODE_LBN 60
++#define FCN_EV_CODE_WIDTH 4
++#define FCN_RX_IP_EV_DECODE 0
++#define FCN_TX_IP_EV_DECODE 2
++#define FCN_DRIVER_EV_DECODE 5
++
++/* Receive events */
++#define FCN_RX_PORT_LBN 30
++#define FCN_RX_PORT_WIDTH 1
++#define FCN_RX_EV_BYTE_CNT_LBN 16
++#define FCN_RX_EV_BYTE_CNT_WIDTH 14
++#define FCN_RX_EV_DESC_PTR_LBN 0
++#define FCN_RX_EV_DESC_PTR_WIDTH 12
++
++/* Transmit events */
++#define FCN_TX_EV_DESC_PTR_LBN 0
++#define FCN_TX_EV_DESC_PTR_WIDTH 12
++
++/* Fixed special buffer numbers to use */
++#define FALCON_EVQ_ID 0
++#define FALCON_TXD_ID 1
++#define FALCON_RXD_ID 2
++
++#if FALCON_USE_IO_BAR
++
++/* Write dword via the I/O BAR: latch the target register offset in
++ * the indirect-address register, then write the data register.
++ */
++static inline void _falcon_writel ( struct efab_nic *efab, uint32_t value,
++ unsigned int reg ) {
++ outl ( reg, efab->iobase + FCN_IOM_IND_ADR_REG );
++ outl ( value, efab->iobase + FCN_IOM_IND_DAT_REG );
++}
++
++/* Read dword via the I/O BAR: latch the target register offset in
++ * the indirect-address register, then read the data register.
++ */
++static inline uint32_t _falcon_readl ( struct efab_nic *efab,
++ unsigned int reg ) {
++ outl ( reg, efab->iobase + FCN_IOM_IND_ADR_REG );
++ return inl ( efab->iobase + FCN_IOM_IND_DAT_REG );
++}
++
++#else /* FALCON_USE_IO_BAR */
++
++#define _falcon_writel( efab, value, reg ) \
++ writel ( (value), (efab)->membase + (reg) )
++#define _falcon_readl( efab, reg ) readl ( (efab)->membase + (reg) )
++
++#endif /* FALCON_USE_IO_BAR */
++
++/**
++ * Write to a Falcon register
++ *
++ * Writes a full 128-bit register as four dword writes (lowest dword
++ * first), followed by a write memory barrier.
++ */
++static inline void falcon_write ( struct efab_nic *efab, efab_oword_t *value,
++ unsigned int reg ) {
++
++ EFAB_REGDUMP ( "Writing register %x with " EFAB_OWORD_FMT "\n",
++ reg, EFAB_OWORD_VAL ( *value ) );
++
++ _falcon_writel ( efab, value->u32[0], reg + 0 );
++ _falcon_writel ( efab, value->u32[1], reg + 4 );
++ _falcon_writel ( efab, value->u32[2], reg + 8 );
++ _falcon_writel ( efab, value->u32[3], reg + 12 );
++ wmb();
++}
++
++/**
++ * Write to Falcon SRAM
++ *
++ * Writes the 64-bit buffer-table entry at @index in the special
++ * buffer full table, as two dword writes followed by a write memory
++ * barrier.
++ */
++static inline void falcon_write_sram ( struct efab_nic *efab,
++ efab_qword_t *value,
++ unsigned int index ) {
++ unsigned int reg = ( FCN_BUF_FULL_TBL_KER +
++ ( index * sizeof ( *value ) ) );
++
++ EFAB_REGDUMP ( "Writing SRAM register %x with " EFAB_QWORD_FMT "\n",
++ reg, EFAB_QWORD_VAL ( *value ) );
++
++ _falcon_writel ( efab, value->u32[0], reg + 0 );
++ _falcon_writel ( efab, value->u32[1], reg + 4 );
++ wmb();
++}
++
++/**
++ * Write dword to Falcon register that allows partial writes
++ *
++ * Single-dword write; only valid for registers documented as
++ * supporting partial (sub-128-bit) access.
++ */
++static inline void falcon_writel ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int reg ) {
++ EFAB_REGDUMP ( "Writing partial register %x with " EFAB_DWORD_FMT "\n",
++ reg, EFAB_DWORD_VAL ( *value ) );
++ _falcon_writel ( efab, value->u32[0], reg );
++}
++
++/**
++ * Read from a Falcon register
++ *
++ * Reads a full 128-bit register as four dword reads (lowest dword
++ * first) and logs the result.
++ */
++static inline void falcon_read ( struct efab_nic *efab, efab_oword_t *value,
++ unsigned int reg ) {
++ value->u32[0] = _falcon_readl ( efab, reg + 0 );
++ value->u32[1] = _falcon_readl ( efab, reg + 4 );
++ value->u32[2] = _falcon_readl ( efab, reg + 8 );
++ value->u32[3] = _falcon_readl ( efab, reg + 12 );
++
++ EFAB_REGDUMP ( "Read from register %x, got " EFAB_OWORD_FMT "\n",
++ reg, EFAB_OWORD_VAL ( *value ) );
++}
++
++/**
++ * Read from Falcon SRAM
++ *
++ * Reads back the 64-bit buffer-table entry at @index in the special
++ * buffer full table, as two dword reads.
++ */
++static inline void falcon_read_sram ( struct efab_nic *efab,
++ efab_qword_t *value,
++ unsigned int index ) {
++ unsigned int reg = ( FCN_BUF_FULL_TBL_KER +
++ ( index * sizeof ( *value ) ) );
++
++ value->u32[0] = _falcon_readl ( efab, reg + 0 );
++ value->u32[1] = _falcon_readl ( efab, reg + 4 );
++ EFAB_REGDUMP ( "Read from SRAM register %x, got " EFAB_QWORD_FMT "\n",
++ reg, EFAB_QWORD_VAL ( *value ) );
++}
++
++/**
++ * Read dword from a portion of a Falcon register
++ *
++ * Single-dword read of the lowest 32 bits at offset @reg.
++ */
++static inline void falcon_readl ( struct efab_nic *efab, efab_dword_t *value,
++ unsigned int reg ) {
++ value->u32[0] = _falcon_readl ( efab, reg );
++ EFAB_REGDUMP ( "Read from register %x, got " EFAB_DWORD_FMT "\n",
++ reg, EFAB_DWORD_VAL ( *value ) );
++}
++
++/**
++ * Verified write to Falcon SRAM
++ *
++ * Writes the buffer-table entry at @index, waits for the write to
++ * land, then reads it back and reports a mismatch on the console.
++ * Best-effort: a verify failure is logged but not propagated.
++ */
++static inline void falcon_write_sram_verify ( struct efab_nic *efab,
++ efab_qword_t *value,
++ unsigned int index ) {
++ efab_qword_t verify;
++
++ falcon_write_sram ( efab, value, index );
++ udelay ( 1000 );
++ falcon_read_sram ( efab, &verify, index );
++ if ( memcmp ( &verify, value, sizeof ( verify ) ) != 0 ) {
++ printf ( "SRAM index %x failure: wrote " EFAB_QWORD_FMT
++ " got " EFAB_QWORD_FMT "\n", index,
++ EFAB_QWORD_VAL ( *value ),
++ EFAB_QWORD_VAL ( verify ) );
++ }
++}
++
++/**
++ * Get memory base
++ *
++ * Maps Falcon's BAR2 (128KB register window) and records the virtual
++ * address in efab->membase.
++ */
++static void falcon_get_membase ( struct efab_nic *efab ) {
++ unsigned long membase_phys;
++
++ membase_phys = pci_bar_start ( efab->pci, PCI_BASE_ADDRESS_2 );
++ efab->membase = ioremap ( membase_phys, 0x20000 );
++}
++
++/* Dump one Falcon register (name and 128-bit contents) to the
++ * console.  No trailing semicolon after while(0): callers supply it,
++ * keeping the macro safe inside unbraced if/else.
++ */
++#define FCN_DUMP_REG( efab, _reg ) do { \
++ efab_oword_t reg; \
++ falcon_read ( efab, &reg, _reg ); \
++ printf ( #_reg " = " EFAB_OWORD_FMT "\n", \
++ EFAB_OWORD_VAL ( reg ) ); \
++ } while ( 0 )
++
++/* Dump one MAC register (via the per-port mac_readl operation) to
++ * the console.  No trailing semicolon after while(0): callers supply
++ * it, keeping the macro safe inside unbraced if/else.
++ */
++#define FCN_DUMP_MAC_REG( efab, _mac_reg ) do { \
++ efab_dword_t reg; \
++ efab->op->mac_readl ( efab, &reg, _mac_reg ); \
++ printf ( #_mac_reg " = " EFAB_DWORD_FMT "\n", \
++ EFAB_DWORD_VAL ( reg ) ); \
++ } while ( 0 )
++
++/**
++ * Dump register contents (for debugging)
++ *
++ * Prints the interesting Falcon control registers and the per-port
++ * MAC registers.
++ *
++ * Marked as static inline so that it will not be compiled in if not
++ * used.
++ */
++static inline void falcon_dump_regs ( struct efab_nic *efab ) {
++ FCN_DUMP_REG ( efab, FCN_INT_EN_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_INT_ADR_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_GLB_CTL_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_TIMER_CMD_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_SRM_RX_DC_CFG_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_SRM_TX_DC_CFG_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_RX_FILTER_CTL_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_RX_DC_CFG_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_TX_DC_CFG_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_MAC0_CTRL_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_MAC1_CTRL_REG_KER );
++ FCN_DUMP_REG ( efab, FCN_XM_GLB_CFG_REG_P0_KER );
++ FCN_DUMP_REG ( efab, FCN_XM_TX_CFG_REG_P0_KER );
++ FCN_DUMP_REG ( efab, FCN_XM_RX_CFG_REG_P0_KER );
++ FCN_DUMP_REG ( efab, FCN_RX_DESC_PTR_TBL_KER );
++ FCN_DUMP_REG ( efab, FCN_TX_DESC_PTR_TBL_KER );
++ FCN_DUMP_REG ( efab, FCN_EVQ_PTR_TBL_KER );
++ FCN_DUMP_MAC_REG ( efab, GM_CFG1_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GM_CFG2_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GM_MAX_FLEN_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GM_MII_MGMT_CFG_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GM_ADR1_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GM_ADR2_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG0_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG1_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG2_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG3_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG4_REG_MAC );
++ FCN_DUMP_MAC_REG ( efab, GMF_CFG5_REG_MAC );
++}
++
++/**
++ * Create special buffer
++ *
++ * Zeroes the 4K buffer at @addr and installs a buffer-table entry
++ * for it at @index (4K page size, owner ID 0).  The buffer's bus
++ * address must be buffer-aligned; the descriptor stores it shifted
++ * down by 12 bits (page number).
++ */
++static void falcon_create_special_buffer ( struct efab_nic *efab,
++ void *addr, unsigned int index ) {
++ efab_qword_t buf_desc;
++ unsigned long dma_addr;
++
++ memset ( addr, 0, 4096 );
++ dma_addr = virt_to_bus ( addr );
++ EFAB_ASSERT ( ( dma_addr & ( EFAB_BUF_ALIGN - 1 ) ) == 0 );
++ EFAB_POPULATE_QWORD_3 ( buf_desc,
++ FCN_IP_DAT_BUF_SIZE, FCN_IP_DAT_BUF_SIZE_4K,
++ FCN_BUF_ADR_FBUF, ( dma_addr >> 12 ),
++ FCN_BUF_OWNER_ID_FBUF, 0 );
++ falcon_write_sram_verify ( efab, &buf_desc, index );
++}
++
++/**
++ * Update event queue read pointer
++ *
++ * Writes the driver's current event queue read pointer to the
++ * hardware read-pointer register, acknowledging consumed events.
++ */
++static void falcon_eventq_read_ack ( struct efab_nic *efab ) {
++ efab_dword_t reg;
++
++ EFAB_ASSERT ( efab->eventq_read_ptr < EFAB_EVQ_SIZE );
++
++ EFAB_POPULATE_DWORD_1 ( reg, FCN_EVQ_RPTR_DWORD,
++ efab->eventq_read_ptr );
++ falcon_writel ( efab, &reg, FCN_EVQ_RPTR_REG_KER_DWORD );
++}
++
++/**
++ * Reset device
++ *
++ * Initiates a Falcon software reset, excluding the external PHY,
++ * PCIe SerDes and PCI-X blocks from the reset.  Waits 20ms, then
++ * checks that the hardware has cleared the SWRST bit.  Returns 1 on
++ * success, 0 on failure.
++ */
++static int falcon_reset ( struct efab_nic *efab ) {
++ efab_oword_t glb_ctl_reg_ker;
++
++ /* Initiate software reset */
++ EFAB_POPULATE_OWORD_5 ( glb_ctl_reg_ker,
++ FCN_EXT_PHY_RST_CTL, FCN_EXCLUDE_FROM_RESET,
++ FCN_PCIE_SD_RST_CTL, FCN_EXCLUDE_FROM_RESET,
++ FCN_PCIX_RST_CTL, FCN_EXCLUDE_FROM_RESET,
++ FCN_INT_RST_DUR, 0x7 /* datasheet */,
++ FCN_SWRST, 1 );
++ falcon_write ( efab, &glb_ctl_reg_ker, FCN_GLB_CTL_REG_KER );
++
++ /* Allow 20ms for reset */
++ mdelay ( 20 );
++
++ /* Check for device reset complete */
++ falcon_read ( efab, &glb_ctl_reg_ker, FCN_GLB_CTL_REG_KER );
++ if ( EFAB_OWORD_FIELD ( glb_ctl_reg_ker, FCN_SWRST ) != 0 ) {
++ printf ( "Reset failed\n" );
++ return 0;
++ }
++
++ return 1;
++}
++
++/**
++ * Initialise NIC
++ *
++ * Sets up descriptor caches, the MAC, the event queue, the TX and RX
++ * descriptor rings and the interrupt address register.  Always
++ * returns 1.
++ *
++ * NOTE: every "&reg" in this function had been mangled to the
++ * "registered" sign by HTML-entity decoding; restored throughout.
++ */
++static int falcon_init_nic ( struct efab_nic *efab ) {
++	efab_oword_t reg;
++	efab_dword_t timer_cmd;
++
++	/* Set up TX and RX descriptor caches in SRAM */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_SRM_TX_DC_BASE_ADR,
++				0x130000 /* recommended in datasheet */ );
++	falcon_write ( efab, &reg, FCN_SRM_TX_DC_CFG_REG_KER );
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_TX_DC_SIZE, 2 /* 32 descriptors */ );
++	falcon_write ( efab, &reg, FCN_TX_DC_CFG_REG_KER );
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_SRM_RX_DC_BASE_ADR,
++				0x100000 /* recommended in datasheet */ );
++	falcon_write ( efab, &reg, FCN_SRM_RX_DC_CFG_REG_KER );
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_RX_DC_SIZE, 2 /* 32 descriptors */ );
++	falcon_write ( efab, &reg, FCN_RX_DC_CFG_REG_KER );
++
++	/* Set number of RSS CPUs */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_NUM_KER, 0 );
++	falcon_write ( efab, &reg, FCN_RX_FILTER_CTL_REG_KER );
++	udelay ( 1000 );
++
++	/* Reset the MAC */
++	mentormac_reset ( efab, 1 );
++	/* Take MAC out of reset */
++	mentormac_reset ( efab, 0 );
++
++	/* Set up event queue */
++	falcon_create_special_buffer ( efab, efab->eventq, FALCON_EVQ_ID );
++	EFAB_POPULATE_OWORD_3 ( reg,
++				FCN_EVQ_EN, 1,
++				FCN_EVQ_SIZE, FCN_EVQ_SIZE_512,
++				FCN_EVQ_BUF_BASE_ID, FALCON_EVQ_ID );
++	falcon_write ( efab, &reg, FCN_EVQ_PTR_TBL_KER );
++	udelay ( 1000 );
++
++	/* Set timer register */
++	EFAB_POPULATE_DWORD_2 ( timer_cmd,
++				FCN_TIMER_MODE, FCN_TIMER_MODE_DIS,
++				FCN_TIMER_VAL, 0 );
++	falcon_writel ( efab, &timer_cmd, FCN_TIMER_CMD_REG_KER );
++	udelay ( 1000 );
++
++	/* Initialise event queue read pointer */
++	falcon_eventq_read_ack ( efab );
++
++	/* Set up TX descriptor ring */
++	falcon_create_special_buffer ( efab, efab->txd, FALCON_TXD_ID );
++	EFAB_POPULATE_OWORD_5 ( reg,
++				FCN_TX_DESCQ_EN, 1,
++				FCN_TX_DESCQ_BUF_BASE_ID, FALCON_TXD_ID,
++				FCN_TX_DESCQ_EVQ_ID, 0,
++				FCN_TX_DESCQ_SIZE, FCN_TX_DESCQ_SIZE_512,
++				FCN_TX_DESCQ_TYPE, 0 /* kernel queue */ );
++	falcon_write ( efab, &reg, FCN_TX_DESC_PTR_TBL_KER );
++
++	/* Set up RX descriptor ring */
++	falcon_create_special_buffer ( efab, efab->rxd, FALCON_RXD_ID );
++	EFAB_POPULATE_OWORD_6 ( reg,
++				FCN_RX_DESCQ_BUF_BASE_ID, FALCON_RXD_ID,
++				FCN_RX_DESCQ_EVQ_ID, 0,
++				FCN_RX_DESCQ_SIZE, FCN_RX_DESCQ_SIZE_512,
++				FCN_RX_DESCQ_TYPE, 0 /* kernel queue */,
++				FCN_RX_DESCQ_JUMBO, 1,
++				FCN_RX_DESCQ_EN, 1 );
++	falcon_write ( efab, &reg, FCN_RX_DESC_PTR_TBL_KER );
++
++	/* Program INT_ADR_REG_KER */
++	EFAB_POPULATE_OWORD_1 ( reg,
++				FCN_INT_ADR_KER,
++				virt_to_bus ( &efab->int_ker ) );
++	falcon_write ( efab, &reg, FCN_INT_ADR_REG_KER );
++	udelay ( 1000 );
++
++	return 1;
++}
++
++/** SPI device
++ *
++ * Describes one serial EEPROM/Flash part hanging off the NIC's SPI
++ * host interface, as needed by falcon_spi_read().
++ */
++struct efab_spi_device {
++	/** Device ID (FCN_EE_SPI_FLASH or FCN_EE_SPI_EEPROM) */
++	unsigned int device_id;
++	/** Address length (in bytes) */
++	unsigned int addr_len;
++	/** Read command */
++	unsigned int read_command;
++};
++
++/**
++ * Wait for SPI command completion
++ *
++ * Polls the CMD_EN bit of the SPI host-command register.  Returns 1
++ * when the command has completed, 0 after ~100ms without completion.
++ */
++static int falcon_spi_wait ( struct efab_nic *efab ) {
++	efab_oword_t reg;
++	int count;
++
++	count = 0;
++	do {
++		udelay ( 100 );
++		/* "&reg" restored; it had been mangled to the
++		 * "registered" sign by HTML-entity decoding. */
++		falcon_read ( efab, &reg, FCN_EE_SPI_HCMD_REG_KER );
++		if ( EFAB_OWORD_FIELD ( reg, FCN_EE_SPI_HCMD_CMD_EN ) == 0 )
++			return 1;
++	} while ( ++count < 1000 );
++	printf ( "Timed out waiting for SPI\n" );
++	return 0;
++}
++
++/**
++ * Perform SPI read
++ *
++ * Reads @len bytes (at most the width of the SPI data register) from
++ * @address on the given SPI device into @data.  Returns 1 on success,
++ * 0 on timeout.
++ *
++ * NOTE: every "&reg" here had been mangled to the "registered" sign
++ * by HTML-entity decoding; restored throughout.
++ */
++static int falcon_spi_read ( struct efab_nic *efab,
++			     struct efab_spi_device *spi,
++			     int address, void *data, unsigned int len ) {
++	efab_oword_t reg;
++
++	/* Program address register */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_EE_SPI_HADR_ADR, address );
++	falcon_write ( efab, &reg, FCN_EE_SPI_HADR_REG_KER );
++
++	/* Issue read command */
++	EFAB_POPULATE_OWORD_7 ( reg,
++				FCN_EE_SPI_HCMD_CMD_EN, 1,
++				FCN_EE_SPI_HCMD_SF_SEL, spi->device_id,
++				FCN_EE_SPI_HCMD_DABCNT, len,
++				FCN_EE_SPI_HCMD_READ, FCN_EE_SPI_READ,
++				FCN_EE_SPI_HCMD_DUBCNT, 0,
++				FCN_EE_SPI_HCMD_ADBCNT, spi->addr_len,
++				FCN_EE_SPI_HCMD_ENC, spi->read_command );
++	falcon_write ( efab, &reg, FCN_EE_SPI_HCMD_REG_KER );
++
++	/* Wait for read to complete */
++	if ( ! falcon_spi_wait ( efab ) )
++		return 0;
++
++	/* Read data */
++	falcon_read ( efab, &reg, FCN_EE_SPI_HDATA_REG_KER );
++	memcpy ( data, &reg, len );
++
++	return 1;
++}
++
++/* Standard SPI READ opcode shared by both supported parts */
++#define SPI_READ_CMD 0x03
++/* Atmel AT25F1024 Flash: 3 address bytes */
++#define AT25F1024_ADDR_LEN 3
++#define AT25F1024_READ_CMD SPI_READ_CMD
++/* Microchip 25xx640 EEPROM: 2 address bytes */
++#define MC25XX640_ADDR_LEN 2
++#define MC25XX640_READ_CMD SPI_READ_CMD
++
++/** Falcon Flash SPI device */
++static struct efab_spi_device falcon_spi_flash = {
++	.device_id	= FCN_EE_SPI_FLASH,
++	.addr_len	= AT25F1024_ADDR_LEN,
++	.read_command	= AT25F1024_READ_CMD,
++};
++
++/** Falcon EEPROM SPI device */
++static struct efab_spi_device falcon_spi_large_eeprom = {
++	.device_id	= FCN_EE_SPI_EEPROM,
++	.addr_len	= MC25XX640_ADDR_LEN,
++	.read_command	= MC25XX640_READ_CMD,
++};
++
++/** Offset of MAC address within EEPROM or Flash (per-port records) */
++#define FALCON_MAC_ADDRESS_OFFSET(port) ( 0x310 + 0x08 * (port) )
++
++/**
++ * Read MAC address from EEPROM
++ *
++ * Uses the FLASH_PRESENT GPIO strap to pick the SPI device holding
++ * the MAC address, then reads it into efab->mac_addr.  Returns the
++ * result of falcon_spi_read() (1 on success, 0 on failure).
++ */
++static int falcon_read_eeprom ( struct efab_nic *efab ) {
++	efab_oword_t reg;
++	int has_flash;
++	struct efab_spi_device *spi;
++
++	/* Determine the SPI device containing the MAC address */
++	/* "&reg" restored; it had been mangled to the "registered"
++	 * sign by HTML-entity decoding. */
++	falcon_read ( efab, &reg, FCN_GPIO_CTL_REG_KER );
++	has_flash = EFAB_OWORD_FIELD ( reg, FCN_FLASH_PRESENT );
++	spi = has_flash ? &falcon_spi_flash : &falcon_spi_large_eeprom;
++
++	return falcon_spi_read ( efab, spi,
++				 FALCON_MAC_ADDRESS_OFFSET ( efab->port ),
++				 efab->mac_addr, sizeof ( efab->mac_addr ) );
++}
++
++/** RX descriptor */
++typedef efab_qword_t falcon_rx_desc_t;
++
++/**
++ * Build RX descriptor
++ *
++ * Fills in the descriptor-ring slot selected by rx_buf->id with the
++ * buffer's bus address and size.  Does not advance the write pointer;
++ * see falcon_notify_rx_desc().
++ */
++static void falcon_build_rx_desc ( struct efab_nic *efab,
++				   struct efab_rx_buf *rx_buf ) {
++	falcon_rx_desc_t *rxd;
++
++	rxd = ( ( falcon_rx_desc_t * ) efab->rxd ) + rx_buf->id;
++	EFAB_POPULATE_QWORD_2 ( *rxd,
++				FCN_RX_KER_BUF_SIZE, EFAB_DATA_BUF_SIZE,
++				FCN_RX_KER_BUF_ADR,
++				virt_to_bus ( rx_buf->addr ) );
++}
++
++/**
++ * Update RX descriptor write pointer
++ *
++ * Publishes efab->rx_write_ptr to the NIC so newly built RX
++ * descriptors become visible.
++ */
++static void falcon_notify_rx_desc ( struct efab_nic *efab ) {
++	efab_dword_t reg;
++
++	EFAB_POPULATE_DWORD_1 ( reg, FCN_RX_DESC_WPTR_DWORD,
++				efab->rx_write_ptr );
++	/* "&reg" restored; it had been mangled to the "registered"
++	 * sign by HTML-entity decoding. */
++	falcon_writel ( efab, &reg, FCN_RX_DESC_UPD_REG_KER_DWORD );
++}
++
++/** TX descriptor */
++typedef efab_qword_t falcon_tx_desc_t;
++
++/**
++ * Build TX descriptor
++ *
++ * Fills in the descriptor-ring slot selected by tx_buf->id with the
++ * buffer's port, length and bus address.  Does not advance the write
++ * pointer; see falcon_notify_tx_desc().
++ */
++static void falcon_build_tx_desc ( struct efab_nic *efab,
++				   struct efab_tx_buf *tx_buf ) {
++	/* Use the TX descriptor typedef; the original declared these
++	 * as falcon_rx_desc_t (same underlying efab_qword_t, so no
++	 * behavioural change, but misleading). */
++	falcon_tx_desc_t *txd;
++
++	txd = ( ( falcon_tx_desc_t * ) efab->txd ) + tx_buf->id;
++	EFAB_POPULATE_QWORD_3 ( *txd,
++				FCN_TX_KER_PORT, efab->port,
++				FCN_TX_KER_BYTE_CNT, tx_buf->len,
++				FCN_TX_KER_BUF_ADR,
++				virt_to_bus ( tx_buf->addr ) );
++}
++
++/**
++ * Update TX descriptor write pointer
++ *
++ * Publishes efab->tx_write_ptr to the NIC so newly built TX
++ * descriptors become visible.
++ */
++static void falcon_notify_tx_desc ( struct efab_nic *efab ) {
++	efab_dword_t reg;
++
++	EFAB_POPULATE_DWORD_1 ( reg, FCN_TX_DESC_WPTR_DWORD,
++				efab->tx_write_ptr );
++	/* "&reg" restored; it had been mangled to the "registered"
++	 * sign by HTML-entity decoding. */
++	falcon_writel ( efab, &reg, FCN_TX_DESC_UPD_REG_KER_DWORD );
++}
++
++/** An event */
++typedef efab_qword_t falcon_event_t;
++
++/**
++ * Retrieve event from event queue
++ *
++ * Examines the next event-queue slot.  If empty, returns 0.
++ * Otherwise decodes the event into *event, clears the slot, acks any
++ * pending interrupt, advances the read pointer and returns 1.
++ */
++static int falcon_fetch_event ( struct efab_nic *efab,
++				struct efab_event *event ) {
++	falcon_event_t *evt;
++	int ev_code;
++	int rx_port;
++
++	/* Check for event */
++	evt = ( ( falcon_event_t * ) efab->eventq ) + efab->eventq_read_ptr;
++	if ( EFAB_QWORD_IS_ZERO ( *evt ) ) {
++		/* No event */
++		return 0;
++	}
++
++	DBG ( "Event is " EFAB_QWORD_FMT "\n", EFAB_QWORD_VAL ( *evt ) );
++
++	/* Decode event */
++	ev_code = EFAB_QWORD_FIELD ( *evt, FCN_EV_CODE );
++	switch ( ev_code ) {
++	case FCN_TX_IP_EV_DECODE:
++		event->type = EFAB_EV_TX;
++		break;
++	case FCN_RX_IP_EV_DECODE:
++		event->type = EFAB_EV_RX;
++		event->rx_id = EFAB_QWORD_FIELD ( *evt, FCN_RX_EV_DESC_PTR );
++		event->rx_len = EFAB_QWORD_FIELD ( *evt, FCN_RX_EV_BYTE_CNT );
++		rx_port = EFAB_QWORD_FIELD ( *evt, FCN_RX_PORT );
++		if ( rx_port != efab->port ) {
++			/* Ignore packets on the wrong port.  We can't
++			 * just set event->type = EFAB_EV_NONE,
++			 * because then the descriptor ring won't get
++			 * refilled.
++			 */
++			event->rx_len = 0;
++		}
++		break;
++	case FCN_DRIVER_EV_DECODE:
++		/* Ignore start-of-day events */
++		event->type = EFAB_EV_NONE;
++		break;
++	default:
++		printf ( "Unknown event type %d\n", ev_code );
++		event->type = EFAB_EV_NONE;
++	}
++
++	/* Clear event and any pending interrupts */
++	EFAB_ZERO_QWORD ( *evt );
++	/* NOTE(review): passes 0 (null pointer) as the value argument;
++	 * presumably falcon_writel tolerates this for a pure ack write
++	 * — confirm against falcon_writel's definition. */
++	falcon_writel ( efab, 0, FCN_INT_ACK_KER_REG );
++	udelay ( 10 );
++
++	/* Increment and update event queue read pointer */
++	efab->eventq_read_ptr = ( ( efab->eventq_read_ptr + 1 )
++				  % EFAB_EVQ_SIZE );
++	falcon_eventq_read_ack ( efab );
++
++	return 1;
++}
++
++/**
++ * Enable/disable/generate interrupt
++ *
++ * @v enabled	non-zero to enable driver interrupts
++ * @v force	non-zero to force-generate an interrupt now
++ */
++static inline void falcon_interrupts ( struct efab_nic *efab, int enabled,
++				       int force ) {
++	efab_oword_t int_en_reg_ker;
++
++	EFAB_POPULATE_OWORD_2 ( int_en_reg_ker,
++				FCN_KER_INT_KER, force,
++				FCN_DRV_INT_EN_KER, enabled );
++	falcon_write ( efab, &int_en_reg_ker, FCN_INT_EN_REG_KER );
++}
++
++/**
++ * Enable/disable interrupts
++ *
++ * When enabling, also acks the event queue read pointer, since
++ * events won't trigger interrupts until that is done.
++ */
++static void falcon_mask_irq ( struct efab_nic *efab, int enabled ) {
++	falcon_interrupts ( efab, enabled, 0 );
++	if ( enabled ) {
++		/* Events won't trigger interrupts until we do this */
++		falcon_eventq_read_ack ( efab );
++	}
++}
++
++/**
++ * Generate interrupt
++ *
++ * Forces an interrupt with interrupts left enabled.
++ */
++static void falcon_generate_irq ( struct efab_nic *efab ) {
++	falcon_interrupts ( efab, 1, 1 );
++}
++
++/**
++ * Write dword to a Falcon MAC register
++ *
++ * MAC registers are accessed as owords; the dword value is widened
++ * into the FCN_MAC_DATA field before writing.
++ */
++static void falcon_mac_writel ( struct efab_nic *efab,
++				efab_dword_t *value, unsigned int mac_reg ) {
++	efab_oword_t temp;
++
++	EFAB_POPULATE_OWORD_1 ( temp, FCN_MAC_DATA,
++				EFAB_DWORD_FIELD ( *value, FCN_MAC_DATA ) );
++	falcon_write ( efab, &temp, FALCON_MAC_REG ( efab, mac_reg ) );
++}
++
++/**
++ * Read dword from a Falcon MAC register
++ *
++ * Counterpart of falcon_mac_writel(): reads the oword register and
++ * narrows the FCN_MAC_DATA field into *value.
++ */
++static void falcon_mac_readl ( struct efab_nic *efab, efab_dword_t *value,
++			       unsigned int mac_reg ) {
++	efab_oword_t temp;
++
++	falcon_read ( efab, &temp, FALCON_MAC_REG ( efab, mac_reg ) );
++	EFAB_POPULATE_DWORD_1 ( *value, FCN_MAC_DATA,
++				EFAB_OWORD_FIELD ( temp, FCN_MAC_DATA ) );
++}
++
++/**
++ * Initialise MAC
++ *
++ * Initialises the PHY and Mentor MAC, then configures the Falcon MAC
++ * wrapper (XGMAC global/TX/RX config and the per-port MAC control
++ * register, including link speed derived from efab->link_options).
++ * Always returns 1.
++ *
++ * NOTE: every "&reg" here had been mangled to the "registered" sign
++ * by HTML-entity decoding; restored throughout.
++ */
++static int falcon_init_mac ( struct efab_nic *efab ) {
++	static struct efab_mentormac_parameters falcon_mentormac_params = {
++		.gmf_cfgfrth = 0x12,
++		.gmf_cfgftth = 0x08,
++		.gmf_cfghwmft = 0x1c,
++		.gmf_cfghwm = 0x3f,
++		.gmf_cfglwm = 0xa,
++	};
++	efab_oword_t reg;
++	int link_speed;
++
++	/* Initialise PHY */
++	alaska_init ( efab );
++
++	/* Initialise MAC */
++	mentormac_init ( efab, &falcon_mentormac_params );
++
++	/* Configure the Falcon MAC wrapper */
++	EFAB_POPULATE_OWORD_4 ( reg,
++				FCN_XM_RX_JUMBO_MODE, 0,
++				FCN_XM_CUT_THRU_MODE, 0,
++				FCN_XM_TX_STAT_EN, 1,
++				FCN_XM_RX_STAT_EN, 1);
++	falcon_write ( efab, &reg, FCN_XM_GLB_CFG_REG_P0_KER );
++
++	EFAB_POPULATE_OWORD_6 ( reg,
++				FCN_XM_TXEN, 1,
++				FCN_XM_TX_PRMBL, 1,
++				FCN_XM_AUTO_PAD, 1,
++				FCN_XM_TXCRC, 1,
++				FCN_XM_WTF_DOES_THIS_DO, 1,
++				FCN_XM_IPG, 0x3 );
++	falcon_write ( efab, &reg, FCN_XM_TX_CFG_REG_P0_KER );
++
++	EFAB_POPULATE_OWORD_3 ( reg,
++				FCN_XM_RXEN, 1,
++				FCN_XM_AUTO_DEPAD, 1,
++				FCN_XM_PASS_CRC_ERR, 1 );
++	falcon_write ( efab, &reg, FCN_XM_RX_CFG_REG_P0_KER );
++
++#warning "10G support not yet present"
++#define LPA_10000 0
++	if ( efab->link_options & LPA_10000 ) {
++		link_speed = 0x3;
++	} else if ( efab->link_options & LPA_1000 ) {
++		link_speed = 0x2;
++	} else if ( efab->link_options & LPA_100 ) {
++		link_speed = 0x1;
++	} else {
++		link_speed = 0x0;
++	}
++	EFAB_POPULATE_OWORD_5 ( reg,
++				FCN_MAC_XOFF_VAL, 0xffff /* datasheet */,
++				FCN_MAC_BCAD_ACPT, 1,
++				FCN_MAC_UC_PROM, 0,
++				FCN_MAC_LINK_STATUS, 1,
++				FCN_MAC_SPEED, link_speed );
++	falcon_write ( efab, &reg, ( efab->port == 0 ?
++			FCN_MAC0_CTRL_REG_KER : FCN_MAC1_CTRL_REG_KER ) );
++
++	return 1;
++}
++
++/**
++ * Wait for GMII access to complete
++ *
++ * Polls the MD_BSY bit.  Returns 1 when the MDIO bus is idle, 0
++ * after ~10ms without the bit clearing.
++ */
++static int falcon_gmii_wait ( struct efab_nic *efab ) {
++	efab_oword_t md_stat;
++	int count;
++
++	for ( count = 0 ; count < 1000 ; count++ ) {
++		udelay ( 10 );
++		falcon_read ( efab, &md_stat, FCN_MD_STAT_REG_KER );
++		if ( EFAB_OWORD_FIELD ( md_stat, FCN_MD_BSY ) == 0 )
++			return 1;
++	}
++	printf ( "Timed out waiting for GMII\n" );
++	return 0;
++}
++
++/** MDIO write
++ *
++ * Writes @value to MDIO register @location of the PHY attached to
++ * this port (PHY address = port + 2).  Silently returns if the MDIO
++ * bus fails to become idle.
++ *
++ * NOTE: every "&reg" here had been mangled to the "registered" sign
++ * by HTML-entity decoding; restored throughout.
++ */
++static void falcon_mdio_write ( struct efab_nic *efab, int location,
++				int value ) {
++	int phy_id = efab->port + 2;
++	efab_oword_t reg;
++
++#warning "10G PHY access not yet in place"
++
++	EFAB_TRACE ( "Writing GMII %d register %02x with %04x\n",
++		     phy_id, location, value );
++
++	/* Check MII not currently being accessed */
++	if ( ! falcon_gmii_wait ( efab ) )
++		return;
++
++	/* Write the address registers */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_MD_PHY_ADR, 0 /* phy_id ? */ );
++	falcon_write ( efab, &reg, FCN_MD_PHY_ADR_REG_KER );
++	udelay ( 10 );
++	EFAB_POPULATE_OWORD_2 ( reg,
++				FCN_MD_PRT_ADR, phy_id,
++				FCN_MD_DEV_ADR, location );
++	falcon_write ( efab, &reg, FCN_MD_ID_REG_KER );
++	udelay ( 10 );
++
++	/* Write data */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_MD_TXD, value );
++	falcon_write ( efab, &reg, FCN_MD_TXD_REG_KER );
++	udelay ( 10 );
++	EFAB_POPULATE_OWORD_2 ( reg,
++				FCN_MD_WRC, 1,
++				FCN_MD_GC, 1 );
++	falcon_write ( efab, &reg, FCN_MD_CS_REG_KER );
++	udelay ( 10 );
++
++	/* Wait for data to be written */
++	falcon_gmii_wait ( efab );
++}
++
++/** MDIO read
++ *
++ * Reads MDIO register @location of the PHY attached to this port
++ * (PHY address = port + 2).  Returns the register value, or 0xffff
++ * if the MDIO bus is stuck busy.
++ *
++ * NOTE: every "&reg" here had been mangled to the "registered" sign
++ * by HTML-entity decoding; restored throughout.
++ */
++static int falcon_mdio_read ( struct efab_nic *efab, int location ) {
++	int phy_id = efab->port + 2;
++	efab_oword_t reg;
++	int value;
++
++	/* Check MII not currently being accessed */
++	if ( ! falcon_gmii_wait ( efab ) )
++		return 0xffff;
++
++	/* Write the address registers */
++	EFAB_POPULATE_OWORD_1 ( reg, FCN_MD_PHY_ADR, 0 /* phy_id ? */ );
++	falcon_write ( efab, &reg, FCN_MD_PHY_ADR_REG_KER );
++	udelay ( 10 );
++	EFAB_POPULATE_OWORD_2 ( reg,
++				FCN_MD_PRT_ADR, phy_id,
++				FCN_MD_DEV_ADR, location );
++	falcon_write ( efab, &reg, FCN_MD_ID_REG_KER );
++	udelay ( 10 );
++
++	/* Request data to be read */
++	EFAB_POPULATE_OWORD_2 ( reg,
++				FCN_MD_RIC, 1,
++				FCN_MD_GC, 1 );
++	falcon_write ( efab, &reg, FCN_MD_CS_REG_KER );
++	udelay ( 10 );
++
++	/* Wait for data to become available */
++	falcon_gmii_wait ( efab );
++
++	/* Read the data */
++	falcon_read ( efab, &reg, FCN_MD_RXD_REG_KER );
++	value = EFAB_OWORD_FIELD ( reg, FCN_MD_RXD );
++
++	EFAB_TRACE ( "Read from GMII %d register %02x, got %04x\n",
++		     phy_id, location, value );
++
++	return value;
++}
++
++/** Falcon implementation of the efab_operations abstraction */
++static struct efab_operations falcon_operations = {
++	.get_membase		= falcon_get_membase,
++	.reset			= falcon_reset,
++	.init_nic		= falcon_init_nic,
++	.read_eeprom		= falcon_read_eeprom,
++	.build_rx_desc		= falcon_build_rx_desc,
++	.notify_rx_desc		= falcon_notify_rx_desc,
++	.build_tx_desc		= falcon_build_tx_desc,
++	.notify_tx_desc		= falcon_notify_tx_desc,
++	.fetch_event		= falcon_fetch_event,
++	.mask_irq		= falcon_mask_irq,
++	.generate_irq		= falcon_generate_irq,
++	.mac_writel		= falcon_mac_writel,
++	.mac_readl		= falcon_mac_readl,
++	.init_mac		= falcon_init_mac,
++	.mdio_write		= falcon_mdio_write,
++	.mdio_read		= falcon_mdio_read,
++};
++
++/**************************************************************************
++ *
++ * Etherfabric abstraction layer
++ *
++ **************************************************************************
++ */
++
++/**
++ * Push RX buffer to RXD ring
++ *
++ * Assigns the buffer the current write-pointer slot, builds its
++ * descriptor via the hardware-specific op, then advances and
++ * publishes the write pointer.
++ */
++static inline void efab_push_rx_buffer ( struct efab_nic *efab,
++					 struct efab_rx_buf *rx_buf ) {
++	/* Create RX descriptor */
++	rx_buf->id = efab->rx_write_ptr;
++	efab->op->build_rx_desc ( efab, rx_buf );
++
++	/* Update RX write pointer */
++	efab->rx_write_ptr = ( efab->rx_write_ptr + 1 ) % EFAB_RXD_SIZE;
++	efab->op->notify_rx_desc ( efab );
++
++	DBG ( "Added RX id %x\n", rx_buf->id );
++}
++
++/**
++ * Push TX buffer to TXD ring
++ *
++ * Mirror of efab_push_rx_buffer() for the transmit ring.
++ */
++static inline void efab_push_tx_buffer ( struct efab_nic *efab,
++					 struct efab_tx_buf *tx_buf ) {
++	/* Create TX descriptor */
++	tx_buf->id = efab->tx_write_ptr;
++	efab->op->build_tx_desc ( efab, tx_buf );
++
++	/* Update TX write pointer */
++	efab->tx_write_ptr = ( efab->tx_write_ptr + 1 ) % EFAB_TXD_SIZE;
++	efab->op->notify_tx_desc ( efab );
++
++	DBG ( "Added TX id %x\n", tx_buf->id );
++}
++
++/**
++ * Initialise MAC and wait for link up
++ *
++ * Retries the hardware-specific init_mac op until either the link
++ * comes up (return 1), init fails (return 0), or ~5 attempts have
++ * been made (return 0).
++ */
++static int efab_init_mac ( struct efab_nic *efab ) {
++	int count;
++
++	/* This can take several seconds */
++	printf ( "Waiting for link.." );
++	count = 0;
++	do {
++		putchar ( '.' );
++		if ( ! efab->op->init_mac ( efab ) ) {
++			printf ( "failed\n" );
++			return 0;
++		}
++		if ( efab->link_up ) {
++			/* PHY init printed the message for us */
++			return 1;
++		}
++		sleep ( 1 );
++	} while ( ++count < 5 );
++	printf ( "timed out\n" );
++
++	return 0;
++}
++
++/**
++ * Initialise NIC
++ *
++ * Full bring-up: reset, hardware init, RX ring fill, MAC address
++ * read (with the last byte offset by the port number so the two
++ * ports get distinct addresses), and MAC/link init.  Returns 1 on
++ * success, 0 on any failure.
++ */
++static int efab_init_nic ( struct efab_nic *efab ) {
++	int i;
++
++	/* Reset NIC */
++	if ( ! efab->op->reset ( efab ) )
++		return 0;
++
++	/* Initialise NIC */
++	if ( ! efab->op->init_nic ( efab ) )
++		return 0;
++
++	/* Push RX descriptors */
++	for ( i = 0 ; i < EFAB_RX_BUFS ; i++ ) {
++		efab_push_rx_buffer ( efab, &efab->rx_bufs[i] );
++	}
++
++	/* Read MAC address from EEPROM */
++	if ( ! efab->op->read_eeprom ( efab ) )
++		return 0;
++	/* Derive a per-port MAC address from the stored base address */
++	efab->mac_addr[ETH_ALEN-1] += efab->port;
++
++	/* Initialise MAC and wait for link up */
++	if ( ! efab_init_mac ( efab ) )
++		return 0;
++
++	return 1;
++}
++
++/**************************************************************************
++ *
++ * Etherboot interface
++ *
++ **************************************************************************
++ */
++
++/**************************************************************************
++POLL - Wait for a frame
++
++Drains the event queue until a received packet or an empty slot is
++found.  The pending RX buffer is held in a static pointer so that a
++poll with retrieve==0 can report availability without consuming it.
++Returns 1 if a packet is (or was) available, 0 otherwise.
++***************************************************************************/
++static int etherfabric_poll ( struct nic *nic, int retrieve ) {
++	struct efab_nic *efab = nic->priv_data;
++	struct efab_event event;
++	static struct efab_rx_buf *rx_buf = NULL;
++	int i;
++
++	/* Process the event queue until we hit either a packet
++	 * received event or an empty event slot.
++	 */
++	while ( ( rx_buf == NULL ) &&
++		efab->op->fetch_event ( efab, &event ) ) {
++		if ( event.type == EFAB_EV_TX ) {
++			/* TX completed - mark as done */
++			DBG ( "TX id %x complete\n",
++			      efab->tx_buf.id );
++			efab->tx_in_progress = 0;
++		} else if ( event.type == EFAB_EV_RX ) {
++			/* RX - find corresponding buffer */
++			for ( i = 0 ; i < EFAB_RX_BUFS ; i++ ) {
++				if ( efab->rx_bufs[i].id == event.rx_id ) {
++					rx_buf = &efab->rx_bufs[i];
++					rx_buf->len = event.rx_len;
++					DBG ( "RX id %x (len %x) received\n",
++					      rx_buf->id, rx_buf->len );
++					break;
++				}
++			}
++			if ( ! rx_buf ) {
++				printf ( "Invalid RX ID %x\n", event.rx_id );
++			}
++		} else if ( event.type == EFAB_EV_NONE ) {
++			DBG ( "Ignorable event\n" );
++		} else {
++			DBG ( "Unknown event\n" );
++		}
++	}
++
++	/* If there is no packet, return 0 */
++	if ( ! rx_buf )
++		return 0;
++
++	/* If we don't want to retrieve it just yet, return 1 */
++	if ( ! retrieve )
++		return 1;
++
++	/* Copy packet contents */
++	nic->packetlen = rx_buf->len;
++	memcpy ( nic->packet, rx_buf->addr, nic->packetlen );
++
++	/* Give this buffer back to the NIC */
++	efab_push_rx_buffer ( efab, rx_buf );
++
++	/* Prepare to receive next packet */
++	rx_buf = NULL;
++
++	return 1;
++}
++
++/**************************************************************************
++TRANSMIT - Transmit a frame
++
++Builds an Ethernet frame (dest, source, type, payload) in the single
++static TX buffer, pads to the minimum frame size, and pushes one TX
++descriptor.  Completion is detected later by etherfabric_poll().
++***************************************************************************/
++static void etherfabric_transmit ( struct nic *nic, const char *dest,
++				   unsigned int type, unsigned int size,
++				   const char *data ) {
++	struct efab_nic *efab = nic->priv_data;
++	unsigned int nstype = htons ( type );
++
++	/* We can only transmit one packet at a time; a TX completion
++	 * event must be received before we can transmit the next
++	 * packet.  Since there is only one static TX buffer, we don't
++	 * worry unduly about overflow, but we report it anyway.
++	 */
++	if ( efab->tx_in_progress ) {
++		printf ( "TX overflow!\n" );
++	}
++
++	/* Fill TX buffer, pad to ETH_ZLEN */
++	memcpy ( efab->tx_buf.addr, dest, ETH_ALEN );
++	memcpy ( efab->tx_buf.addr + ETH_ALEN, nic->node_addr, ETH_ALEN );
++	memcpy ( efab->tx_buf.addr + 2 * ETH_ALEN, &nstype, 2 );
++	memcpy ( efab->tx_buf.addr + ETH_HLEN, data, size );
++	size += ETH_HLEN;
++	while ( size < ETH_ZLEN ) {
++		efab->tx_buf.addr[size++] = '\0';
++	}
++	efab->tx_buf.len = size;
++
++	/* Push TX descriptor */
++	efab_push_tx_buffer ( efab, &efab->tx_buf );
++
++	/* There is no way to wait for TX complete (i.e. TX buffer
++	 * available to re-use for the next transmit) without reading
++	 * from the event queue.  We therefore simply leave the TX
++	 * buffer marked as "in use" until a TX completion event
++	 * happens to be picked up by a call to etherfabric_poll().
++	 */
++	efab->tx_in_progress = 1;
++
++	return;
++}
++
++/**************************************************************************
++DISABLE - Turn off ethernet interface
++
++Resets the hardware and unmaps the register window, if mapped.
++***************************************************************************/
++static void etherfabric_disable ( struct dev *dev ) {
++	struct nic *nic = ( struct nic * ) dev;
++	struct efab_nic *efab = nic->priv_data;
++
++	efab->op->reset ( efab );
++	if ( efab->membase )
++		iounmap ( efab->membase );
++}
++
++/**************************************************************************
++IRQ - handle interrupts
++
++Dispatches the Etherboot irq_action to the hardware-specific
++mask/generate operations.
++***************************************************************************/
++static void etherfabric_irq ( struct nic *nic, irq_action_t action ) {
++	struct efab_nic *efab = nic->priv_data;
++
++	switch ( action ) {
++	case DISABLE :
++		efab->op->mask_irq ( efab, 1 );
++		break;
++	case ENABLE :
++		efab->op->mask_irq ( efab, 0 );
++		break;
++	case FORCE :
++		/* Force NIC to generate a receive interrupt */
++		efab->op->generate_irq ( efab );
++		break;
++	}
++
++	return;
++}
++
++/**************************************************************************
++PROBE - Look for an adapter, this routine's visible to the outside
++
++Selects the EF1002 or Falcon operations table by PCI device ID, wires
++the statically allocated (and EFAB_BUF_ALIGN-aligned) DMA buffers into
++the private data, alternates between the two NIC ports on successive
++probes, and initialises the hardware.  Returns 1 on success.
++***************************************************************************/
++static int etherfabric_probe ( struct dev *dev, struct pci_device *pci ) {
++	struct nic *nic = ( struct nic * ) dev;
++	static struct efab_nic efab;
++	static int nic_port = 1;
++	struct efab_buffers *buffers;
++	int i;
++
++	/* Set up our private data structure */
++	nic->priv_data = &efab;
++	memset ( &efab, 0, sizeof ( efab ) );
++	memset ( &efab_buffers, 0, sizeof ( efab_buffers ) );
++
++	/* Hook in appropriate operations table.  Do this early. */
++	if ( pci->dev_id == EF1002_DEVID ) {
++		efab.op = &ef1002_operations;
++	} else {
++		efab.op = &falcon_operations;
++	}
++
++	/* Initialise efab data structure */
++	efab.pci = pci;
++	/* Align the static buffer pool to EFAB_BUF_ALIGN in bus space */
++	buffers = ( ( struct efab_buffers * )
++		    ( ( ( void * ) &efab_buffers ) +
++		      ( - virt_to_bus ( &efab_buffers ) ) % EFAB_BUF_ALIGN ) );
++	efab.eventq = buffers->eventq;
++	efab.txd = buffers->txd;
++	efab.rxd = buffers->rxd;
++	efab.tx_buf.addr = buffers->tx_buf;
++	for ( i = 0 ; i < EFAB_RX_BUFS ; i++ ) {
++		efab.rx_bufs[i].addr = buffers->rx_buf[i];
++	}
++
++	/* Enable the PCI device */
++	adjust_pci_device ( pci );
++	nic->ioaddr = pci->ioaddr & ~3;
++	nic->irqno = pci->irq;
++
++	/* Get iobase/membase */
++	efab.iobase = nic->ioaddr;
++	efab.op->get_membase ( &efab );
++
++	/* Switch NIC ports (i.e. try different ports on each probe) */
++	nic_port = 1 - nic_port;
++	efab.port = nic_port;
++
++	/* Initialise hardware */
++	if ( ! efab_init_nic ( &efab ) )
++		return 0;
++	memcpy ( nic->node_addr, efab.mac_addr, ETH_ALEN );
++
++	/* hello world */
++	printf ( "Found EtherFabric %s NIC %!\n", pci->name, nic->node_addr );
++
++	/* point to NIC specific routines */
++	dev->disable  = etherfabric_disable;
++	nic->poll     = etherfabric_poll;
++	nic->transmit = etherfabric_transmit;
++	nic->irq      = etherfabric_irq;
++
++	return 1;
++}
++
++/** PCI device IDs claimed by this driver */
++static struct pci_id etherfabric_nics[] = {
++PCI_ROM(0x1924, 0xC101, "ef1002", "EtherFabric EF1002"),
++PCI_ROM(0x1924, 0x0703, "falcon", "EtherFabric Falcon"),
++};
++
++/** Etherboot PCI driver registration */
++static struct pci_driver etherfabric_driver __pci_driver = {
++	.type     = NIC_DRIVER,
++	.name     = "EFAB",
++	.probe    = etherfabric_probe,
++	.ids      = etherfabric_nics,
++	.id_count = sizeof(etherfabric_nics)/sizeof(etherfabric_nics[0]),
++	.class    = 0,
++};
++
++/*
++ * Local variables:
++ * c-basic-offset: 8
++ * c-indent-level: 8
++ * tab-width: 8
++ * End:
++ */
--- /dev/null
--- /dev/null
++/**************************************************************************
++ *
++ * GPL net driver for Level 5 Etherfabric network cards
++ *
++ * Written by Michael Brown <mbrown@fensystems.co.uk>
++ *
++ * Copyright Fen Systems Ltd. 2005
++ * Copyright Level 5 Networks Inc. 2005
++ *
++ * This software may be used and distributed according to the terms of
++ * the GNU General Public License (GPL), incorporated herein by
++ * reference. Drivers based on or derived from this code fall under
++ * the GPL and must retain the authorship, copyright and license
++ * notice. This file is not a complete program and may only be used
++ * when the entire operating system is licensed under the GPL.
++ *
++ **************************************************************************
++ */
++
++#ifndef EFAB_BITFIELD_H
++#define EFAB_BITFIELD_H
++
++/** @file
++ *
++ * Etherfabric bitfield access
++ *
++ * Etherfabric NICs make extensive use of bitfields up to 128 bits
++ * wide. Since there is no native 128-bit datatype on most systems,
++ * and since 64-bit datatypes are inefficient on 32-bit systems and
++ * vice versa, we wrap accesses in a way that uses the most efficient
++ * datatype.
++ *
++ * The NICs are PCI devices and therefore little-endian. Since most
++ * of the quantities that we deal with are DMAed to/from host memory,
++ * we define our datatypes (efab_oword_t, efab_qword_t and
++ * efab_dword_t) to be little-endian.
++ *
++ * In the less common case of using PIO for individual register
++ * writes, we construct the little-endian datatype in host memory and
++ * then use non-swapping equivalents of writel/writeq, rather than
++ * constructing a native-endian datatype and relying on the implicit
++ * byte-swapping done by writel/writeq. (We use a similar strategy
++ * for register reads.)
++ */
++
++/** Dummy field low bit number */
++#define EFAB_DUMMY_FIELD_LBN 0
++/** Dummy field width */
++#define EFAB_DUMMY_FIELD_WIDTH 0
++/** Dword 0 low bit number */
++#define EFAB_DWORD_0_LBN 0
++/** Dword 0 width */
++#define EFAB_DWORD_0_WIDTH 32
++/** Dword 1 low bit number */
++#define EFAB_DWORD_1_LBN 32
++/** Dword 1 width */
++#define EFAB_DWORD_1_WIDTH 32
++/** Dword 2 low bit number */
++#define EFAB_DWORD_2_LBN 64
++/** Dword 2 width */
++#define EFAB_DWORD_2_WIDTH 32
++/** Dword 3 low bit number */
++#define EFAB_DWORD_3_LBN 96
++/** Dword 3 width */
++#define EFAB_DWORD_3_WIDTH 32
++
++/** Specified attribute (e.g. LBN) of the specified field */
++#define EFAB_VAL(field,attribute) field ## _ ## attribute
++/** Low bit number of the specified field */
++#define EFAB_LOW_BIT( field ) EFAB_VAL ( field, LBN )
++/** Bit width of the specified field */
++#define EFAB_WIDTH( field ) EFAB_VAL ( field, WIDTH )
++/** High bit number of the specified field */
++#define EFAB_HIGH_BIT(field) ( EFAB_LOW_BIT(field) + EFAB_WIDTH(field) - 1 )
++/** Mask equal in width to the specified field.
++ *
++ * For example, a field with width 5 would have a mask of 0x1f.
++ *
++ * The maximum width mask that can be generated is 64 bits.
++ */
++#define EFAB_MASK64( field ) \
++ ( EFAB_WIDTH(field) == 64 ? ~( ( uint64_t ) 0 ) : \
++ ( ( ( ( ( uint64_t ) 1 ) << EFAB_WIDTH(field) ) ) - 1 ) )
++
++/** Mask equal in width to the specified field.
++ *
++ * For example, a field with width 5 would have a mask of 0x1f.
++ *
++ * The maximum width mask that can be generated is 32 bits. Use
++ * EFAB_MASK64 for higher width fields.
++ */
++#define EFAB_MASK32( field ) \
++ ( EFAB_WIDTH(field) == 32 ? ~( ( uint32_t ) 0 ) : \
++ ( ( ( ( ( uint32_t ) 1 ) << EFAB_WIDTH(field) ) ) - 1 ) )
++
++/** A doubleword (i.e. 4 byte) datatype
++ *
++ * This datatype is defined to be little-endian.
++ */
++typedef union efab_dword {
++	uint32_t u32[1];
++	/* Whole-value view, for bitwise ops between two efab_dwords */
++	uint32_t opaque;
++} efab_dword_t;
++
++/** A quadword (i.e. 8 byte) datatype
++ *
++ * This datatype is defined to be little-endian.  The u64/u32/dword
++ * members are overlapping views of the same storage.
++ */
++typedef union efab_qword {
++	uint64_t u64[1];
++	uint32_t u32[2];
++	efab_dword_t dword[2];
++} efab_qword_t;
++
++/**
++ * An octword (eight-word, i.e. 16 byte) datatype
++ *
++ * This datatype is defined to be little-endian.  The u64/qword/u32/
++ * dword members are overlapping views of the same storage.
++ */
++typedef union efab_oword {
++	uint64_t u64[2];
++	efab_qword_t qword[2];
++	uint32_t u32[4];
++	efab_dword_t dword[4];
++} efab_oword_t;
++
++/** Format string for printing an efab_dword_t */
++#define EFAB_DWORD_FMT "%08x"
++
++/** Format string for printing an efab_qword_t */
++#define EFAB_QWORD_FMT "%08x:%08x"
++
++/** Format string for printing an efab_oword_t */
++#define EFAB_OWORD_FMT "%08x:%08x:%08x:%08x"
++
++/** printk parameters for printing an efab_dword_t */
++#define EFAB_DWORD_VAL(dword) \
++ ( ( unsigned int ) le32_to_cpu ( (dword).u32[0] ) )
++
++/** printk parameters for printing an efab_qword_t */
++#define EFAB_QWORD_VAL(qword) \
++ ( ( unsigned int ) le32_to_cpu ( (qword).u32[1] ) ), \
++ ( ( unsigned int ) le32_to_cpu ( (qword).u32[0] ) )
++
++/** printk parameters for printing an efab_oword_t */
++#define EFAB_OWORD_VAL(oword) \
++ ( ( unsigned int ) le32_to_cpu ( (oword).u32[3] ) ), \
++ ( ( unsigned int ) le32_to_cpu ( (oword).u32[2] ) ), \
++ ( ( unsigned int ) le32_to_cpu ( (oword).u32[1] ) ), \
++ ( ( unsigned int ) le32_to_cpu ( (oword).u32[0] ) )
++
++/**
++ * Extract bit field portion [low,high) from the native-endian element
++ * which contains bits [min,max).
++ *
++ * For example, suppose "element" represents the high 32 bits of a
++ * 64-bit value, and we wish to extract the bits belonging to the bit
++ * field occupying bits 28-45 of this 64-bit value.
++ *
++ * Then EFAB_EXTRACT ( element, 32, 63, 28, 45 ) would give
++ *
++ * ( element ) << 4
++ *
++ * The result will contain the relevant bits filled in in the range
++ * [0,high-low), with garbage in bits [high-low+1,...).
++ */
++#define EFAB_EXTRACT_NATIVE( native_element, min ,max ,low ,high ) \
++ ( ( ( low > max ) || ( high < min ) ) ? 0 : \
++ ( ( low > min ) ? \
++ ( (native_element) >> ( low - min ) ) : \
++ ( (native_element) << ( min - low ) ) ) )
++
++/**
++ * Extract bit field portion [low,high) from the 64-bit little-endian
++ * element which contains bits [min,max)
++ */
++#define EFAB_EXTRACT64( element, min, max, low, high ) \
++ EFAB_EXTRACT_NATIVE ( le64_to_cpu(element), min, max, low, high )
++
++/**
++ * Extract bit field portion [low,high) from the 32-bit little-endian
++ * element which contains bits [min,max)
++ */
++#define EFAB_EXTRACT32( element, min, max, low, high ) \
++ EFAB_EXTRACT_NATIVE ( le32_to_cpu(element), min, max, low, high )
++
++#define EFAB_EXTRACT_OWORD64( oword, low, high ) \
++ ( EFAB_EXTRACT64 ( (oword).u64[0], 0, 63, low, high ) | \
++ EFAB_EXTRACT64 ( (oword).u64[1], 64, 127, low, high ) )
++
++#define EFAB_EXTRACT_QWORD64( qword, low, high ) \
++ ( EFAB_EXTRACT64 ( (qword).u64[0], 0, 63, low, high ) )
++
++#define EFAB_EXTRACT_OWORD32( oword, low, high ) \
++ ( EFAB_EXTRACT32 ( (oword).u32[0], 0, 31, low, high ) | \
++ EFAB_EXTRACT32 ( (oword).u32[1], 32, 63, low, high ) | \
++ EFAB_EXTRACT32 ( (oword).u32[2], 64, 95, low, high ) | \
++ EFAB_EXTRACT32 ( (oword).u32[3], 96, 127, low, high ) )
++
++#define EFAB_EXTRACT_QWORD32( qword, low, high ) \
++ ( EFAB_EXTRACT32 ( (qword).u32[0], 0, 31, low, high ) | \
++ EFAB_EXTRACT32 ( (qword).u32[1], 32, 63, low, high ) )
++
++#define EFAB_EXTRACT_DWORD( dword, low, high ) \
++ ( EFAB_EXTRACT32 ( (dword).u32[0], 0, 31, low, high ) )
++
++#define EFAB_OWORD_FIELD64( oword, field ) \
++ ( EFAB_EXTRACT_OWORD64 ( oword, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ) ) & \
++ EFAB_MASK64 ( field ) )
++
++#define EFAB_QWORD_FIELD64( qword, field ) \
++ ( EFAB_EXTRACT_QWORD64 ( qword, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ) ) & \
++ EFAB_MASK64 ( field ) )
++
++#define EFAB_OWORD_FIELD32( oword, field ) \
++ ( EFAB_EXTRACT_OWORD32 ( oword, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ) ) & \
++ EFAB_MASK32 ( field ) )
++
++#define EFAB_QWORD_FIELD32( qword, field ) \
++ ( EFAB_EXTRACT_QWORD32 ( qword, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ) ) & \
++ EFAB_MASK32 ( field ) )
++
++#define EFAB_DWORD_FIELD( dword, field ) \
++ ( EFAB_EXTRACT_DWORD ( dword, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ) ) & \
++ EFAB_MASK32 ( field ) )
++
++#define EFAB_OWORD_IS_ZERO64( oword ) \
++ ( ! ( (oword).u64[0] || (oword).u64[1] ) )
++
++#define EFAB_QWORD_IS_ZERO64( qword ) \
++ ( ! ( (qword).u64[0] ) )
++
++#define EFAB_OWORD_IS_ZERO32( oword ) \
++ ( ! ( (oword).u32[0] || (oword).u32[1] || \
++ (oword).u32[2] || (oword).u32[3] ) )
++
++#define EFAB_QWORD_IS_ZERO32( qword ) \
++ ( ! ( (qword).u32[0] || (qword).u32[1] ) )
++
++#define EFAB_DWORD_IS_ZERO( dword ) \
++ ( ! ( (dword).u32[0] ) )
++
++#define EFAB_OWORD_IS_ALL_ONES64( oword ) \
++ ( ( (oword).u64[0] & (oword).u64[1] ) == ~( ( uint64_t ) 0 ) )
++
++#define EFAB_QWORD_IS_ALL_ONES64( qword ) \
++ ( (qword).u64[0] == ~( ( uint64_t ) 0 ) )
++
++#define EFAB_OWORD_IS_ALL_ONES32( oword ) \
++ ( ( (oword).u32[0] & (oword).u32[1] & \
++ (oword).u32[2] & (oword).u32[3] ) == ~( ( uint32_t ) 0 ) )
++
++#define EFAB_QWORD_IS_ALL_ONES32( qword ) \
++ ( ( (qword).u32[0] & (qword).u32[1] ) == ~( ( uint32_t ) 0 ) )
++
++#define EFAB_DWORD_IS_ALL_ONES( dword ) \
++ ( (dword).u32[0] == ~( ( uint32_t ) 0 ) )
++
++#if ( BITS_PER_LONG == 64 )
++#define EFAB_OWORD_FIELD EFAB_OWORD_FIELD64
++#define EFAB_QWORD_FIELD EFAB_QWORD_FIELD64
++#define EFAB_OWORD_IS_ZERO EFAB_OWORD_IS_ZERO64
++#define EFAB_QWORD_IS_ZERO EFAB_QWORD_IS_ZERO64
++#define EFAB_OWORD_IS_ALL_ONES EFAB_OWORD_IS_ALL_ONES64
++#define EFAB_QWORD_IS_ALL_ONES EFAB_QWORD_IS_ALL_ONES64
++#else
++#define EFAB_OWORD_FIELD EFAB_OWORD_FIELD32
++#define EFAB_QWORD_FIELD EFAB_QWORD_FIELD32
++#define EFAB_OWORD_IS_ZERO EFAB_OWORD_IS_ZERO32
++#define EFAB_QWORD_IS_ZERO EFAB_QWORD_IS_ZERO32
++#define EFAB_OWORD_IS_ALL_ONES EFAB_OWORD_IS_ALL_ONES32
++#define EFAB_QWORD_IS_ALL_ONES EFAB_QWORD_IS_ALL_ONES32
++#endif
++
++/**
++ * Construct bit field portion
++ *
++ * Creates the portion of the bit field [low,high) that lies within
++ * the range [min,max).
++ */
++#define EFAB_INSERT_NATIVE64( min, max, low, high, value ) \
++ ( ( ( low > max ) || ( high < min ) ) ? 0 : \
++ ( ( low > min ) ? \
++ ( ( ( uint64_t ) (value) ) << ( low - min ) ) : \
++ ( ( ( uint64_t ) (value) ) >> ( min - low ) ) ) )
++
++#define EFAB_INSERT_NATIVE32( min, max, low, high, value ) \
++ ( ( ( low > max ) || ( high < min ) ) ? 0 : \
++ ( ( low > min ) ? \
++ ( ( ( uint32_t ) (value) ) << ( low - min ) ) : \
++ ( ( ( uint32_t ) (value) ) >> ( min - low ) ) ) )
++
++#define EFAB_INSERT_NATIVE( min, max, low, high, value ) \
++ ( ( ( ( max - min ) >= 32 ) || \
++ ( ( high - low ) >= 32 ) ) \
++ ? EFAB_INSERT_NATIVE64 ( min, max, low, high, value ) \
++ : EFAB_INSERT_NATIVE32 ( min, max, low, high, value ) )
++
++/**
++ * Construct bit field portion
++ *
++ * Creates the portion of the named bit field that lies within the
++ * range [min,max).
++ */
++#define EFAB_INSERT_FIELD_NATIVE( min, max, field, value ) \
++ EFAB_INSERT_NATIVE ( min, max, EFAB_LOW_BIT ( field ), \
++ EFAB_HIGH_BIT ( field ), value )
++
++/**
++ * Construct bit field
++ *
++ * Creates the portion of the named bit fields that lie within the
++ * range [min,max).
++ */
++#define EFAB_INSERT_FIELDS_NATIVE( min, max, \
++ field1, value1, \
++ field2, value2, \
++ field3, value3, \
++ field4, value4, \
++ field5, value5, \
++ field6, value6, \
++ field7, value7, \
++ field8, value8, \
++ field9, value9, \
++ field10, value10 ) \
++ ( EFAB_INSERT_FIELD_NATIVE ( min, max, field1, value1 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field2, value2 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field3, value3 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field4, value4 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field5, value5 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field6, value6 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field7, value7 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field8, value8 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field9, value9 ) | \
++ EFAB_INSERT_FIELD_NATIVE ( min, max, field10, value10 ) )
++
++#define EFAB_INSERT_FIELDS64( ... ) \
++ cpu_to_le64 ( EFAB_INSERT_FIELDS_NATIVE ( __VA_ARGS__ ) )
++
++#define EFAB_INSERT_FIELDS32( ... ) \
++ cpu_to_le32 ( EFAB_INSERT_FIELDS_NATIVE ( __VA_ARGS__ ) )
++
++#define EFAB_POPULATE_OWORD64( oword, ... ) do { \
++ (oword).u64[0] = EFAB_INSERT_FIELDS64 ( 0, 63, __VA_ARGS__ );\
++ (oword).u64[1] = EFAB_INSERT_FIELDS64 ( 64, 127, __VA_ARGS__ );\
++ } while ( 0 )
++
++#define EFAB_POPULATE_QWORD64( qword, ... ) do { \
++ (qword).u64[0] = EFAB_INSERT_FIELDS64 ( 0, 63, __VA_ARGS__ );\
++ } while ( 0 )
++
++#define EFAB_POPULATE_OWORD32( oword, ... ) do { \
++ (oword).u32[0] = EFAB_INSERT_FIELDS32 ( 0, 31, __VA_ARGS__ );\
++ (oword).u32[1] = EFAB_INSERT_FIELDS32 ( 32, 63, __VA_ARGS__ );\
++ (oword).u32[2] = EFAB_INSERT_FIELDS32 ( 64, 95, __VA_ARGS__ );\
++ (oword).u32[3] = EFAB_INSERT_FIELDS32 ( 96, 127, __VA_ARGS__ );\
++ } while ( 0 )
++
++#define EFAB_POPULATE_QWORD32( qword, ... ) do { \
++ (qword).u32[0] = EFAB_INSERT_FIELDS32 ( 0, 31, __VA_ARGS__ );\
++ (qword).u32[1] = EFAB_INSERT_FIELDS32 ( 32, 63, __VA_ARGS__ );\
++ } while ( 0 )
++
++#define EFAB_POPULATE_DWORD( dword, ... ) do { \
++ (dword).u32[0] = EFAB_INSERT_FIELDS32 ( 0, 31, __VA_ARGS__ );\
++ } while ( 0 )
++
++#if ( BITS_PER_LONG == 64 )
++#define EFAB_POPULATE_OWORD EFAB_POPULATE_OWORD64
++#define EFAB_POPULATE_QWORD EFAB_POPULATE_QWORD64
++#else
++#define EFAB_POPULATE_OWORD EFAB_POPULATE_OWORD32
++#define EFAB_POPULATE_QWORD EFAB_POPULATE_QWORD32
++#endif
++
++/* Populate an octword field with various numbers of arguments */
++#define EFAB_POPULATE_OWORD_10 EFAB_POPULATE_OWORD
++#define EFAB_POPULATE_OWORD_9( oword, ... ) \
++ EFAB_POPULATE_OWORD_10 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_8( oword, ... ) \
++ EFAB_POPULATE_OWORD_9 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_7( oword, ... ) \
++ EFAB_POPULATE_OWORD_8 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_6( oword, ... ) \
++ EFAB_POPULATE_OWORD_7 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_5( oword, ... ) \
++ EFAB_POPULATE_OWORD_6 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_4( oword, ... ) \
++ EFAB_POPULATE_OWORD_5 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_3( oword, ... ) \
++ EFAB_POPULATE_OWORD_4 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_2( oword, ... ) \
++ EFAB_POPULATE_OWORD_3 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_OWORD_1( oword, ... ) \
++ EFAB_POPULATE_OWORD_2 ( oword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_ZERO_OWORD( oword ) \
++ EFAB_POPULATE_OWORD_1 ( oword, EFAB_DUMMY_FIELD, 0 )
++#define EFAB_SET_OWORD( oword ) \
++ EFAB_POPULATE_OWORD_4 ( oword, \
++ EFAB_DWORD_0, 0xffffffff, \
++ EFAB_DWORD_1, 0xffffffff, \
++ EFAB_DWORD_2, 0xffffffff, \
++ EFAB_DWORD_3, 0xffffffff )
++
++/* Populate a quadword field with various numbers of arguments */
++#define EFAB_POPULATE_QWORD_10 EFAB_POPULATE_QWORD
++#define EFAB_POPULATE_QWORD_9( qword, ... ) \
++ EFAB_POPULATE_QWORD_10 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_8( qword, ... ) \
++ EFAB_POPULATE_QWORD_9 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_7( qword, ... ) \
++ EFAB_POPULATE_QWORD_8 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_6( qword, ... ) \
++ EFAB_POPULATE_QWORD_7 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_5( qword, ... ) \
++ EFAB_POPULATE_QWORD_6 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_4( qword, ... ) \
++ EFAB_POPULATE_QWORD_5 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_3( qword, ... ) \
++ EFAB_POPULATE_QWORD_4 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_2( qword, ... ) \
++ EFAB_POPULATE_QWORD_3 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_QWORD_1( qword, ... ) \
++ EFAB_POPULATE_QWORD_2 ( qword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_ZERO_QWORD( qword ) \
++ EFAB_POPULATE_QWORD_1 ( qword, EFAB_DUMMY_FIELD, 0 )
++#define EFAB_SET_QWORD( qword ) \
++ EFAB_POPULATE_QWORD_2 ( qword, \
++ EFAB_DWORD_0, 0xffffffff, \
++ EFAB_DWORD_1, 0xffffffff )
++
++/* Populate a dword field with various numbers of arguments */
++#define EFAB_POPULATE_DWORD_10 EFAB_POPULATE_DWORD
++#define EFAB_POPULATE_DWORD_9( dword, ... ) \
++ EFAB_POPULATE_DWORD_10 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_8( dword, ... ) \
++ EFAB_POPULATE_DWORD_9 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_7( dword, ... ) \
++ EFAB_POPULATE_DWORD_8 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_6( dword, ... ) \
++ EFAB_POPULATE_DWORD_7 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_5( dword, ... ) \
++ EFAB_POPULATE_DWORD_6 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_4( dword, ... ) \
++ EFAB_POPULATE_DWORD_5 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_3( dword, ... ) \
++ EFAB_POPULATE_DWORD_4 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_2( dword, ... ) \
++ EFAB_POPULATE_DWORD_3 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_POPULATE_DWORD_1( dword, ... ) \
++ EFAB_POPULATE_DWORD_2 ( dword, EFAB_DUMMY_FIELD, 0, __VA_ARGS__ )
++#define EFAB_ZERO_DWORD( dword ) \
++ EFAB_POPULATE_DWORD_1 ( dword, EFAB_DUMMY_FIELD, 0 )
++#define EFAB_SET_DWORD( dword ) \
++ EFAB_POPULATE_DWORD_1 ( dword, EFAB_DWORD_0, 0xffffffff )
++
++/*
++ * Modify a named field within an already-populated structure. Used
++ * for read-modify-write operations.
++ *
++ */
++
++#define EFAB_INSERT_FIELD64( ... ) \
++ cpu_to_le64 ( EFAB_INSERT_FIELD_NATIVE ( __VA_ARGS__ ) )
++
++#define EFAB_INSERT_FIELD32( ... ) \
++ cpu_to_le32 ( EFAB_INSERT_FIELD_NATIVE ( __VA_ARGS__ ) )
++
++#define EFAB_INPLACE_MASK64( min, max, field ) \
++ EFAB_INSERT_FIELD64 ( min, max, field, EFAB_MASK64 ( field ) )
++
++#define EFAB_INPLACE_MASK32( min, max, field ) \
++ EFAB_INSERT_FIELD32 ( min, max, field, EFAB_MASK32 ( field ) )
++
++#define EFAB_SET_OWORD_FIELD64( oword, field, value ) do { \
++ (oword).u64[0] = ( ( (oword).u64[0] \
++ & ~EFAB_INPLACE_MASK64 ( 0, 63, field ) ) \
++ | EFAB_INSERT_FIELD64 ( 0, 63, field, value ) ); \
++ (oword).u64[1] = ( ( (oword).u64[1] \
++ & ~EFAB_INPLACE_MASK64 ( 64, 127, field ) ) \
++ | EFAB_INSERT_FIELD64 ( 64, 127, field, value ) ); \
++ } while ( 0 )
++
++#define EFAB_SET_QWORD_FIELD64( qword, field, value ) do { \
++ (qword).u64[0] = ( ( (qword).u64[0] \
++ & ~EFAB_INPLACE_MASK64 ( 0, 63, field ) ) \
++ | EFAB_INSERT_FIELD64 ( 0, 63, field, value ) ); \
++ } while ( 0 )
++
++#define EFAB_SET_OWORD_FIELD32( oword, field, value ) do { \
++ (oword).u32[0] = ( ( (oword).u32[0] \
++ & ~EFAB_INPLACE_MASK32 ( 0, 31, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 0, 31, field, value ) ); \
++ (oword).u32[1] = ( ( (oword).u32[1] \
++ & ~EFAB_INPLACE_MASK32 ( 32, 63, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 32, 63, field, value ) ); \
++ (oword).u32[2] = ( ( (oword).u32[2] \
++ & ~EFAB_INPLACE_MASK32 ( 64, 95, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 64, 95, field, value ) ); \
++ (oword).u32[3] = ( ( (oword).u32[3] \
++ & ~EFAB_INPLACE_MASK32 ( 96, 127, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 96, 127, field, value ) ); \
++ } while ( 0 )
++
++#define EFAB_SET_QWORD_FIELD32( qword, field, value ) do { \
++ (qword).u32[0] = ( ( (qword).u32[0] \
++ & ~EFAB_INPLACE_MASK32 ( 0, 31, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 0, 31, field, value ) ); \
++ (qword).u32[1] = ( ( (qword).u32[1] \
++ & ~EFAB_INPLACE_MASK32 ( 32, 63, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 32, 63, field, value ) ); \
++ } while ( 0 )
++
++#define EFAB_SET_DWORD_FIELD( dword, field, value ) do { \
++ (dword).u32[0] = ( ( (dword).u32[0] \
++ & ~EFAB_INPLACE_MASK32 ( 0, 31, field ) ) \
++ | EFAB_INSERT_FIELD32 ( 0, 31, field, value ) ); \
++ } while ( 0 )
++
++#if ( BITS_PER_LONG == 64 )
++#define EFAB_SET_OWORD_FIELD EFAB_SET_OWORD_FIELD64
++#define EFAB_SET_QWORD_FIELD EFAB_SET_QWORD_FIELD64
++#else
++#define EFAB_SET_OWORD_FIELD EFAB_SET_OWORD_FIELD32
++#define EFAB_SET_QWORD_FIELD EFAB_SET_QWORD_FIELD32
++#endif
++
++/* Used to avoid compiler warnings about shift range exceeding width
++ * of the data types when dma_addr_t is only 32 bits wide.
++ */
++#define DMA_ADDR_T_WIDTH ( 8 * sizeof ( dma_addr_t ) )
++#define EFAB_DMA_TYPE_WIDTH( width ) \
++ ( ( (width) < DMA_ADDR_T_WIDTH ) ? (width) : DMA_ADDR_T_WIDTH )
++#define EFAB_DMA_MAX_MASK ( ( DMA_ADDR_T_WIDTH == 64 ) ? \
++ ~( ( uint64_t ) 0 ) : ~( ( uint32_t ) 0 ) )
++#define EFAB_DMA_MASK(mask) ( (mask) & EFAB_DMA_MAX_MASK )
++
++#endif /* EFAB_BITFIELD_H */
++
++/*
++ * Local variables:
++ * c-basic-offset: 8
++ * c-indent-level: 8
++ * tab-width: 8
++ * End:
++ */
--- /dev/null
--- /dev/null
++diff -ru ../../orig/dhcp-3.0.4b2/common/options.c ./common/options.c
++--- ../../orig/dhcp-3.0.4b2/common/options.c 2005-11-02 01:19:03.000000000 +0200
+++++ ./common/options.c 2005-12-06 14:38:17.000000000 +0200
++@@ -537,6 +537,7 @@
++ priority_list [priority_len++] = DHO_DHCP_LEASE_TIME;
++ priority_list [priority_len++] = DHO_DHCP_MESSAGE;
++ priority_list [priority_len++] = DHO_DHCP_REQUESTED_ADDRESS;
+++ priority_list [priority_len++] = DHO_DHCP_CLIENT_IDENTIFIER;
++ priority_list [priority_len++] = DHO_FQDN;
++
++ if (prl && prl -> len > 0) {
++diff -ru ../../orig/dhcp-3.0.4b2/includes/site.h ./includes/site.h
++--- ../../orig/dhcp-3.0.4b2/includes/site.h 2002-03-12 20:33:39.000000000 +0200
+++++ ./includes/site.h 2005-12-06 14:36:55.000000000 +0200
++@@ -135,7 +135,7 @@
++ the aforementioned problems do not matter to you, or if no other
++ API is supported for your system, you may want to go with it. */
++
++-/* #define USE_SOCKETS */
+++#define USE_SOCKETS
++
++ /* Define this to use the Sun Streams NIT API.
++
--- /dev/null
--- /dev/null
++# dhcpd.conf
++#
++# Sample configuration file for ISC dhcpd
++#
++
++# option definitions common to all supported networks...
++
++DHCPD_INTERFACE = "ib0";
++
++# if you do not use dynamic DNS updates:
++#
++# this statement is needed: dhcpd-3 needs at least this statement.
++# you have to delete it for dhcpd-2, because it does not know it.
++#
++# if you want to use dynamic DNS updates, you should first read
++# /usr/share/doc/packages/dhcp-server/DDNS-howto.txt
++ddns-update-style none; ddns-updates off;
++
++filename "pxelinux.bin";
++
++# If this DHCP server is the official DHCP server for the local
++# network, the authoritative directive should be uncommented.
++#authoritative;
++
++# No service will be given on this subnet, but declaring it helps the
++# DHCP server to understand the network topology.
++
++subnet 10.152.187.0 netmask 255.255.255.0 {
++}
++
++# This declaration allows BOOTP clients to get dynamic addresses,
++# which we don't really recommend.
++
++shared-network "ipoib_network" {
++ subnet 11.4.8.0 netmask 255.255.255.0 {
++ option dhcp-client-identifier = option dhcp-client-identifier;
++ option subnet-mask 255.255.255.0;
++ option domain-name "yok.mtl.com";
++ option domain-name-servers 10.0.0.1;
++ default-lease-time 28800;
++ max-lease-time 86400;
++ next-server 11.4.8.99;
++
++ }
++}
++
++
++# You need one such entry for each client
++host swlab35 {
++ fixed-address 11.4.8.35; # the IP address to be assigned to the client
++	# The value of the client identifier must be composed of the prefix 20:00:
++	# followed by the client's ipoib qp number - 55:04:01 in this example -
++ # followed by the GID of the port
++ option dhcp-client-identifier = 20:00:55:04:01:fe:80:00:00:00:00:00:00:00:02:c9:00:01:70:8a:81;
++}
++
if (lp->options & PCNET32_PORT_ASEL)
val |= 2;
lp->a.write_bcr(ioaddr, 2, val);
++
/* handle full duplex setting */
if (lp->full_duplex) {
val = lp->a.read_bcr(ioaddr, 9) & ~3;
lp->a.write_csr(ioaddr, 3, val);
}
#endif
++ if (1)
++ {
++ //disable interrupts
++ val = lp->a.read_csr(ioaddr, 3);
++ val = val
++ | (1 << 14) //BABLM intr disabled
++ | (1 << 12) //MISSM missed frame mask intr disabled
++ | (1 << 10) //RINTM receive intr disabled
++ | (1 << 9) //TINTM transmit intr disabled
++ | (1 << 8) //IDONM init done intr disabled
++ ;
++ lp->a.write_csr(ioaddr, 3, val);
++ }
if (lp->ltint) { /* Enable TxDone-intr inhibitor */
val = lp->a.read_csr(ioaddr, 5);
lp->a.write_csr(ioaddr, 0, 0x0004);
/*
-- * Switch back to 16-bit mode to avoid problesm with dumb
++ * Switch back to 16-bit mode to avoid problems with dumb
* DOS packet driver after a warm reboot
*/
-- lp->a.write_bcr(ioaddr, 20, 4);
++ lp->a.write_bcr(ioaddr, 20, 0);
}
/**************************************************************************
chip_version =
a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
-- dprintf(("PCnet chip version is %0xhX\n", chip_version));
++ dprintf(("PCnet chip version is 0x%X\n", chip_version));
if ((chip_version & 0xfff) != 0x003)
return 0;
mii = 1;
break;
default:
++ chipname = "UNKNOWN";
printf("PCnet version %#x, no PCnet32 chip.\n",
chip_version);
return 0;
nic->node_addr[i] = promaddr[i];
}
/* Print out some hardware info */
-- printf("%s: %! at ioaddr %hX, ", pci->name, nic->node_addr,
++ printf("%s: %! at ioaddr 0x%hX, ", chipname, nic->node_addr,
ioaddr);
/* Set to pci bus master */
/* switch pcnet32 to 32bit mode */
a->write_bcr(ioaddr, 20, 2);
--
a->write_csr(ioaddr, 1, (virt_to_bus(&lp->init_block)) & 0xffff);
a->write_csr(ioaddr, 2, (virt_to_bus(&lp->init_block)) >> 16);
*/
/* Trigger an initialization just for the interrupt. */
-- a->write_csr(ioaddr, 0, 0x41);
-- mdelay(1);
++
++// a->write_csr(ioaddr, 0, 0x41);
++// mdelay(1);
cards_found++;
/* point to NIC specific routines */
pcnet32_reset(nic);
-- if (1) {
-- int tmp;
++ if (mii) {
++ int tmp;
int phy, phy_idx = 0;
u16 mii_lpa;
lp->phys[0] = 1; /* Default Setting */
printf("10Mbps Half-Duplex\n");
else
printf("\n");
++ } else {
++ /* The older chips are fixed 10Mbps, and some support full duplex,
++ * although not via autonegotiation, but only via configuration. */
++ if (fdx)
++ printf("10Mbps Full-Duplex\n");
++ else
++ printf("10Mbps Half-Duplex\n");
}
nic->nic_op = &pcnet32_operations;
};
static struct pci_id pcnet32_nics[] = {
-- PCI_ROM(0x1022, 0x2000, "lancepci", "AMD Lance/PCI"),
-- PCI_ROM(0x1022, 0x2625, "pcnetfastiii", "AMD Lance/PCI PCNet/32"),
-- PCI_ROM(0x1022, 0x2001, "amdhomepna", "AMD Lance/HomePNA"),
++ PCI_ROM(0x1022, 0x2000, "pcnet32", "AMD PCnet/PCI"),
++ PCI_ROM(0x1022, 0x2625, "pcnetfastiii", "AMD PCNet FAST III"),
++ PCI_ROM(0x1022, 0x2001, "amdhomepna", "AMD PCnet/HomePNA"),
};
PCI_DRIVER ( pcnet32_driver, pcnet32_nics, PCI_NO_CLASS );
--- /dev/null
--- /dev/null
++#define EB54 1
++/**************************************************************************
++* via-velocity.c: Etherboot device driver for the VIA 6120 Gigabit
++* Changes for Etherboot port:
++* Copyright (c) 2006 by Timothy Legge <tlegge@rogers.com>
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++* This driver is based on:
++* via-velocity.c: VIA Velocity VT6120, VT6122 Ethernet driver
++* The changes are (c) Copyright 2004, Red Hat Inc.
++* <alan@redhat.com>
++* Additional fixes and clean up: Francois Romieu
++*
++* Original code:
++* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
++* All rights reserved.
++* Author: Chuang Liang-Shing, AJ Jiang
++*
++* Linux Driver Version 2.6.15.4
++*
++* REVISION HISTORY:
++* ================
++*
++* v1.0 03-06-2006 timlegge Initial port of Linux driver
++*
++* Indent Options: indent -kr -i8
++*************************************************************************/
++
++/* to get some global routines like printf */
++#include "etherboot.h"
++/* to get the interface to the body of the program */
++#include "nic.h"
++/* to get the PCI support functions, if this is a PCI NIC */
++#include "pci.h"
++
++
++#include "via-velocity.h"
++
++typedef int pci_power_t;
++
++#define PCI_D0 ((int) 0)
++#define PCI_D1 ((int) 1)
++#define PCI_D2 ((int) 2)
++#define PCI_D3hot ((int) 3)
++#define PCI_D3cold ((int) 4)
++#define PCI_POWER_ERROR ((int) -1)
++
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++//FIXME: Move to pci.c
++int pci_set_power_state(struct pci_device *dev, int state);
++
++/* FIXME: Move BASE to the private structure */
++static u32 BASE;
++
++/* NIC specific static variables go here */
++#define VELOCITY_PARAM(N,D) \
++ static const int N[MAX_UNITS]=OPTION_DEFAULT;
++/* MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\
++ MODULE_PARM_DESC(N, D); */
++
++VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
++VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
++
++
++#define VLAN_ID_MIN 0
++#define VLAN_ID_MAX 4095
++#define VLAN_ID_DEF 0
++/* VID_setting[] is used for setting the VID of NIC.
++ 0: default VID.
++ 1-4094: other VIDs.
++*/
++VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");
++
++#define RX_THRESH_MIN 0
++#define RX_THRESH_MAX 3
++#define RX_THRESH_DEF 0
++/* rx_thresh[] is used for controlling the receive fifo threshold.
++ 0: indicate the rxfifo threshold is 128 bytes.
++ 1: indicate the rxfifo threshold is 512 bytes.
++ 2: indicate the rxfifo threshold is 1024 bytes.
++ 3: indicate the rxfifo threshold is store & forward.
++*/
++VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
++
++#define DMA_LENGTH_MIN 0
++#define DMA_LENGTH_MAX 7
++#define DMA_LENGTH_DEF 0
++
++/* DMA_length[] is used for controlling the DMA length
++ 0: 8 DWORDs
++ 1: 16 DWORDs
++ 2: 32 DWORDs
++ 3: 64 DWORDs
++ 4: 128 DWORDs
++ 5: 256 DWORDs
++   6: SF(flush till empty)
++   7: SF(flush till empty)
++*/
++VELOCITY_PARAM(DMA_length, "DMA length");
++
++#define TAGGING_DEF 0
++/* enable_tagging[] is used for enabling 802.1Q VID tagging.
++   0: disable VID setting (default).
++ 1: enable VID setting.
++*/
++VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");
++
++#define IP_ALIG_DEF 0
++/* IP_byte_align[] is used for IP header DWORD byte aligned
++ 0: indicate the IP header won't be DWORD byte aligned.(Default) .
++ 1: indicate the IP header will be DWORD byte aligned.
++   In some environments, the IP header should be DWORD byte aligned,
++   or the packet will be dropped when we receive it. (eg: IPVS)
++*/
++VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
++
++#define TX_CSUM_DEF 1
++/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
++ (We only support RX checksum offload now)
++   0: disable checksum offload.
++ 1: enable checksum offload. (Default)
++*/
++VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
++
++#define FLOW_CNTL_DEF 1
++#define FLOW_CNTL_MIN 1
++#define FLOW_CNTL_MAX 5
++
++/* flow_control[] is used for setting the flow control ability of NIC.
++   1: hardware default - AUTO (default). Use Hardware default value in ANAR.
++ 2: enable TX flow control.
++ 3: enable RX flow control.
++ 4: enable RX/TX flow control.
++ 5: disable
++*/
++VELOCITY_PARAM(flow_control, "Enable flow control ability");
++
++#define MED_LNK_DEF 0
++#define MED_LNK_MIN 0
++#define MED_LNK_MAX 4
++/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
++ 0: indicate autonegotiation for both speed and duplex mode
++ 1: indicate 100Mbps half duplex mode
++ 2: indicate 100Mbps full duplex mode
++ 3: indicate 10Mbps half duplex mode
++ 4: indicate 10Mbps full duplex mode
++
++ Note:
++ if EEPROM have been set to the force mode, this option is ignored
++ by driver.
++*/
++VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
++
++#define VAL_PKT_LEN_DEF 0
++/* ValPktLen[] is used for setting the checksum offload ability of NIC.
++ 0: Receive frame with invalid layer 2 length (Default)
++ 1: Drop frame with invalid layer 2 length
++*/
++VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
++
++#define WOL_OPT_DEF 0
++#define WOL_OPT_MIN 0
++#define WOL_OPT_MAX 7
++/* wol_opts[] is used for controlling wake on lan behavior.
++   0: Wake up if received a magic packet. (Default)
++   1: Wake up if link status is on/off.
++   2: Wake up if received an arp packet.
++   4: Wake up if received any unicast packet.
++   These values can be summed up to support more than one option.
++*/
++VELOCITY_PARAM(wol_opts, "Wake On Lan options");
++
++#define INT_WORKS_DEF 20
++#define INT_WORKS_MIN 10
++#define INT_WORKS_MAX 64
++
++VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
++
++/* The descriptors for this card are required to be aligned on
++64 byte boundaries. As the align attribute does not guarantee alignment
++greater than the alignment of the start address (which for Etherboot
++is 16 bytes of alignment) it requires some extra steps. Add 64 to the
++size of the array and the init_ring adjusts the alignment */
++
++/* Define the TX Descriptor */
++static u8 tx_ring[TX_DESC_DEF * sizeof(struct tx_desc) + 64];
++
++/* Create a static buffer of size PKT_BUF_SZ for each TX Descriptor.
++All descriptors point to a part of this buffer */
++static u8 txb[(TX_DESC_DEF * PKT_BUF_SZ) + 64];
++
++/* Define the RX Descriptor */
++static u8 rx_ring[RX_DESC_DEF * sizeof(struct rx_desc) + 64];
++
++/* Create a static buffer of size PKT_BUF_SZ for each RX Descriptor
++ All descriptors point to a part of this buffer */
++static u8 rxb[(RX_DESC_DEF * PKT_BUF_SZ) + 64];
++
++static void velocity_init_info(struct pci_device *pdev,
++ struct velocity_info *vptr,
++ struct velocity_info_tbl *info);
++static int velocity_get_pci_info(struct velocity_info *,
++ struct pci_device *pdev);
++static int velocity_open(struct nic *nic, struct pci_device *pci);
++
++static int velocity_soft_reset(struct velocity_info *vptr);
++static void velocity_init_cam_filter(struct velocity_info *vptr);
++static void mii_init(struct velocity_info *vptr, u32 mii_status);
++static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
++static void velocity_print_link_status(struct velocity_info *vptr);
++static void safe_disable_mii_autopoll(struct mac_regs *regs);
++static void enable_flow_control_ability(struct velocity_info *vptr);
++static void enable_mii_autopoll(struct mac_regs *regs);
++static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata);
++static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data);
++static u32 mii_check_media_mode(struct mac_regs *regs);
++static u32 check_connection_type(struct mac_regs *regs);
++static int velocity_set_media_mode(struct velocity_info *vptr,
++ u32 mii_status);
++
++
++/*
++ * Internal board variants. At the moment we have only one
++ */
++
++static struct velocity_info_tbl chip_info_table[] = {
++ {CHIP_TYPE_VT6110,
++ "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1,
++ 0x00FFFFFFUL},
++ {0, NULL, 0, 0, 0}
++};
++
++/**
++ * velocity_set_int_opt - parser for integer options
++ * @opt: pointer to option value
++ * @val: value the user requested (or -1 for default)
++ * @min: lowest value allowed
++ * @max: highest value allowed
++ * @def: default value
++ * @name: property name
++ * @dev: device name
++ *
++ * Set an integer property in the module options. This function does
++ * all the verification and checking as well as reporting so that
++ * we don't duplicate code for each option.
++ */
++
++static void velocity_set_int_opt(int *opt, int val, int min, int max,
++ int def, char *name, char *devname)
++{
++ if (val == -1) {
++ printf("%s: set value of parameter %s to %d\n",
++ devname, name, def);
++ *opt = def;
++ } else if (val < min || val > max) {
++ printf
++ ("%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
++ devname, name, min, max);
++ *opt = def;
++ } else {
++ printf("%s: set value of parameter %s to %d\n",
++ devname, name, val);
++ *opt = val;
++ }
++}
++
++/**
++ * velocity_set_bool_opt - parser for boolean options
++ * @opt: pointer to option value
++ * @val: value the user requested (or -1 for default)
++ * @def: default value (yes/no)
++ * @flag: numeric value to set for true.
++ * @name: property name
++ * @dev: device name
++ *
++ * Set a boolean property in the module options. This function does
++ * all the verification and checking as well as reporting so that
++ * we don't duplicate code for each option.
++ */
++
++static void velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag,
++ char *name, char *devname)
++{
++ (*opt) &= (~flag);
++ if (val == -1) {
++ printf("%s: set parameter %s to %s\n",
++ devname, name, def ? "TRUE" : "FALSE");
++ *opt |= (def ? flag : 0);
++ } else if (val < 0 || val > 1) {
++ printf
++ ("%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
++ devname, name);
++ *opt |= (def ? flag : 0);
++ } else {
++ printf("%s: set parameter %s to %s\n",
++ devname, name, val ? "TRUE" : "FALSE");
++ *opt |= (val ? flag : 0);
++ }
++}
++
++/**
++ * velocity_get_options - set options on device
++ * @opts: option structure for the device
++ * @index: index of option to use in module options array
++ * @devname: device name
++ *
++ * Turn the module and command options into a single structure
++ * for the current device
++ */
++
++static void velocity_get_options(struct velocity_opt *opts, int index,
++				 char *devname)
++{
++
++	/* FIXME Do the options need to be configurable */
++	/* rx_thresh is passed as -1, so it is always forced to its
++	   default (RX_THRESH_DEF) */
++	velocity_set_int_opt(&opts->rx_thresh, -1, RX_THRESH_MIN,
++			     RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh",
++			     devname);
++	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
++			     DMA_LENGTH_MIN, DMA_LENGTH_MAX,
++			     DMA_LENGTH_DEF, "DMA_length", devname);
++	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
++			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
++			     "RxDescriptors", devname);
++	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
++			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
++			     "TxDescriptors", devname);
++	velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN,
++			     VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting",
++			     devname);
++	velocity_set_bool_opt(&opts->flags, enable_tagging[index],
++			      TAGGING_DEF, VELOCITY_FLAGS_TAGGING,
++			      "enable_tagging", devname);
++	velocity_set_bool_opt(&opts->flags, txcsum_offload[index],
++			      TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM,
++			      "txcsum_offload", devname);
++	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
++			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
++			     "flow_control", devname);
++	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
++			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
++			      "IP_byte_align", devname);
++	velocity_set_bool_opt(&opts->flags, ValPktLen[index],
++			      VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN,
++			      "ValPktLen", devname);
++	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
++			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
++			     "Media link mode", devname);
++	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index],
++			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
++			     "Wake On Lan options", devname);
++	velocity_set_int_opt((int *) &opts->int_works, int_works[index],
++			     INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF,
++			     "Interrupt service works", devname);
++	/* the chip hands RX descriptors back in blocks of 4 (see
++	   velocity_give_many_rx_descs), so keep the ring size 4-aligned */
++	opts->numrx = (opts->numrx & ~3);
++}
++
++/**
++ * velocity_init_cam_filter - initialise CAM
++ * @vptr: velocity to program
++ *
++ * Initialize the content addressable memory used for filters. Load
++ * appropriately according to the presence of VLAN
++ */
++
++static void velocity_init_cam_filter(struct velocity_info *vptr)
++{
++	struct mac_regs *regs = vptr->mac_regs;
++
++	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT
++	   (was "®s->MCFG" - mis-encoded "&regs->MCFG") */
++	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
++	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
++
++	/* Disable all CAMs */
++	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
++	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
++	mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
++	mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
++
++	/* Enable first VCAM */
++	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
++		/* If Tagging option is enabled and VLAN ID is not zero, then
++		   turn on MCFG_RTGOPT also */
++		if (vptr->options.vid != 0)
++			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
++
++		mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid),
++			    VELOCITY_VLAN_ID_CAM);
++		vptr->vCAMmask[0] |= 1;
++		mac_set_cam_mask(regs, vptr->vCAMmask,
++				 VELOCITY_VLAN_ID_CAM);
++	} else {
++		u16 temp = 0;
++		/* No tagging: program VLAN ID 0 and enable only CAM 0 */
++		mac_set_cam(regs, 0, (u8 *) & temp, VELOCITY_VLAN_ID_CAM);
++		temp = 1;
++		mac_set_cam_mask(regs, (u8 *) & temp,
++				 VELOCITY_VLAN_ID_CAM);
++	}
++}
++
++/**
++ * velocity_give_many_rx_descs - hand RX descriptors back to the NIC
++ * @vptr: velocity adapter
++ *
++ * Walk backwards from rd_dirty marking refilled descriptors as
++ * NIC-owned, then tell the chip how many were returned via RBRDU.
++ * Any remainder below a multiple of four is kept back for later.
++ */
++static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
++{
++	struct mac_regs *regs = vptr->mac_regs;
++	int avail, dirty, unusable;
++
++	/*
++	 * RD number must be equal to 4X per hardware spec
++	 * (programming guide rev 1.20, p.13)
++	 */
++	if (vptr->rd_filled < 4)
++		return;
++
++	wmb();
++
++	unusable = vptr->rd_filled & 0x0003;
++	dirty = vptr->rd_dirty - unusable;
++	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
++		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
++		vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
++	}
++
++	/* was "®s->RBRDU" - mis-encoded "&regs->RBRDU" */
++	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
++	vptr->rd_filled = unusable;
++}
++
++/**
++ * velocity_rx_refill - re-arm consumed RX descriptors
++ * @vptr: velocity adapter
++ *
++ * Starting at rd_dirty, reinitialise every descriptor the host has
++ * consumed (up to rd_curr) and then hand them back to the NIC via
++ * velocity_give_many_rx_descs().  Always returns 0.
++ */
++static int velocity_rx_refill(struct velocity_info *vptr)
++{
++	int dirty = vptr->rd_dirty, done = 0, ret = 0;
++
++	do {
++		struct rx_desc *rd = vptr->rd_ring + dirty;
++
++		/* Fine for an all zero Rx desc at init time as well */
++		if (rd->rdesc0.owner == OWNED_BY_NIC)
++			break;
++
++		rd->inten = 1;
++		rd->pa_high = 0;
++		/* (dropped a stray duplicate ";;" here) */
++		rd->rdesc0.len = cpu_to_le32(vptr->rx_buf_sz);
++
++		done++;
++		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
++	} while (dirty != vptr->rd_curr);
++
++	if (done) {
++		vptr->rd_dirty = dirty;
++		vptr->rd_filled += done;
++		velocity_give_many_rx_descs(vptr);
++	}
++
++	return ret;
++}
++
++extern void hex_dump(const char *data, const unsigned int len);
++/**************************************************************************
++POLL - Wait for a frame
++***************************************************************************/
++//EB53 static int velocity_poll(struct nic *nic, int retrieve)
++static int velocity_poll(struct nic *nic __unused)
++{
++	/* Work out whether or not there's an ethernet packet ready to
++	 * read. Return 0 if not.
++	 */
++
++	/* vptr is the file-scope adapter state set up in velocity_probe */
++	int rd_curr = vptr->rd_curr % RX_DESC_DEF;
++	struct rx_desc *rd = &(vptr->rd_ring[rd_curr]);
++
++	/* Descriptor still owned by the NIC: nothing received yet */
++	if (rd->rdesc0.owner == OWNED_BY_NIC)
++		return 0;
++	rmb();
++
++	/*
++	 * Don't drop CE or RL error frame although RXOK is off
++	 */
++	if ((rd->rdesc0.RSR & RSR_RXOK)
++	    || (!(rd->rdesc0.RSR & RSR_RXOK)
++		&& (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
++
++		/* NOTE(review): packetlen keeps the full descriptor length
++		   while only len - 4 bytes (presumably minus the trailing
++		   CRC) are copied out - confirm whether packetlen should
++		   also exclude those 4 bytes. */
++		nic->packetlen = rd->rdesc0.len;
++		// ptr->rxb + (rd_curr * PKT_BUF_SZ)
++		memcpy(nic->packet, bus_to_virt(rd->pa_low),
++		       nic->packetlen - 4);
++
++		/* Advance the ring and give the descriptor back to the NIC */
++		vptr->rd_curr++;
++		vptr->rd_curr = vptr->rd_curr % RX_DESC_DEF;
++		velocity_rx_refill(vptr);
++		return 1;	/* a frame was retrieved */
++	}
++	return 0;
++}
++
++/* ticks to wait for the NIC to release a TX descriptor; the original
++   define carried a stray trailing ';' which breaks the macro in any
++   expression context */
++#define TX_TIMEOUT (1000)
++/**************************************************************************
++TRANSMIT - Transmit a frame
++***************************************************************************/
++static void velocity_transmit(struct nic *nic, const char *dest,	/* Destination */
++			      unsigned int type,	/* Type */
++			      unsigned int size,	/* size */
++			      const char *packet)
++{				/* Packet */
++	u16 nstype;
++	u32 to;
++	u8 *ptxb;
++	unsigned int pktlen;
++	struct tx_desc *td_ptr;
++
++	/* Pick the next TX descriptor and its matching bounce buffer */
++	int entry = vptr->td_curr % TX_DESC_DEF;
++	td_ptr = &(vptr->td_rings[entry]);
++
++	/* point to the current txb incase multiple tx_rings are used */
++	ptxb = vptr->txb + (entry * PKT_BUF_SZ);
++	/* Build the Ethernet header in the bounce buffer, then the payload */
++	memcpy(ptxb, dest, ETH_ALEN);	/* Destination */
++	memcpy(ptxb + ETH_ALEN, nic->node_addr, ETH_ALEN);	/* Source */
++	nstype = htons((u16) type);	/* Type */
++	memcpy(ptxb + 2 * ETH_ALEN, (u8 *) & nstype, 2);	/* Type */
++	memcpy(ptxb + ETH_HLEN, packet, size);
++
++	td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
++	td_ptr->tdesc1.TCR = TCR0_TIC;
++	td_ptr->td_buf[0].queue = 0;
++
++	size += ETH_HLEN;
++	/* Zero-pad short frames up to the Ethernet minimum */
++	while (size < ETH_ZLEN)	/* pad to min length */
++		ptxb[size++] = '\0';
++
++	/* NOTE(review): the padding loop above guarantees size >= ETH_ZLEN
++	   here, so this branch is dead code and the else branch always
++	   runs; behaviour is the same either way since both program the
++	   same descriptor fields. */
++	if (size < ETH_ZLEN) {
++// printf("Padd that packet\n");
++		pktlen = ETH_ZLEN;
++// memcpy(ptxb, skb->data, skb->len);
++		memset(ptxb + size, 0, ETH_ZLEN - size);
++
++		vptr->td_rings[entry].tdesc0.pktsize = pktlen;
++		vptr->td_rings[entry].td_buf[0].pa_low = virt_to_bus(ptxb);
++		vptr->td_rings[entry].td_buf[0].pa_high &=
++		    cpu_to_le32(0xffff0000L);
++		vptr->td_rings[entry].td_buf[0].bufsize =
++		    vptr->td_rings[entry].tdesc0.pktsize;
++		vptr->td_rings[entry].tdesc1.CMDZ = 2;
++	} else {
++// printf("Correct size packet\n");
++		td_ptr->tdesc0.pktsize = size;
++		td_ptr->td_buf[0].pa_low = virt_to_bus(ptxb);
++		td_ptr->td_buf[0].pa_high = 0;
++		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
++// tdinfo->nskb_dma = 1;
++		td_ptr->tdesc1.CMDZ = 2;
++	}
++
++	/* Insert the 802.1q tag when tagging is enabled */
++	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
++		td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
++		td_ptr->tdesc1.pqinf.priority = 0;
++		td_ptr->tdesc1.pqinf.CFI = 0;
++		td_ptr->tdesc1.TCR |= TCR0_VETAG;
++	}
++
++	vptr->td_curr = (entry + 1);
++
++	/* Hand off to the NIC: mark this descriptor NIC-owned, set the
++	   queue bit on the PREVIOUS descriptor, then kick queue 0.
++	   Note td_ptr points at the previous descriptor afterwards. */
++	{
++
++		int prev = entry - 1;
++
++		if (prev < 0)
++			prev = TX_DESC_DEF - 1;
++		td_ptr->tdesc0.owner |= OWNED_BY_NIC;
++		td_ptr = &(vptr->td_rings[prev]);
++		td_ptr->td_buf[0].queue = 1;
++		mac_tx_queue_wake(vptr->mac_regs, 0);
++
++	}
++
++	/* Busy-wait until the NIC releases the descriptor or we time out */
++	to = currticks() + TX_TIMEOUT;
++	while ((td_ptr->tdesc0.owner & OWNED_BY_NIC) && (currticks() < to));	/* wait */
++
++	if (currticks() >= to) {
++		printf("TX Time Out");
++	}
++
++}
++
++/**************************************************************************
++DISABLE - Turn off ethernet interface
++***************************************************************************/
++static void velocity_disable(struct dev *dev __unused)
++{
++	/* Return the card to its initial state.  This:
++	 *  - disables DMA and interrupts, so we don't receive unexpected
++	 *    packets or interrupts after etherboot has finished,
++	 *  - frees resources so etherboot may use this driver on another
++	 *    interface,
++	 *  - allows etherboot to reinitialize the interface if something
++	 *    goes wrong.
++	 */
++	struct mac_regs *regs = vptr->mac_regs;
++
++	mac_disable_int(regs);
++	/* Stop the MAC and tear down both DMA queues
++	   (was "®s->..." - mis-encoded "&regs->") */
++	writel(CR0_STOP, &regs->CR0Set);
++	writew(0xFFFF, &regs->TDCSRClr);
++	writeb(0xFF, &regs->RDCSRClr);
++	safe_disable_mii_autopoll(regs);
++	mac_clear_isr(regs);
++
++	/* Power down the chip */
++// pci_set_power_state(vptr->pdev, PCI_D3hot);
++
++	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
++}
++
++#ifdef EB54
++/**************************************************************************
++IRQ - handle interrupts
++***************************************************************************/
++static void velocity_irq(struct nic *nic __unused, irq_action_t action)
++{
++	/* This routine is somewhat optional. Etherboot itself
++	 * doesn't use interrupts, but they are required under some
++	 * circumstances when we're acting as a PXE stack.
++	 *
++	 * If you don't implement this routine, the only effect will
++	 * be that your driver cannot be used via Etherboot's UNDI
++	 * API. This won't affect programs that use only the UDP
++	 * portion of the PXE API, such as pxelinux.
++	 */
++
++	/* All cases are currently stubs: the register writes below are
++	   placeholders left commented out. */
++	switch (action) {
++	case DISABLE:
++	case ENABLE:
++		/* Set receive interrupt enabled/disabled state */
++		/*
++		   outb ( action == ENABLE ? IntrMaskEnabled : IntrMaskDisabled,
++		   nic->ioaddr + IntrMaskRegister );
++		 */
++		break;
++	case FORCE:
++		/* Force NIC to generate a receive interrupt */
++		/*
++		   outb ( ForceInterrupt, nic->ioaddr + IntrForceRegister );
++		 */
++		break;
++	}
++}
++#endif
++/**************************************************************************
++PROBE - Look for an adapter, this routine's visible to the outside
++***************************************************************************/
++
++#define board_found 1
++#define valid_link 0
++static int velocity_probe(struct dev *dev, struct pci_device *pci)
++{
++	struct nic *nic = (struct nic *) dev;
++	int ret, i;
++	struct mac_regs *regs;
++
++	printf("via-velocity.c: Found %s Vendor=0x%hX Device=0x%hX\n",
++	       pci->name, pci->vendor, pci->dev_id);
++
++	/* point to private storage */
++	vptr = &vptx;
++	info = chip_info_table;
++
++	velocity_init_info(pci, vptr, info);
++
++//FIXME: pci_enable_device(pci);
++//FIXME: pci_set_power_state(pci, PCI_D0);
++
++	ret = velocity_get_pci_info(vptr, pci);
++	if (ret < 0) {
++		printf("Failed to find PCI device.\n");
++		return 0;
++	}
++
++	/* Map the memory-mapped register BAR */
++	regs = ioremap(vptr->memaddr, vptr->io_size);
++	if (regs == NULL) {
++		printf("Unable to remap io\n");
++		return 0;
++	}
++
++	vptr->mac_regs = regs;
++
++	BASE = vptr->ioaddr;
++
++	printf("Chip ID: %hX\n", vptr->chip_id);
++
++	/* Read the permanent station address from the PAR registers
++	   (was "readb(®s->PAR[i])" - mis-encoded "&regs->PAR[i]") */
++	for (i = 0; i < 6; i++)
++		nic->node_addr[i] = readb(&regs->PAR[i]);
++
++	/* Print out some hardware info */
++	printf("%s: %! at ioaddr %hX, ", pci->name, nic->node_addr, BASE);
++
++	velocity_get_options(&vptr->options, 0, pci->name);
++
++	/*
++	 * Mask out the options cannot be set to the chip
++	 */
++	vptr->options.flags &= 0x00FFFFFFUL;	//info->flags = 0x00FFFFFFUL;
++
++	/*
++	 * Enable the chip specified capbilities
++	 */
++
++	vptr->flags =
++	    vptr->options.
++	    flags | (0x00FFFFFFUL /*info->flags */  & 0xFF000000UL);
++
++	vptr->wol_opts = vptr->options.wol_opts;
++	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
++
++	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
++
++	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
++		printf("features missing\n");
++	}
++
++	/* and leave the chip powered down */
++// FIXME: pci_set_power_state(pci, PCI_D3hot);
++
++	check_connection_type(vptr->mac_regs);
++	velocity_open(nic, pci);
++
++	/* store NIC parameters */
++#ifdef EB54
++	nic->ioaddr = pci->ioaddr & ~3;
++	nic->irqno = pci->irq;
++	nic->irq = velocity_irq;
++#endif
++	dev->disable = velocity_disable;
++	nic->poll = velocity_poll;
++	nic->transmit = velocity_transmit;
++	return 1;
++}
++
++//#define IORESOURCE_IO 0x00000100 /* Resource type */
++
++/**
++ * velocity_init_info - init private data
++ * @pdev: PCI device
++ * @vptr: Velocity info
++ * @info: Board type
++ *
++ * Zero the per-adapter state and populate it from the matched board
++ * table entry and PCI device, then report the resulting parameters.
++ */
++
++static void velocity_init_info(struct pci_device *pdev,
++			       struct velocity_info *vptr,
++			       struct velocity_info_tbl *info)
++{
++	/* Start from a clean slate */
++	memset(vptr, 0, sizeof(struct velocity_info));
++
++	/* Copy board-table parameters and remember the PCI device */
++	vptr->multicast_limit = MCAM_SIZE;
++	vptr->num_txq = info->txqueue;
++	vptr->io_size = info->io_size;
++	vptr->chip_id = info->chip_id;
++	vptr->pdev = pdev;
++
++	printf
++	    ("chip_id: 0x%hX, io_size: %d, num_txq %d, multicast_limit: %d\n",
++	     vptr->chip_id, vptr->io_size, vptr->num_txq,
++	     vptr->multicast_limit);
++	printf("Name: %s\n", info->name);
++
++// spin_lock_init(&vptr->lock);
++// INIT_LIST_HEAD(&vptr->list);
++}
++
++/**
++ * velocity_get_pci_info - retrieve PCI info for device
++ * @vptr: velocity device
++ * @pdev: PCI device it matches
++ *
++ * Retrieve the PCI configuration space data that interests us from
++ * the kernel PCI layer
++ */
++
++#define IORESOURCE_IO 0x00000100	/* Resource type */
++#define IORESOURCE_PREFETCH 0x00001000	/* No side effects */
++
++#define IORESOURCE_MEM 0x00000200
++#define BAR_0 0
++#define BAR_1 1
++#define BAR_5 5
++#define PCI_BASE_ADDRESS_SPACE 0x01	/* 0 = memory, 1 = I/O */
++#define PCI_BASE_ADDRESS_SPACE_IO 0x01
++#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
++#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
++#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00	/* 32 bit address */
++#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02	/* Below 1M [obsolete] */
++#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04	/* 64 bit address */
++#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08	/* prefetchable? */
++//#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
++// #define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
++
++/* Probe a BAR with the classic write-all-ones trick and translate its
++   low type bits into IORESOURCE_* flags.  The *_MASK macros used below
++   are commented out here, so they are presumably defined elsewhere -
++   TODO confirm.  NOTE(review): a non-prefetchable memory BAR never gets
++   IORESOURCE_MEM set by this code, which looks wrong, but the only
++   caller in this file (velocity_get_pci_info) tests IORESOURCE_IO only. */
++unsigned long pci_resource_flags(struct pci_device *pdev, unsigned int bar)
++{
++	uint32_t l, sz;
++	unsigned long flags = 0;
++
++	/* Save the BAR, size it by writing all ones, then restore it */
++	pci_read_config_dword(pdev, bar, &l);
++	pci_write_config_dword(pdev, bar, ~0);
++	pci_read_config_dword(pdev, bar, &sz);
++	pci_write_config_dword(pdev, bar, l);
++
++	if (!sz || sz == 0xffffffff)
++		printf("Weird size\n");
++	if (l == 0xffffffff)
++		l = 0;
++	/* Bit 0 of the BAR distinguishes memory (0) from I/O (1) space */
++	if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
++		/* sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
++		   if (!sz)
++		   continue;
++		   res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
++		 */ flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
++		printf("Memory Resource\n");
++	} else {
++		// sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
++		/// if (!sz)
++		///     continue;
++// res->start = l & PCI_BASE_ADDRESS_IO_MASK;
++		flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
++		printf("I/O Resource\n");
++	}
++	if (flags & PCI_BASE_ADDRESS_SPACE_IO) {
++		printf("Why is it here\n");
++		flags |= IORESOURCE_IO;
++	} else {
++		printf("here\n");
++//flags &= ~IORESOURCE_IO;
++	}
++
++
++	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
++		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
++
++
++	return flags;
++}
++/* Gather BAR addresses and sanity-check the resource layout: BAR0 must
++   be an I/O window and BAR1 a memory window of at least 256 bytes.
++   Returns 0 on success, -1 on any failure. */
++static int velocity_get_pci_info(struct velocity_info *vptr,
++				 struct pci_device *pdev)
++{
++	/* Cache the silicon revision for later feature checks */
++	if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) {
++		printf("DEBUG: pci_read_config_byte failed\n");
++		return -1;
++	}
++
++	adjust_pci_device(pdev);
++
++	/* BAR0 carries the I/O port base, BAR1 the register MMIO base */
++	vptr->ioaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
++	vptr->memaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_1);
++
++	printf("Looking for I/O Resource - Found:");
++	if ((pci_resource_flags(pdev, PCI_BASE_ADDRESS_0) & IORESOURCE_IO) == 0) {
++		printf
++		    ("DEBUG: region #0 is not an I/O resource, aborting.\n");
++		return -1;
++	}
++
++	printf("Looking for Memory Resource - Found:");
++	if ((pci_resource_flags(pdev, PCI_BASE_ADDRESS_1) & IORESOURCE_IO) != 0) {
++		printf("DEBUG: region #1 is an I/O resource, aborting.\n");
++		return -1;
++	}
++
++	if (pci_bar_size(pdev, PCI_BASE_ADDRESS_1) < 256) {
++		printf("DEBUG: region #1 is too small.\n");
++		return -1;
++	}
++	vptr->pdev = pdev;
++
++	return 0;
++}
++
++/**
++ * velocity_print_link_status - link status reporting
++ * @vptr: velocity to report on
++ *
++ * Report the current link state: either a cable failure, the
++ * autonegotiated speed/duplex, or the forced media setting.
++ */
++
++static void velocity_print_link_status(struct velocity_info *vptr)
++{
++	/* Dead link: nothing more to report */
++	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
++		printf("failed to detect cable link\n");
++		return;
++	}
++
++	/* Autonegotiated: decode speed then duplex from mii_status */
++	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
++		printf("Link autonegation");
++
++		if (vptr->mii_status & VELOCITY_SPEED_1000)
++			printf(" speed 1000M bps");
++		else if (vptr->mii_status & VELOCITY_SPEED_100)
++			printf(" speed 100M bps");
++		else
++			printf(" speed 10M bps");
++
++		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
++			printf(" full duplex\n");
++		else
++			printf(" half duplex\n");
++		return;
++	}
++
++	/* Forced media mode: report the configured setting */
++	printf("Link forced");
++	if (vptr->options.spd_dpx == SPD_DPX_100_HALF)
++		printf(" speed 100M bps half duplex\n");
++	else if (vptr->options.spd_dpx == SPD_DPX_100_FULL)
++		printf(" speed 100M bps full duplex\n");
++	else if (vptr->options.spd_dpx == SPD_DPX_10_HALF)
++		printf(" speed 10M bps half duplex\n");
++	else if (vptr->options.spd_dpx == SPD_DPX_10_FULL)
++		printf(" speed 10M bps full duplex\n");
++}
++
++/**
++ * velocity_rx_reset - handle a receive reset
++ * @vptr: velocity we are resetting
++ *
++ * Reset the ownership and status for the receive ring side.
++ * Hand all the receive queue to the NIC.
++ */
++
++static void velocity_rx_reset(struct velocity_info *vptr)
++{
++
++	struct mac_regs *regs = vptr->mac_regs;
++	int i;
++
++	/*
++	 * Init state, all RD entries belong to the NIC
++	 */
++	for (i = 0; i < vptr->options.numrx; ++i)
++		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
++
++	/* Reprogram the RX ring registers
++	   (was "®s->..." - mis-encoded "&regs->") */
++	writew(RX_DESC_DEF, &regs->RBRDU);
++	writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
++	writew(0, &regs->RDIdx);
++	writew(RX_DESC_DEF - 1, &regs->RDCSize);
++}
++
++/**
++ * velocity_init_registers - initialise MAC registers
++ * @nic: etherboot NIC (source of the station address)
++ * @vptr: velocity to init
++ * @type: type of initialisation (hot or cold)
++ *
++ * Initialise the MAC on a reset or on first set up on the
++ * hardware.  All "®s->" register references below were mis-encoded
++ * forms of "&regs->" and have been restored.
++ */
++
++static void velocity_init_registers(struct nic *nic,
++				    struct velocity_info *vptr,
++				    enum velocity_init_type type)
++{
++	struct mac_regs *regs = vptr->mac_regs;
++	int i, mii_status;
++
++	mac_wol_reset(regs);
++
++	switch (type) {
++	case VELOCITY_INIT_RESET:
++	case VELOCITY_INIT_WOL:
++
++//netif_stop_queue(vptr->dev);
++
++		/*
++		 * Reset RX to prevent RX pointer not on the 4X location
++		 */
++		velocity_rx_reset(vptr);
++		mac_rx_queue_run(regs);
++		mac_rx_queue_wake(regs);
++
++		mii_status = velocity_get_opt_media_mode(vptr);
++
++		if (velocity_set_media_mode(vptr, mii_status) !=
++		    VELOCITY_LINK_CHANGE) {
++			velocity_print_link_status(vptr);
++			/* was "if (!(... & VELOCITY_LINK_FAIL))", which
++			   reported a failure on a GOOD link - inverted */
++			if (vptr->mii_status & VELOCITY_LINK_FAIL)
++				printf("Link Failed\n");
++// netif_wake_queue(vptr->dev);
++		}
++
++		enable_flow_control_ability(vptr);
++
++		mac_clear_isr(regs);
++		writel(CR0_STOP, &regs->CR0Clr);
++		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
++		       &regs->CR0Set);
++		break;
++
++	case VELOCITY_INIT_COLD:
++	default:
++		/*
++		 * Do reset
++		 */
++		velocity_soft_reset(vptr);
++		mdelay(5);
++
++		mac_eeprom_reload(regs);
++		/* Program the station address into the PAR registers */
++		for (i = 0; i < 6; i++) {
++			writeb(nic->node_addr[i], &(regs->PAR[i]));
++		}
++		/*
++		 * clear Pre_ACPI bit.
++		 */
++		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
++		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
++		mac_set_dma_length(regs, vptr->options.DMA_length);
++
++		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
++		/*
++		 * Back off algorithm use original IEEE standard
++		 */
++		BYTE_REG_BITS_SET(CFGB_OFSET,
++				  (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA |
++				   CFGB_BAKOPT), &regs->CFGB);
++
++		/*
++		 * Init CAM filter
++		 */
++		velocity_init_cam_filter(vptr);
++
++		/*
++		 * Set packet filter: Receive directed and broadcast address
++		 */
++//FIXME Multicast velocity_set_multi(nic);
++
++		/*
++		 * Enable MII auto-polling
++		 */
++		enable_mii_autopoll(regs);
++
++		vptr->int_mask = INT_MASK_DEF;
++
++		/* Hand the RX and TX descriptor rings to the chip */
++		writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
++		writew(vptr->options.numrx - 1, &regs->RDCSize);
++		mac_rx_queue_run(regs);
++		mac_rx_queue_wake(regs);
++
++		writew(vptr->options.numtx - 1, &regs->TDCSize);
++
++// for (i = 0; i < vptr->num_txq; i++) {
++		writel(virt_to_le32desc(vptr->td_rings),
++		       &(regs->TDBaseLo[0]));
++		mac_tx_queue_run(regs, 0);
++// }
++
++		init_flow_control_register(vptr);
++
++		writel(CR0_STOP, &regs->CR0Clr);
++		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
++		       &regs->CR0Set);
++
++		mii_status = velocity_get_opt_media_mode(vptr);
++// netif_stop_queue(vptr->dev);
++
++		mii_init(vptr, mii_status);
++
++		if (velocity_set_media_mode(vptr, mii_status) !=
++		    VELOCITY_LINK_CHANGE) {
++			velocity_print_link_status(vptr);
++			/* same inverted condition (and misspelt message)
++			   as above - report failure only on failure */
++			if (vptr->mii_status & VELOCITY_LINK_FAIL)
++				printf("Link Failed\n");
++// netif_wake_queue(vptr->dev);
++		}
++
++		enable_flow_control_ability(vptr);
++		mac_hw_mibs_init(regs);
++		mac_write_int_mask(vptr->int_mask, regs);
++		mac_clear_isr(regs);
++
++
++	}
++	velocity_print_link_status(vptr);
++}
++
++/**
++ * velocity_soft_reset - soft reset
++ * @vptr: velocity to reset
++ *
++ * Kick off a soft reset of the velocity adapter and then poll
++ * until the reset sequence has completed before returning.
++ * Always returns 0.
++ */
++
++static int velocity_soft_reset(struct velocity_info *vptr)
++{
++	struct mac_regs *regs = vptr->mac_regs;
++	unsigned int i = 0;
++
++	/* was "®s->CR0Set" - mis-encoded "&regs->CR0Set" */
++	writel(CR0_SFRST, &regs->CR0Set);
++
++	for (i = 0; i < W_MAX_TIMEOUT; i++) {
++		udelay(5);
++		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
++			break;
++	}
++
++	if (i == W_MAX_TIMEOUT) {
++		/* Soft reset never completed: force it */
++		writel(CR0_FORSRST, &regs->CR0Set);
++		/* FIXME: PCI POSTING */
++		/* delay 2ms */
++		mdelay(2);
++	}
++	return 0;
++}
++
++/**
++ * velocity_init_rings - set up DMA rings
++ * @vptr: Velocity to set up
++ *
++ * Initialise the receive and transmit descriptor rings.  Every RX
++ * descriptor is pointed at its own PKT_BUF_SZ-sized slice of the
++ * rxb buffer and handed to the NIC.  Always returns 0.
++ */
++
++static int velocity_init_rings(struct velocity_info *vptr)
++{
++
++	int idx;
++
++	vptr->rd_curr = 0;
++	vptr->td_curr = 0;
++	memset(vptr->td_rings, 0, TX_DESC_DEF * sizeof(struct tx_desc));
++	memset(vptr->rd_ring, 0, RX_DESC_DEF * sizeof(struct rx_desc));
++// memset(vptr->tx_buffs, 0, TX_DESC_DEF * PKT_BUF_SZ);
++
++
++	for (idx = 0; idx < RX_DESC_DEF; idx++) {
++		*((u32 *) & (vptr->rd_ring[idx].rdesc0)) = 0;
++		vptr->rd_ring[idx].len = cpu_to_le32(vptr->rx_buf_sz);
++		vptr->rd_ring[idx].inten = 1;
++		/* was "rxb + (RX_DESC_DEF * idx)": the buffer stride must
++		 * be the per-packet buffer size - velocity_poll and
++		 * velocity_transmit both stride by PKT_BUF_SZ - otherwise
++		 * the receive buffers overlap */
++		vptr->rd_ring[idx].pa_low =
++		    virt_to_bus(vptr->rxb + (idx * PKT_BUF_SZ));
++		vptr->rd_ring[idx].pa_high = 0;
++		vptr->rd_ring[idx].rdesc0.owner = OWNED_BY_NIC;
++
++	}
++
++/* for (i = 0; idx < TX_DESC_DEF; idx++ ) {
++	vptr->td_rings[idx].tdesc1.TCPLS = TCPLS_NORMAL;
++	vptr->td_rings[idx].tdesc1.TCR = TCR0_TIC;
++	vptr->td_rings[idx].td_buf[0].queue = 0;
++	vptr->td_rings[idx].tdesc0.owner = ~OWNED_BY_NIC;
++	vptr->td_rings[idx].tdesc0.pktsize = 0;
++	vptr->td_rings[idx].td_buf[0].pa_low = cpu_to_le32(virt_to_bus(vptr->txb + (idx * PKT_BUF_SZ)));
++	vptr->td_rings[idx].td_buf[0].pa_high = 0;
++	vptr->td_rings[idx].td_buf[0].bufsize = 0;
++	vptr->td_rings[idx].tdesc1.CMDZ = 2;
++	}
++*/
++	return 0;
++}
++
++/**
++ * velocity_open - interface activation callback
++ * @dev: network layer device to open
++ *
++ * Called when the network layer brings the interface up. Returns
++ * a negative posix error code on failure, or zero on success.
++ *
++ * All the ring allocation and set up is done on open for this
++ * adapter to minimise memory usage when inactive
++ */
++
++/* Read-modify-write helper: OR bits (x) into PCI config byte (i) of
++   device (p). */
++#define PCI_BYTE_REG_BITS_ON(x,i,p) do{\
++	u8 byReg;\
++	pci_read_config_byte((p), (i), &(byReg));\
++	(byReg) |= (x);\
++	pci_write_config_byte((p), (i), (byReg));\
++} while (0)
++
++//
++// Registers in the PCI configuration space
++//
++#define PCI_REG_COMMAND 0x04	//
++#define PCI_REG_MODE0 0x60	//
++#define PCI_REG_MODE1 0x61	//
++#define PCI_REG_MODE2 0x62	//
++#define PCI_REG_MODE3 0x63	//
++#define PCI_REG_DELAY_TIMER 0x64	//
++
++// Bits in the (MODE2, 0x62) register
++//
++#define MODE2_PCEROPT 0x80	// take PCI bus ERror as a fatal and shutdown from software control
++#define MODE2_TXQ16 0x40	// TX write-back Queue control. 0->32 entries available in Tx write-back queue, 1->16 entries
++#define MODE2_TXPOST 0x08	// (Not support in VT3119)
++#define MODE2_AUTOOPT 0x04	// (VT3119 GHCI without such behavior)
++#define MODE2_MODE10T 0x02	// used to control tx Threshold for 10M case
++#define MODE2_TCPLSOPT 0x01	// TCP large send field update disable, hardware will not update related fields, leave it to software.
++
++//
++// Bits in the MODE3 register
++//
++#define MODE3_MIION 0x04	// MII symbol coding error detect enable ??
++
++// Bits in the (COMMAND, 0x04) register
++#define COMMAND_BUSM 0x04
++#define COMMAND_WAIT 0x80
++/**
++ * velocity_open - interface activation
++ * @nic: etherboot NIC
++ * @pci: PCI device (unused)
++ *
++ * Carve 64-byte-aligned descriptor rings and packet buffers out of
++ * the static tx_ring/rx_ring/txb/rxb arrays, initialise the rings
++ * and program the MAC from a cold start.  Returns 1.
++ */
++static int velocity_open(struct nic *nic, struct pci_device *pci __unused)
++{
++	int ret;
++
++	u8 diff;
++	u32 TxPhyAddr, RxPhyAddr;
++	u32 TxBufPhyAddr, RxBufPhyAddr;
++	vptr->TxDescArrays = tx_ring;
++	if (vptr->TxDescArrays == 0)
++		printf("Allot Error");
++
++	/* Tx Descriptor needs 64 bytes alignment; */
++	TxPhyAddr = virt_to_bus(vptr->TxDescArrays);
++	printf("Unaligned Address : %lX\n", TxPhyAddr);
++	diff = 64 - (TxPhyAddr - ((TxPhyAddr >> 6) << 6));
++	TxPhyAddr += diff;
++	vptr->td_rings = (struct tx_desc *) (vptr->TxDescArrays + diff);
++
++	printf("Aligned Address: %lX\n", virt_to_bus(vptr->td_rings));
++	vptr->tx_buffs = txb;
++	/* Tx buffer needs 64 bytes alignment too */
++	TxBufPhyAddr = virt_to_bus(vptr->tx_buffs);
++	diff = 64 - (TxBufPhyAddr - ((TxBufPhyAddr >> 6) << 6));
++	TxBufPhyAddr += diff;
++	vptr->txb = (unsigned char *) (vptr->tx_buffs + diff);
++
++	vptr->RxDescArrays = rx_ring;
++	/* Rx Descriptor needs 64 bytes alignment; */
++	RxPhyAddr = virt_to_bus(vptr->RxDescArrays);
++	diff = 64 - (RxPhyAddr - ((RxPhyAddr >> 6) << 6));
++	RxPhyAddr += diff;
++	vptr->rd_ring = (struct rx_desc *) (vptr->RxDescArrays + diff);
++
++	vptr->rx_buffs = rxb;
++	/* Rx Buffer needs 64 bytes alignment; */
++	RxBufPhyAddr = virt_to_bus(vptr->rx_buffs);
++	diff = 64 - (RxBufPhyAddr - ((RxBufPhyAddr >> 6) << 6));
++	RxBufPhyAddr += diff;
++	vptr->rxb = (unsigned char *) (vptr->rx_buffs + diff);
++
++	/* was "RxDescArrays == NULL || RxDescArrays == NULL" - the first
++	   operand clearly meant to check the TX array */
++	if (vptr->TxDescArrays == NULL || vptr->RxDescArrays == NULL) {
++		printf("Allocate tx_ring or rd_ring failed\n");
++		return 0;
++	}
++
++	vptr->rx_buf_sz = PKT_BUF_SZ;
++/*
++	// turn this on to avoid retry forever
++	PCI_BYTE_REG_BITS_ON(MODE2_PCEROPT, PCI_REG_MODE2, pci);
++	// for some legacy BIOS and OS don't open BusM
++	// bit in PCI configuration space. So, turn it on.
++	PCI_BYTE_REG_BITS_ON(COMMAND_BUSM, PCI_REG_COMMAND, pci);
++	// turn this on to detect MII coding error
++	PCI_BYTE_REG_BITS_ON(MODE3_MIION, PCI_REG_MODE3, pci);
++	*/
++	ret = velocity_init_rings(vptr);
++
++	/* Ensure chip is running */
++//FIXME: pci_set_power_state(vptr->pdev, PCI_D0);
++
++	velocity_init_registers(nic, vptr, VELOCITY_INIT_COLD);
++	mac_write_int_mask(0, vptr->mac_regs);
++// _int(vptr->mac_regs);
++	//mac_enable_int(vptr->mac_regs);
++
++	vptr->flags |= VELOCITY_FLAGS_OPENED;
++	return 1;
++
++}
++
++/*
++ * MII access , media link mode setting functions
++ */
++
++
++/**
++ * mii_init - set up MII
++ * @vptr: velocity adapter
++ * @mii_status: link status (currently unused)
++ *
++ * Set up the PHY for the current link state, applying PHY-model
++ * specific workarounds, then make sure the PHY is not isolated.
++ */
++
++static void mii_init(struct velocity_info *vptr, u32 mii_status __unused)
++{
++	u16 BMCR;
++
++	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
++	case PHYID_CICADA_CS8201:
++		/*
++		 * Reset to hardware default
++		 */
++		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
++				 vptr->mac_regs);
++		/*
++		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
++		 * off it in NWay-forced half mode for NWay-forced v.s.
++		 * legacy-forced issue.
++		 */
++		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
++			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR,
++					vptr->mac_regs);
++		else
++			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR,
++					 vptr->mac_regs);
++		/*
++		 * Turn on Link/Activity LED enable bit for CIS8201
++		 */
++		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
++		break;
++	case PHYID_VT3216_32BIT:
++	case PHYID_VT3216_64BIT:
++		/*
++		 * Reset to hardware default
++		 */
++		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
++				vptr->mac_regs);
++		/*
++		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
++		 * off it in NWay-forced half mode for NWay-forced v.s.
++		 * legacy-forced issue
++		 */
++		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
++			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR,
++					vptr->mac_regs);
++		else
++			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR,
++					 vptr->mac_regs);
++		break;
++
++	case PHYID_MARVELL_1000:
++	case PHYID_MARVELL_1000S:
++		/*
++		 * Assert CRS on Transmit
++		 */
++		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
++		/*
++		 * Reset to hardware default
++		 */
++		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
++				vptr->mac_regs);
++		break;
++	default:
++		;
++	}
++	/* Clear the isolate bit if set, so the PHY talks to the MAC */
++	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
++	if (BMCR & BMCR_ISO) {
++		BMCR &= ~BMCR_ISO;
++		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
++	}
++}
++
++/**
++ * safe_disable_mii_autopoll - autopoll off
++ * @regs: velocity registers
++ *
++ * Turn off the autopoll and wait for it to disable on the chip
++ */
++
++static void safe_disable_mii_autopoll(struct mac_regs *regs)
++{
++	u16 ww;
++
++	/* turn off MAUTO
++	   (was "®s->..." - mis-encoded "&regs->") */
++	writeb(0, &regs->MIICR);
++	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
++		udelay(1);
++		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
++			break;
++	}
++}
++
++/**
++ * enable_mii_autopoll - turn on autopolling
++ * @regs: velocity registers
++ *
++ * Enable the MII link status autopoll feature on the Velocity
++ * hardware. Wait for it to enable.  All "®s->" references were
++ * mis-encoded "&regs->" and have been restored.
++ */
++
++static void enable_mii_autopoll(struct mac_regs *regs)
++{
++	unsigned int ii;
++
++	/* Stop any polling in progress and wait until the engine idles */
++	writeb(0, &(regs->MIICR));
++	writeb(MIIADR_SWMPL, &regs->MIIADR);
++
++	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
++		udelay(1);
++		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
++			break;
++	}
++
++	writeb(MIICR_MAUTO, &regs->MIICR);
++
++	/* Wait until auto-polling has actually started */
++	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
++		udelay(1);
++		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
++			break;
++	}
++
++}
++
++/**
++ * velocity_mii_read - read MII data
++ * @regs: velocity registers
++ * @index: MII register index
++ * @data: buffer for received data
++ *
++ * Perform a single read of an MII 16bit register. Returns zero
++ * on success or -1 if the PHY did not respond in time.
++ */
++
++static int velocity_mii_read(struct mac_regs *regs, u8 index, u16 * data)
++{
++	u16 ww;
++
++	/*
++	 * Disable MIICR_MAUTO, so that mii addr can be set normally
++	 */
++	safe_disable_mii_autopoll(regs);
++
++	/* was "®s->..." - mis-encoded "&regs->" */
++	writeb(index, &regs->MIIADR);
++
++	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
++
++	/* Wait for the read command bit to self-clear */
++	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
++		if (!(readb(&regs->MIICR) & MIICR_RCMD))
++			break;
++	}
++
++	*data = readw(&regs->MIIDATA);
++
++	enable_mii_autopoll(regs);
++	if (ww == W_MAX_TIMEOUT)
++		return -1;
++	return 0;
++}
++
++/**
++ * velocity_mii_write - write MII data
++ * @regs: velocity registers
++ * @mii_addr: MII register index
++ * @data: 16bit data for the MII register
++ *
++ * Perform a single write to an MII 16bit register. Returns zero
++ * on success or -1 if the PHY did not respond in time.
++ */
++
++static int velocity_mii_write(struct mac_regs *regs, u8 mii_addr, u16 data)
++{
++	u16 ww;
++
++	/*
++	 * Disable MIICR_MAUTO, so that mii addr can be set normally
++	 */
++	safe_disable_mii_autopoll(regs);
++
++	/* MII reg offset
++	   (was "®s->..." - mis-encoded "&regs->") */
++	writeb(mii_addr, &regs->MIIADR);
++	/* set MII data */
++	writew(data, &regs->MIIDATA);
++
++	/* turn on MIICR_WCMD */
++	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
++
++	/* W_MAX_TIMEOUT is the timeout period */
++	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
++		udelay(5);
++		if (!(readb(&regs->MIICR) & MIICR_WCMD))
++			break;
++	}
++	enable_mii_autopoll(regs);
++
++	if (ww == W_MAX_TIMEOUT)
++		return -1;
++	return 0;
++}
++
++/**
++ * velocity_get_opt_media_mode - get media selection
++ * @vptr: velocity adapter
++ *
++ * Get the media mode stored in EEPROM or module options and load
++ * mii_status accordingly. The requested link state information
++ * is also returned.
++ */
++
++static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
++{
++	u32 status = 0;
++
++	switch (vptr->options.spd_dpx) {
++	case SPD_DPX_AUTO:
++		status = VELOCITY_AUTONEG_ENABLE;
++		break;
++	case SPD_DPX_100_FULL:
++		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
++		break;
++	case SPD_DPX_10_FULL:
++		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
++		break;
++	case SPD_DPX_100_HALF:
++		status = VELOCITY_SPEED_100;
++		break;
++	case SPD_DPX_10_HALF:
++		status = VELOCITY_SPEED_10;
++		break;
++	default:
++		/* unknown option value: leave status at 0 (no request) */
++		break;
++	}
++	vptr->mii_status = status;
++	return status;
++}
++
++/**
++ * mii_set_auto_on - autonegotiate on
++ * @vptr: velocity
++ *
++ * Enable autonegotiation on this interface: turn BMCR_AUTO on if it
++ * is clear, otherwise restart negotiation via BMCR_REAUTO.
++ */
++
++static void mii_set_auto_on(struct velocity_info *vptr)
++{
++	if (!MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
++		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
++	else
++		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
++}
++
++
++/*
++static void mii_set_auto_off(struct velocity_info * vptr)
++{
++ MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
++}
++*/
++
++/**
++ * set_mii_flow_control - flow control setup
++ * @vptr: velocity interface
++ *
++ * Set up the flow control on this interface according to
++ * the supplied user/eeprom options.
++ */
++
++static void set_mii_flow_control(struct velocity_info *vptr)
++{
++	/* Enable or Disable PAUSE in ANAR */
++	switch (vptr->options.flow_cntl) {
++	case FLOW_CNTL_TX:
++		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
++		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
++		break;
++
++	/* RX and TX_RX advertise the identical bit pattern */
++	case FLOW_CNTL_RX:
++	case FLOW_CNTL_TX_RX:
++		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
++		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
++		break;
++
++	case FLOW_CNTL_DISABLE:
++		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
++		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
++		break;
++
++	default:
++		break;
++	}
++}
++
++/**
++ * velocity_set_media_mode - set media mode
++ * @vptr: velocity adapter
++ * @mii_status: requested link mode (VELOCITY_* flag word)
++ *
++ * Check the media link state and configure the flow control
++ * PHY and also velocity hardware setup accordingly. In particular
++ * we need to set up CD polling and frame bursting.
++ * Returns VELOCITY_LINK_CHANGE.
++ */
++
++static int velocity_set_media_mode(struct velocity_info *vptr,
++				   u32 mii_status)
++{
++	u32 curr_status;
++	struct mac_regs *regs = vptr->mac_regs;
++
++	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
++	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
++
++	/* Set mii link status */
++	set_mii_flow_control(vptr);
++
++	/*
++	   Check if new status is consistent with current status
++	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
++	   || (mii_status==curr_status)) {
++	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
++	   vptr->mii_status=check_connection_type(vptr->mac_regs);
++	   printf(MSG_LEVEL_INFO, "Velocity link no change\n");
++	   return 0;
++	   }
++	 */
++
++	/* Cicada CS8201 PHY needs AUXCR_MDPPS set first */
++	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
++		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR,
++				vptr->mac_regs);
++	}
++
++	/*
++	 * If connection type is AUTO
++	 */
++	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
++		printf("Velocity is AUTO mode\n");
++		/* clear force MAC mode bit */
++		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
++		/* set duplex mode of MAC according to duplex mode of MII */
++		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10,
++				MII_REG_ANAR, vptr->mac_regs);
++		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000,
++				MII_REG_G1000CR, vptr->mac_regs);
++		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR,
++				vptr->mac_regs);
++
++		/* enable AUTO-NEGO mode */
++		mii_set_auto_on(vptr);
++	} else {
++		u16 ANAR;
++		u8 CHIPGCR;
++
++		/*
++		 * 1. if it's 3119, disable frame bursting in halfduplex mode
++		 *    and enable it in fullduplex mode
++		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
++		 * 3. only enable CD heart beat counter in 10HD mode
++		 */
++
++		/* set force MAC mode bit */
++		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
++
++		CHIPGCR = readb(&regs->CHIPGCR);
++		CHIPGCR &= ~CHIPGCR_FCGMII;
++
++		if (mii_status & VELOCITY_DUPLEX_FULL) {
++			CHIPGCR |= CHIPGCR_FCFDX;
++			writeb(CHIPGCR, &regs->CHIPGCR);
++			printf("DEBUG: set Velocity to forced full mode\n");
++			if (vptr->rev_id < REV_ID_VT3216_A0)
++				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
++		} else {
++			CHIPGCR &= ~CHIPGCR_FCFDX;
++			printf("DEBUG: set Velocity to forced half mode\n");
++			writeb(CHIPGCR, &regs->CHIPGCR);
++			if (vptr->rev_id < REV_ID_VT3216_A0)
++				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
++		}
++
++		/* forced mode: do not advertise gigabit */
++		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000,
++				 MII_REG_G1000CR, vptr->mac_regs);
++
++		/* only enable CD heart beat counter in 10HD mode */
++		if (!(mii_status & VELOCITY_DUPLEX_FULL)
++		    && (mii_status & VELOCITY_SPEED_10)) {
++			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
++		} else {
++			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
++		}
++
++		/* advertise only the requested speed/duplex combination */
++		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
++		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
++		if (mii_status & VELOCITY_SPEED_100) {
++			if (mii_status & VELOCITY_DUPLEX_FULL)
++				ANAR |= ANAR_TXFD;
++			else
++				ANAR |= ANAR_TX;
++		} else {
++			if (mii_status & VELOCITY_DUPLEX_FULL)
++				ANAR |= ANAR_10FD;
++			else
++				ANAR |= ANAR_10;
++		}
++		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
++		/* enable AUTO-NEGO mode to restart with new advertisement */
++		mii_set_auto_on(vptr);
++	}
++	return VELOCITY_LINK_CHANGE;
++}
++
++/**
++ * mii_check_media_mode - check media state
++ * @regs: velocity registers
++ *
++ * Check the current MII status and determine the link status
++ * accordingly.  Builds a VELOCITY_* flag word combining link
++ * failure, speed/duplex (from G1000CR and ANAR), and whether
++ * autonegotiation is considered enabled.
++ */
++
++static u32 mii_check_media_mode(struct mac_regs *regs)
++{
++	u32 status = 0;
++	u16 ANAR;
++
++	/* BMSR link bit clear -> report link failure */
++	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
++		status |= VELOCITY_LINK_FAIL;
++
++	/* Gigabit advertisement first, then fall back to ANAR bits */
++	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
++		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
++	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
++		status |= (VELOCITY_SPEED_1000);
++	else {
++		/* highest advertised 10/100 capability wins */
++		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
++		if (ANAR & ANAR_TXFD)
++			status |=
++			    (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
++		else if (ANAR & ANAR_TX)
++			status |= VELOCITY_SPEED_100;
++		else if (ANAR & ANAR_10FD)
++			status |=
++			    (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
++		else
++			status |= (VELOCITY_SPEED_10);
++	}
++
++	/* Autoneg counts as enabled only when every 10/100 mode and
++	 * some gigabit mode are all advertised */
++	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
++		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
++		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
++		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
++			if (MII_REG_BITS_IS_ON
++			    (G1000CR_1000 | G1000CR_1000FD,
++			     MII_REG_G1000CR, regs))
++				status |= VELOCITY_AUTONEG_ENABLE;
++		}
++	}
++
++	return status;
++}
++
++/**
++ * check_connection_type - derive link type from PHYSR0
++ * @regs: velocity registers
++ *
++ * Build a VELOCITY_* flag word (speed, duplex, autoneg) from the
++ * PHY status register PHYSR0 and the MII registers.
++ */
++
++static u32 check_connection_type(struct mac_regs *regs)
++{
++	u32 status = 0;
++	u8 PHYSR0;
++	u16 ANAR;
++	PHYSR0 = readb(&regs->PHYSR0);
++
++	/*
++	   if (!(PHYSR0 & PHYSR0_LINKGD))
++	   status|=VELOCITY_LINK_FAIL;
++	 */
++
++	if (PHYSR0 & PHYSR0_FDPX)
++		status |= VELOCITY_DUPLEX_FULL;
++
++	/* speeds are mutually exclusive: gigabit, then 10M, else 100M */
++	if (PHYSR0 & PHYSR0_SPDG)
++		status |= VELOCITY_SPEED_1000;
++	else if (PHYSR0 & PHYSR0_SPD10)
++		status |= VELOCITY_SPEED_10;
++	else
++		status |= VELOCITY_SPEED_100;
++
++	/* autoneg is reported only when all 10/100 modes and some
++	 * gigabit mode are advertised */
++	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
++		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
++		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
++		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
++			if (MII_REG_BITS_IS_ON
++			    (G1000CR_1000 | G1000CR_1000FD,
++			     MII_REG_G1000CR, regs))
++				status |= VELOCITY_AUTONEG_ENABLE;
++		}
++	}
++
++	return status;
++}
++
++/**
++ * enable_flow_control_ability - flow control
++ * @vptr: velocity to configure
++ *
++ * Set up flow control according to the flow control options
++ * determined by the eeprom/configuration.
++ */
++
++static void enable_flow_control_ability(struct velocity_info *vptr)
++{
++	struct mac_regs *regs = vptr->mac_regs;
++
++	switch (vptr->options.flow_cntl) {
++
++	case FLOW_CNTL_DEFAULT:
++		/* mirror whatever the PHY reports in PHYSR0 */
++		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
++			writel(CR0_FDXRFCEN, &regs->CR0Set);
++		else
++			writel(CR0_FDXRFCEN, &regs->CR0Clr);
++
++		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
++			writel(CR0_FDXTFCEN, &regs->CR0Set);
++		else
++			writel(CR0_FDXTFCEN, &regs->CR0Clr);
++		break;
++
++	case FLOW_CNTL_TX:
++		writel(CR0_FDXTFCEN, &regs->CR0Set);
++		writel(CR0_FDXRFCEN, &regs->CR0Clr);
++		break;
++
++	case FLOW_CNTL_RX:
++		writel(CR0_FDXRFCEN, &regs->CR0Set);
++		writel(CR0_FDXTFCEN, &regs->CR0Clr);
++		break;
++
++	case FLOW_CNTL_TX_RX:
++		writel(CR0_FDXTFCEN, &regs->CR0Set);
++		writel(CR0_FDXRFCEN, &regs->CR0Set);
++		break;
++
++	case FLOW_CNTL_DISABLE:
++		writel(CR0_FDXRFCEN, &regs->CR0Clr);
++		writel(CR0_FDXTFCEN, &regs->CR0Clr);
++		break;
++
++	default:
++		break;
++	}
++}
++
++/* FIXME: Move to pci.c */
++/**
++ * pci_set_power_state - Set the power state of a PCI device
++ * @dev: PCI device to be suspended
++ * @state: Power state we're entering (clamped to D3)
++ *
++ * Transition a device to a new power state, using the Power Management
++ * Capabilities in the device's config space.
++ *
++ * RETURN VALUE:
++ * -1 if trying to enter a lower state than we're already in.
++ * 0 if we're already in the requested state.
++ * -2 if the device does not support PCI PM or the requested state.
++ * 0 if we can successfully change the power state.
++ */
++
++int pci_set_power_state(struct pci_device *dev, int state)
++{
++	int pm;
++	u16 pmcsr;
++	/* NOTE(review): there is no way to query the device's current
++	 * state here, so D0 is assumed — confirm against callers. */
++	int current_state = 0;
++
++	/* bound the state we're entering */
++	if (state > 3)
++		state = 3;
++
++	/* Validate current state:
++	 * Can enter D0 from any state, but if we can only go deeper
++	 * to sleep if we're already in a low power state
++	 */
++	if (state > 0 && current_state > state)
++		return -1;
++	else if (current_state == state)
++		return 0;	/* we're already there */
++
++	/* find PCI PM capability in list */
++	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
++
++	/* abort if the device doesn't support PM capabilities */
++	if (!pm)
++		return -2;
++
++	/* check if this device supports the desired state */
++	if (state == 1 || state == 2) {
++		u16 pmc;
++		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
++		if (state == 1 && !(pmc & PCI_PM_CAP_D1))
++			return -2;
++		else if (state == 2 && !(pmc & PCI_PM_CAP_D2))
++			return -2;
++	}
++
++	/* If we're in D3, force entire word to 0.
++	 * This doesn't affect PME_Status, disables PME_En, and
++	 * sets PowerState to 0.
++	 */
++	if (current_state >= 3)
++		pmcsr = 0;
++	else {
++		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= state;
++	}
++
++	/* enter specified state */
++	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
++
++	/* Mandatory power management transition delays */
++	/* see PCI PM 1.1 5.6.1 table 18 */
++	if (state == 3 || current_state == 3)
++		mdelay(10);
++	else if (state == 2 || current_state == 2)
++		udelay(200);
++
++	return 0;
++}
++
++/* PCI IDs handled by this driver (vendor 0x1106 = VIA) */
++static struct pci_id velocity_nics[] = {
++ PCI_ROM(0x1106, 0x3119, "via-velocity", "VIA Networking Velocity Family Gigabit Ethernet Adapter"),
++};
++
++/* Driver registration: ties velocity_probe to the PCI IDs above */
++static struct pci_driver velocity_driver __pci_driver = {
++ .type = NIC_DRIVER,
++ .name = "VIA-VELOCITY/PCI",
++ .probe = velocity_probe,
++ .ids = velocity_nics,
++ .id_count = sizeof(velocity_nics) / sizeof(velocity_nics[0]),
++ .class = 0,
++};
--- /dev/null
--- /dev/null
++#define EB54 1
++/*
++ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
++ * All rights reserved.
++ *
++ * This software may be redistributed and/or modified under
++ * the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * File: via-velocity.h
++ *
++ * Purpose: Header file to define driver's private structures.
++ *
++ * Author: Chuang Liang-Shing, AJ Jiang
++ *
++ * Date: Jan 24, 2003
++ *
++ * Changes for Etherboot Port:
++ * Copyright (c) 2006 by Timothy Legge <tlegge@rogers.com>
++ */
++
++#include "timer.h"
++
++#ifndef EB54
++typedef unsigned char u8;
++typedef signed char s8;
++typedef unsigned short u16;
++typedef signed short s16;
++typedef unsigned int u32;
++typedef signed int s32;
++#endif
++#ifndef VELOCITY_H
++#define VELOCITY_H
++
++#define VELOCITY_TX_CSUM_SUPPORT
++
++#define VELOCITY_NAME "via-velocity"
++#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
++#define VELOCITY_VERSION "1.13"
++
++#define PKT_BUF_SZ 1564
++
++#define MAX_UNITS 8
++#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1}
++
++#define REV_ID_VT6110 (0)
++
++#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0)
++#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0)
++#define DWORD_REG_BITS_ON(x,p) do { writel(readl((p))|(x),(p));} while (0)
++
++#define BYTE_REG_BITS_IS_ON(x,p) (readb((p)) & (x))
++#define WORD_REG_BITS_IS_ON(x,p) (readw((p)) & (x))
++#define DWORD_REG_BITS_IS_ON(x,p) (readl((p)) & (x))
++
++#define BYTE_REG_BITS_OFF(x,p) do { writeb(readb((p)) & (~(x)),(p));} while (0)
++#define WORD_REG_BITS_OFF(x,p) do { writew(readw((p)) & (~(x)),(p));} while (0)
++#define DWORD_REG_BITS_OFF(x,p) do { writel(readl((p)) & (~(x)),(p));} while (0)
++
++#define BYTE_REG_BITS_SET(x,m,p) do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0)
++#define WORD_REG_BITS_SET(x,m,p) do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0)
++#define DWORD_REG_BITS_SET(x,m,p) do { writel( (readl((p)) & (~(m)))|(x),(p));} while (0)
++
++#define VAR_USED(p) do {(p)=(p);} while (0)
++
++/*
++ * Purpose: Structures for MAX RX/TX descriptors.
++ */
++
++
++#define B_OWNED_BY_CHIP 1
++#define B_OWNED_BY_HOST 0
++
++/*
++ * Bits in the RSR0 register
++ */
++
++#define RSR_DETAG 0x0080
++#define RSR_SNTAG 0x0040
++#define RSR_RXER 0x0020
++#define RSR_RL 0x0010
++#define RSR_CE 0x0008
++#define RSR_FAE 0x0004
++#define RSR_CRC 0x0002
++#define RSR_VIDM 0x0001
++
++/*
++ * Bits in the RSR1 register
++ */
++
++#define RSR_RXOK 0x8000 // rx OK
++#define RSR_PFT 0x4000 // Perfect filtering address match
++#define RSR_MAR 0x2000 // MAC accept multicast address packet
++#define RSR_BAR 0x1000 // MAC accept broadcast address packet
++#define RSR_PHY 0x0800 // MAC accept physical address packet
++#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator
++#define RSR_STP 0x0200 // start of packet
++#define RSR_EDP 0x0100 // end of packet
++
++/*
++ * Bits in the RSR1 register
++ */
++
++#define RSR1_RXOK 0x80 // rx OK
++#define RSR1_PFT 0x40 // Perfect filtering address match
++#define RSR1_MAR 0x20 // MAC accept multicast address packet
++#define RSR1_BAR 0x10 // MAC accept broadcast address packet
++#define RSR1_PHY 0x08 // MAC accept physical address packet
++#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
++#define RSR1_STP 0x02 // start of packet
++#define RSR1_EDP 0x01 // end of packet
++
++/*
++ * Bits in the CSM (receive checksum status) register
++ */
++
++#define CSM_IPOK 0x40 // IP checksum validation ok
++#define CSM_TUPOK 0x20 // TCP/UDP checksum validation ok
++#define CSM_FRAG 0x10 // Fragmented IP datagram
++#define CSM_IPKT 0x04 // Received an IP packet
++#define CSM_TCPKT 0x02 // Received a TCP packet
++#define CSM_UDPKT 0x01 // Received a UDP packet
++
++/*
++ * Bits in the TSR0 register
++ */
++
++#define TSR0_ABT 0x0080 // Tx abort because of excessive collision
++#define TSR0_OWT 0x0040 // Jumbo frame Tx abort
++#define TSR0_OWC 0x0020 // Out of window collision
++#define TSR0_COLS 0x0010 // experience collision in this transmit event
++#define TSR0_NCR3 0x0008 // collision retry counter[3]
++#define TSR0_NCR2 0x0004 // collision retry counter[2]
++#define TSR0_NCR1 0x0002 // collision retry counter[1]
++#define TSR0_NCR0 0x0001 // collision retry counter[0]
++#define TSR0_TERR 0x8000 //
++#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode
++#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode
++#define TSR0_LNKFL 0x1000 // packet serviced during link down
++#define TSR0_SHDN 0x0400 // shutdown case
++#define TSR0_CRS 0x0200 // carrier sense lost
++#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat)
++
++/*
++ * Bits in the TSR1 register
++ */
++
++#define TSR1_TERR 0x80 //
++#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
++#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
++#define TSR1_LNKFL 0x10 // packet serviced during link down
++#define TSR1_SHDN 0x04 // shutdown case
++#define TSR1_CRS 0x02 // carrier sense lost
++#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
++
++//
++// Bits in the TCR0 register
++//
++#define TCR0_TIC 0x80 // assert interrupt immediately while descriptor has been send complete
++#define TCR0_PIC 0x40 // priority interrupt request, INA# is issued over adaptive interrupt scheme
++#define TCR0_VETAG 0x20 // enable VLAN tag
++#define TCR0_IPCK 0x10 // request IP checksum calculation.
++#define TCR0_UDPCK 0x08 // request UDP checksum calculation.
++#define TCR0_TCPCK 0x04 // request TCP checksum calculation.
++#define TCR0_JMBO 0x02 // indicate a jumbo packet in GMAC side
++#define TCR0_CRC 0x01 // disable CRC generation
++
++#define TCPLS_NORMAL 3
++#define TCPLS_START 2
++#define TCPLS_END 1
++#define TCPLS_MED 0
++
++
++// max transmit or receive buffer size
++#define CB_RX_BUF_SIZE 2048UL // max buffer size
++ // NOTE: must be multiple of 4
++
++#define CB_MAX_RD_NUM 512 // MAX # of RD
++#define CB_MAX_TD_NUM 256 // MAX # of TD
++
++#define CB_INIT_RD_NUM_3119 128 // init # of RD, for setup VT3119
++#define CB_INIT_TD_NUM_3119 64 // init # of TD, for setup VT3119
++
++#define CB_INIT_RD_NUM 128 // init # of RD, for setup default
++#define CB_INIT_TD_NUM 64 // init # of TD, for setup default
++
++// for 3119
++#define CB_TD_RING_NUM 4 // # of TD rings.
++#define CB_MAX_SEG_PER_PKT 7 // max data seg per packet (Tx)
++
++
++/*
++ * If collisions exceed 15 times, tx will abort, and
++ * if the tx fifo underflows, tx will fail;
++ * we should try to resend the packet.
++ */
++
++#define CB_MAX_TX_ABORT_RETRY 3
++
++/*
++ * Receive descriptor — hardware DMA layout, hence the packing.
++ */
++
++struct rdesc0 {
++ u16 RSR; /* Receive status (RSR_* bits) */
++ u16 len:14; /* Received packet length */
++ u16 reserved:1;
++ u16 owner:1; /* Who owns this buffer ? (B_OWNED_BY_*) */
++};
++
++struct rdesc1 {
++ u16 PQTAG; /* presumably the 802.1p/1q tag — TODO confirm */
++ u8 CSM; /* checksum status (CSM_* bits) */
++ u8 IPKT;
++};
++
++struct rx_desc {
++ struct rdesc0 rdesc0;
++ struct rdesc1 rdesc1;
++ u32 pa_low; /* Low 32 bit PCI address */
++ u16 pa_high; /* Next 16 bit PCI address (48 total) */
++ u16 len:15; /* Frame size */
++ u16 inten:1; /* Enable interrupt */
++} __attribute__ ((__packed__));
++
++/*
++ * Transmit descriptor — hardware DMA layout, hence the packing.
++ */
++
++struct tdesc0 {
++ u16 TSR; /* Transmit status register (TSR_* bits) */
++ u16 pktsize:14; /* Size of frame */
++ u16 reserved:1;
++ u16 owner:1; /* Who owns the buffer (B_OWNED_BY_*) */
++};
++
++struct pqinf { /* Priority queue info */
++ u16 VID:12; /* VLAN id */
++ u16 CFI:1;
++ u16 priority:3;
++} __attribute__ ((__packed__));
++
++struct tdesc1 {
++ struct pqinf pqinf;
++ u8 TCR; /* per-packet control (TCR0_* bits) */
++ u8 TCPLS:2; /* segment position (TCPLS_* values) */
++ u8 reserved:2;
++ u8 CMDZ:4;
++} __attribute__ ((__packed__));
++
++struct td_buf {
++ u32 pa_low; /* Low 32 bit PCI address of the buffer */
++ u16 pa_high; /* Next 16 bit PCI address */
++ u16 bufsize:14;
++ u16 reserved:1;
++ u16 queue:1;
++} __attribute__ ((__packed__));
++
++struct tx_desc {
++ struct tdesc0 tdesc0;
++ struct tdesc1 tdesc1;
++ struct td_buf td_buf[7]; /* matches CB_MAX_SEG_PER_PKT segments */
++};
++
++#ifdef LINUX
++struct velocity_rd_info {
++ struct sk_buff *skb;
++ dma_addr_t skb_dma;
++};
++
++
++/**
++ * alloc_rd_info - allocate an rd info block
++ *
++ * Allocate and zero-initialize a receive info structure used for
++ * keeping track of kernel side information related to each receive
++ * descriptor we are using.  Returns NULL on allocation failure.
++ */
++
++static inline struct velocity_rd_info *alloc_rd_info(void)
++{
++	struct velocity_rd_info *info;
++
++	info = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC);
++	if (info != NULL)
++		memset(info, 0, sizeof(struct velocity_rd_info));
++	return info;
++}
++
++/*
++ * Used to track transmit side buffers.
++ */
++
++struct velocity_td_info {
++ struct sk_buff *skb;
++ u8 *buf;
++ int nskb_dma;
++ dma_addr_t skb_dma[7];
++ dma_addr_t buf_dma;
++};
++
++#endif
++/* Descriptor ownership values.
++ * NOTE(review): this also defines a variable (velocity_owner) in a
++ * header, giving one copy per including translation unit — confirm
++ * that is intentional (a bare enum or typedef would avoid it). */
++enum {
++ OWNED_BY_HOST = 0,
++ OWNED_BY_NIC = 1
++} velocity_owner;
++
++
++/*
++ * MAC registers and macros.
++ */
++
++
++#define MCAM_SIZE 64
++#define VCAM_SIZE 64
++#define TX_QUEUE_NO 4
++
++#define MAX_HW_MIB_COUNTER 32
++#define VELOCITY_MIN_MTU (1514-14)
++#define VELOCITY_MAX_MTU (9000)
++
++/*
++ * Registers in the MAC
++ */
++
++#define MAC_REG_PAR 0x00 // physical address
++#define MAC_REG_RCR 0x06
++#define MAC_REG_TCR 0x07
++#define MAC_REG_CR0_SET 0x08
++#define MAC_REG_CR1_SET 0x09
++#define MAC_REG_CR2_SET 0x0A
++#define MAC_REG_CR3_SET 0x0B
++#define MAC_REG_CR0_CLR 0x0C
++#define MAC_REG_CR1_CLR 0x0D
++#define MAC_REG_CR2_CLR 0x0E
++#define MAC_REG_CR3_CLR 0x0F
++#define MAC_REG_MAR 0x10
++#define MAC_REG_CAM 0x10
++#define MAC_REG_DEC_BASE_HI 0x18
++#define MAC_REG_DBF_BASE_HI 0x1C
++#define MAC_REG_ISR_CTL 0x20
++#define MAC_REG_ISR_HOTMR 0x20
++#define MAC_REG_ISR_TSUPTHR 0x20
++#define MAC_REG_ISR_RSUPTHR 0x20
++#define MAC_REG_ISR_CTL1 0x21
++#define MAC_REG_TXE_SR 0x22
++#define MAC_REG_RXE_SR 0x23
++#define MAC_REG_ISR 0x24
++#define MAC_REG_ISR0 0x24
++#define MAC_REG_ISR1 0x25
++#define MAC_REG_ISR2 0x26
++#define MAC_REG_ISR3 0x27
++#define MAC_REG_IMR 0x28
++#define MAC_REG_IMR0 0x28
++#define MAC_REG_IMR1 0x29
++#define MAC_REG_IMR2 0x2A
++#define MAC_REG_IMR3 0x2B
++#define MAC_REG_TDCSR_SET 0x30
++#define MAC_REG_RDCSR_SET 0x32
++#define MAC_REG_TDCSR_CLR 0x34
++#define MAC_REG_RDCSR_CLR 0x36
++#define MAC_REG_RDBASE_LO 0x38
++#define MAC_REG_RDINDX 0x3C
++#define MAC_REG_TDBASE_LO 0x40
++#define MAC_REG_RDCSIZE 0x50
++#define MAC_REG_TDCSIZE 0x52
++#define MAC_REG_TDINDX 0x54
++#define MAC_REG_TDIDX0 0x54
++#define MAC_REG_TDIDX1 0x56
++#define MAC_REG_TDIDX2 0x58
++#define MAC_REG_TDIDX3 0x5A
++#define MAC_REG_PAUSE_TIMER 0x5C
++#define MAC_REG_RBRDU 0x5E
++#define MAC_REG_FIFO_TEST0 0x60
++#define MAC_REG_FIFO_TEST1 0x64
++#define MAC_REG_CAMADDR 0x68
++#define MAC_REG_CAMCR 0x69
++#define MAC_REG_GFTEST 0x6A
++#define MAC_REG_FTSTCMD 0x6B
++#define MAC_REG_MIICFG 0x6C
++#define MAC_REG_MIISR 0x6D
++#define MAC_REG_PHYSR0 0x6E
++#define MAC_REG_PHYSR1 0x6F
++#define MAC_REG_MIICR 0x70
++#define MAC_REG_MIIADR 0x71
++#define MAC_REG_MIIDATA 0x72
++#define MAC_REG_SOFT_TIMER0 0x74
++#define MAC_REG_SOFT_TIMER1 0x76
++#define MAC_REG_CFGA 0x78
++#define MAC_REG_CFGB 0x79
++#define MAC_REG_CFGC 0x7A
++#define MAC_REG_CFGD 0x7B
++#define MAC_REG_DCFG0 0x7C
++#define MAC_REG_DCFG1 0x7D
++#define MAC_REG_MCFG0 0x7E
++#define MAC_REG_MCFG1 0x7F
++
++#define MAC_REG_TBIST 0x80
++#define MAC_REG_RBIST 0x81
++#define MAC_REG_PMCC 0x82
++#define MAC_REG_STICKHW 0x83
++#define MAC_REG_MIBCR 0x84
++#define MAC_REG_EERSV 0x85
++#define MAC_REG_REVID 0x86
++#define MAC_REG_MIBREAD 0x88
++#define MAC_REG_BPMA 0x8C
++#define MAC_REG_EEWR_DATA 0x8C
++#define MAC_REG_BPMD_WR 0x8F
++#define MAC_REG_BPCMD 0x90
++#define MAC_REG_BPMD_RD 0x91
++#define MAC_REG_EECHKSUM 0x92
++#define MAC_REG_EECSR 0x93
++#define MAC_REG_EERD_DATA 0x94
++#define MAC_REG_EADDR 0x96
++#define MAC_REG_EMBCMD 0x97
++#define MAC_REG_JMPSR0 0x98
++#define MAC_REG_JMPSR1 0x99
++#define MAC_REG_JMPSR2 0x9A
++#define MAC_REG_JMPSR3 0x9B
++#define MAC_REG_CHIPGSR 0x9C
++#define MAC_REG_TESTCFG 0x9D
++#define MAC_REG_DEBUG 0x9E
++#define MAC_REG_CHIPGCR 0x9F
++#define MAC_REG_WOLCR0_SET 0xA0
++#define MAC_REG_WOLCR1_SET 0xA1
++#define MAC_REG_PWCFG_SET 0xA2
++#define MAC_REG_WOLCFG_SET 0xA3
++#define MAC_REG_WOLCR0_CLR 0xA4
++#define MAC_REG_WOLCR1_CLR 0xA5
++#define MAC_REG_PWCFG_CLR 0xA6
++#define MAC_REG_WOLCFG_CLR 0xA7
++#define MAC_REG_WOLSR0_SET 0xA8
++#define MAC_REG_WOLSR1_SET 0xA9
++#define MAC_REG_WOLSR0_CLR 0xAC
++#define MAC_REG_WOLSR1_CLR 0xAD
++#define MAC_REG_PATRN_CRC0 0xB0
++#define MAC_REG_PATRN_CRC1 0xB2
++#define MAC_REG_PATRN_CRC2 0xB4
++#define MAC_REG_PATRN_CRC3 0xB6
++#define MAC_REG_PATRN_CRC4 0xB8
++#define MAC_REG_PATRN_CRC5 0xBA
++#define MAC_REG_PATRN_CRC6 0xBC
++#define MAC_REG_PATRN_CRC7 0xBE
++#define MAC_REG_BYTEMSK0_0 0xC0
++#define MAC_REG_BYTEMSK0_1 0xC4
++#define MAC_REG_BYTEMSK0_2 0xC8
++#define MAC_REG_BYTEMSK0_3 0xCC
++#define MAC_REG_BYTEMSK1_0 0xD0
++#define MAC_REG_BYTEMSK1_1 0xD4
++#define MAC_REG_BYTEMSK1_2 0xD8
++#define MAC_REG_BYTEMSK1_3 0xDC
++#define MAC_REG_BYTEMSK2_0 0xE0
++#define MAC_REG_BYTEMSK2_1 0xE4
++#define MAC_REG_BYTEMSK2_2 0xE8
++#define MAC_REG_BYTEMSK2_3 0xEC
++#define MAC_REG_BYTEMSK3_0 0xF0
++#define MAC_REG_BYTEMSK3_1 0xF4
++#define MAC_REG_BYTEMSK3_2 0xF8
++#define MAC_REG_BYTEMSK3_3 0xFC
++
++/*
++ * Bits in the RCR register
++ */
++
++#define RCR_AS 0x80
++#define RCR_AP 0x40
++#define RCR_AL 0x20
++#define RCR_PROM 0x10
++#define RCR_AB 0x08
++#define RCR_AM 0x04
++#define RCR_AR 0x02
++#define RCR_SEP 0x01
++
++/*
++ * Bits in the TCR register
++ */
++
++#define TCR_TB2BDIS 0x80
++#define TCR_COLTMC1 0x08
++#define TCR_COLTMC0 0x04
++#define TCR_LB1 0x02 /* loopback[1] */
++#define TCR_LB0 0x01 /* loopback[0] */
++
++/*
++ * Bits in the CR0 register
++ */
++
++#define CR0_TXON 0x00000008UL
++#define CR0_RXON 0x00000004UL
++#define CR0_STOP 0x00000002UL /* stop MAC, default = 1 */
++#define CR0_STRT 0x00000001UL /* start MAC */
++#define CR0_SFRST 0x00008000UL /* software reset */
++#define CR0_TM1EN 0x00004000UL
++#define CR0_TM0EN 0x00002000UL
++#define CR0_DPOLL 0x00000800UL /* disable rx/tx auto polling */
++#define CR0_DISAU 0x00000100UL
++#define CR0_XONEN 0x00800000UL
++#define CR0_FDXTFCEN 0x00400000UL /* full-duplex TX flow control enable */
++#define CR0_FDXRFCEN 0x00200000UL /* full-duplex RX flow control enable */
++#define CR0_HDXFCEN 0x00100000UL /* half-duplex flow control enable */
++#define CR0_XHITH1 0x00080000UL /* TX XON high threshold 1 */
++#define CR0_XHITH0 0x00040000UL /* TX XON high threshold 0 */
++#define CR0_XLTH1 0x00020000UL /* TX pause frame low threshold 1 */
++#define CR0_XLTH0 0x00010000UL /* TX pause frame low threshold 0 */
++#define CR0_GSPRST 0x80000000UL
++#define CR0_FORSRST 0x40000000UL
++#define CR0_FPHYRST 0x20000000UL
++#define CR0_DIAG 0x10000000UL
++#define CR0_INTPCTL 0x04000000UL
++#define CR0_GINTMSK1 0x02000000UL
++#define CR0_GINTMSK0 0x01000000UL
++
++/*
++ * Bits in the CR1 register
++ */
++
++#define CR1_SFRST 0x80 /* software reset */
++#define CR1_TM1EN 0x40
++#define CR1_TM0EN 0x20
++#define CR1_DPOLL 0x08 /* disable rx/tx auto polling */
++#define CR1_DISAU 0x01
++
++/*
++ * Bits in the CR2 register
++ */
++
++#define CR2_XONEN 0x80
++#define CR2_FDXTFCEN 0x40 /* full-duplex TX flow control enable */
++#define CR2_FDXRFCEN 0x20 /* full-duplex RX flow control enable */
++#define CR2_HDXFCEN 0x10 /* half-duplex flow control enable */
++#define CR2_XHITH1 0x08 /* TX XON high threshold 1 */
++#define CR2_XHITH0 0x04 /* TX XON high threshold 0 */
++#define CR2_XLTH1 0x02 /* TX pause frame low threshold 1 */
++#define CR2_XLTH0 0x01 /* TX pause frame low threshold 0 */
++
++/*
++ * Bits in the CR3 register
++ */
++
++#define CR3_GSPRST 0x80
++#define CR3_FORSRST 0x40
++#define CR3_FPHYRST 0x20
++#define CR3_DIAG 0x10
++#define CR3_INTPCTL 0x04
++#define CR3_GINTMSK1 0x02
++#define CR3_GINTMSK0 0x01
++
++#define ISRCTL_UDPINT 0x8000
++#define ISRCTL_TSUPDIS 0x4000
++#define ISRCTL_RSUPDIS 0x2000
++#define ISRCTL_PMSK1 0x1000
++#define ISRCTL_PMSK0 0x0800
++#define ISRCTL_INTPD 0x0400
++#define ISRCTL_HCRLD 0x0200
++#define ISRCTL_SCRLD 0x0100
++
++/*
++ * Bits in the ISR_CTL1 register
++ */
++
++#define ISRCTL1_UDPINT 0x80
++#define ISRCTL1_TSUPDIS 0x40
++#define ISRCTL1_RSUPDIS 0x20
++#define ISRCTL1_PMSK1 0x10
++#define ISRCTL1_PMSK0 0x08
++#define ISRCTL1_INTPD 0x04
++#define ISRCTL1_HCRLD 0x02
++#define ISRCTL1_SCRLD 0x01
++
++/*
++ * Bits in the TXE_SR register
++ */
++
++#define TXESR_TFDBS 0x08
++#define TXESR_TDWBS 0x04
++#define TXESR_TDRBS 0x02
++#define TXESR_TDSTR 0x01
++
++/*
++ * Bits in the RXE_SR register
++ */
++
++#define RXESR_RFDBS 0x08
++#define RXESR_RDWBS 0x04
++#define RXESR_RDRBS 0x02
++#define RXESR_RDSTR 0x01
++
++/*
++ * Bits in the ISR register
++ */
++
++#define ISR_ISR3 0x80000000UL
++#define ISR_ISR2 0x40000000UL
++#define ISR_ISR1 0x20000000UL
++#define ISR_ISR0 0x10000000UL
++#define ISR_TXSTLI 0x02000000UL
++#define ISR_RXSTLI 0x01000000UL
++#define ISR_HFLD 0x00800000UL
++#define ISR_UDPI 0x00400000UL
++#define ISR_MIBFI 0x00200000UL
++#define ISR_SHDNI 0x00100000UL
++#define ISR_PHYI 0x00080000UL
++#define ISR_PWEI 0x00040000UL
++#define ISR_TMR1I 0x00020000UL
++#define ISR_TMR0I 0x00010000UL
++#define ISR_SRCI 0x00008000UL
++#define ISR_LSTPEI 0x00004000UL
++#define ISR_LSTEI 0x00002000UL
++#define ISR_OVFI 0x00001000UL
++#define ISR_FLONI 0x00000800UL
++#define ISR_RACEI 0x00000400UL
++#define ISR_TXWB1I 0x00000200UL
++#define ISR_TXWB0I 0x00000100UL
++#define ISR_PTX3I 0x00000080UL
++#define ISR_PTX2I 0x00000040UL
++#define ISR_PTX1I 0x00000020UL
++#define ISR_PTX0I 0x00000010UL
++#define ISR_PTXI 0x00000008UL
++#define ISR_PRXI 0x00000004UL
++#define ISR_PPTXI 0x00000002UL
++#define ISR_PPRXI 0x00000001UL
++
++/*
++ * Bits in the IMR register
++ */
++
++#define IMR_TXSTLM 0x02000000UL
++#define IMR_UDPIM 0x00400000UL
++#define IMR_MIBFIM 0x00200000UL
++#define IMR_SHDNIM 0x00100000UL
++#define IMR_PHYIM 0x00080000UL
++#define IMR_PWEIM 0x00040000UL
++#define IMR_TMR1IM 0x00020000UL
++#define IMR_TMR0IM 0x00010000UL
++
++#define IMR_SRCIM 0x00008000UL
++#define IMR_LSTPEIM 0x00004000UL
++#define IMR_LSTEIM 0x00002000UL
++#define IMR_OVFIM 0x00001000UL
++#define IMR_FLONIM 0x00000800UL
++#define IMR_RACEIM 0x00000400UL
++#define IMR_TXWB1IM 0x00000200UL
++#define IMR_TXWB0IM 0x00000100UL
++
++#define IMR_PTX3IM 0x00000080UL
++#define IMR_PTX2IM 0x00000040UL
++#define IMR_PTX1IM 0x00000020UL
++#define IMR_PTX0IM 0x00000010UL
++#define IMR_PTXIM 0x00000008UL
++#define IMR_PRXIM 0x00000004UL
++#define IMR_PPTXIM 0x00000002UL
++#define IMR_PPRXIM 0x00000001UL
++
++/* 0x0013FB0FUL = initial value of IMR */
++
++#define INT_MASK_DEF ( IMR_PPTXIM|IMR_PPRXIM| IMR_PTXIM|IMR_PRXIM | \
++ IMR_PWEIM|IMR_TXWB0IM|IMR_TXWB1IM|IMR_FLONIM| \
++ IMR_OVFIM|IMR_LSTEIM|IMR_LSTPEIM|IMR_SRCIM|IMR_MIBFIM|\
++ IMR_SHDNIM |IMR_TMR1IM|IMR_TMR0IM|IMR_TXSTLM )
++
++/*
++ * Bits in the TDCSR0/1, RDCSR0 register
++ */
++
++#define TRDCSR_DEAD 0x0008
++#define TRDCSR_WAK 0x0004
++#define TRDCSR_ACT 0x0002
++#define TRDCSR_RUN 0x0001
++
++/*
++ * Bits in the CAMADDR register
++ */
++
++#define CAMADDR_CAMEN 0x80
++#define CAMADDR_VCAMSL 0x40
++
++/*
++ * Bits in the CAMCR register
++ */
++
++#define CAMCR_PS1 0x80
++#define CAMCR_PS0 0x40
++#define CAMCR_AITRPKT 0x20
++#define CAMCR_AITR16 0x10
++#define CAMCR_CAMRD 0x08
++#define CAMCR_CAMWR 0x04
++#define CAMCR_PS_CAM_MASK 0x40
++#define CAMCR_PS_CAM_DATA 0x80
++#define CAMCR_PS_MAR 0x00
++
++/*
++ * Bits in the MIICFG register
++ */
++
++#define MIICFG_MPO1 0x80
++#define MIICFG_MPO0 0x40
++#define MIICFG_MFDC 0x20
++
++/*
++ * Bits in the MIISR register
++ */
++
++#define MIISR_MIDLE 0x80
++
++/*
++ * Bits in the PHYSR0 register
++ */
++
++#define PHYSR0_PHYRST 0x80
++#define PHYSR0_LINKGD 0x40
++#define PHYSR0_FDPX 0x10
++#define PHYSR0_SPDG 0x08
++#define PHYSR0_SPD10 0x04
++#define PHYSR0_RXFLC 0x02
++#define PHYSR0_TXFLC 0x01
++
++/*
++ * Bits in the PHYSR1 register
++ */
++
++#define PHYSR1_PHYTBI 0x01
++
++/*
++ * Bits in the MIICR register
++ */
++
++#define MIICR_MAUTO 0x80
++#define MIICR_RCMD 0x40
++#define MIICR_WCMD 0x20
++#define MIICR_MDPM 0x10
++#define MIICR_MOUT 0x08
++#define MIICR_MDO 0x04
++#define MIICR_MDI 0x02
++#define MIICR_MDC 0x01
++
++/*
++ * Bits in the MIIADR register
++ */
++
++#define MIIADR_SWMPL 0x80
++
++/*
++ * Bits in the CFGA register
++ */
++
++#define CFGA_PMHCTG 0x08
++#define CFGA_GPIO1PD 0x04
++#define CFGA_ABSHDN 0x02
++#define CFGA_PACPI 0x01
++
++/*
++ * Bits in the CFGB register
++ */
++
++#define CFGB_GTCKOPT 0x80
++#define CFGB_MIIOPT 0x40
++#define CFGB_CRSEOPT 0x20
++#define CFGB_OFSET 0x10
++#define CFGB_CRANDOM 0x08
++#define CFGB_CAP 0x04
++#define CFGB_MBA 0x02
++#define CFGB_BAKOPT 0x01
++
++/*
++ * Bits in the CFGC register
++ */
++
++#define CFGC_EELOAD 0x80
++#define CFGC_BROPT 0x40
++#define CFGC_DLYEN 0x20
++#define CFGC_DTSEL 0x10
++#define CFGC_BTSEL 0x08
++#define CFGC_BPS2 0x04 /* bootrom select[2] */
++#define CFGC_BPS1 0x02 /* bootrom select[1] */
++#define CFGC_BPS0 0x01 /* bootrom select[0] */
++
++/*
++ * Bits in the CFGD register
++ */
++
++#define CFGD_IODIS 0x80
++#define CFGD_MSLVDACEN 0x40
++#define CFGD_CFGDACEN 0x20
++#define CFGD_PCI64EN 0x10
++#define CFGD_HTMRL4 0x08
++
++/*
++ * Bits in the DCFG1 register
++ */
++
++#define DCFG_XMWI 0x8000
++#define DCFG_XMRM 0x4000
++#define DCFG_XMRL 0x2000
++#define DCFG_PERDIS 0x1000
++#define DCFG_MRWAIT 0x0400
++#define DCFG_MWWAIT 0x0200
++#define DCFG_LATMEN 0x0100
++
++/*
++ * Bits in the MCFG0 register
++ */
++
++#define MCFG_RXARB 0x0080
++#define MCFG_RFT1 0x0020
++#define MCFG_RFT0 0x0010
++#define MCFG_LOWTHOPT 0x0008
++#define MCFG_PQEN 0x0004
++#define MCFG_RTGOPT 0x0002
++#define MCFG_VIDFR 0x0001
++
++/*
++ * Bits in the MCFG1 register
++ */
++
++#define MCFG_TXARB 0x8000
++#define MCFG_TXQBK1 0x0800
++#define MCFG_TXQBK0 0x0400
++#define MCFG_TXQNOBK 0x0200
++#define MCFG_SNAPOPT 0x0100
++
++/*
++ * Bits in the PMCC register
++ */
++
++#define PMCC_DSI 0x80
++#define PMCC_D2_DIS 0x40
++#define PMCC_D1_DIS 0x20
++#define PMCC_D3C_EN 0x10
++#define PMCC_D3H_EN 0x08
++#define PMCC_D2_EN 0x04
++#define PMCC_D1_EN 0x02
++#define PMCC_D0_EN 0x01
++
++/*
++ * Bits in STICKHW
++ */
++
++#define STICKHW_SWPTAG 0x10
++#define STICKHW_WOLSR 0x08
++#define STICKHW_WOLEN 0x04
++#define STICKHW_DS1 0x02 /* R/W by software/cfg cycle */
++#define STICKHW_DS0 0x01 /* suspend well DS write port */
++
++/*
++ * Bits in the MIBCR register
++ */
++
++#define MIBCR_MIBISTOK 0x80
++#define MIBCR_MIBISTGO 0x40
++#define MIBCR_MIBINC 0x20
++#define MIBCR_MIBHI 0x10
++#define MIBCR_MIBFRZ 0x08
++#define MIBCR_MIBFLSH 0x04
++#define MIBCR_MPTRINI 0x02
++#define MIBCR_MIBCLR 0x01
++
++/*
++ * Bits in the EERSV register
++ */
++
++#define EERSV_BOOT_RPL ((u8) 0x01) /* Boot method selection for VT6110 */
++
++#define EERSV_BOOT_MASK ((u8) 0x06)
++#define EERSV_BOOT_INT19 ((u8) 0x00)
++#define EERSV_BOOT_INT18 ((u8) 0x02)
++#define EERSV_BOOT_LOCAL ((u8) 0x04)
++#define EERSV_BOOT_BEV ((u8) 0x06)
++
++
++/*
++ * Bits in BPCMD
++ */
++
++#define BPCMD_BPDNE 0x80
++#define BPCMD_EBPWR 0x02
++#define BPCMD_EBPRD 0x01
++
++/*
++ * Bits in the EECSR register
++ */
++
++#define EECSR_EMBP 0x40 /* eeprom embedded programming */
++#define EECSR_RELOAD 0x20 /* eeprom content reload */
++#define EECSR_DPM 0x10 /* eeprom direct programming */
++#define EECSR_ECS 0x08 /* eeprom CS pin */
++#define EECSR_ECK 0x04 /* eeprom CK pin */
++#define EECSR_EDI 0x02 /* eeprom DI pin */
++#define EECSR_EDO 0x01 /* eeprom DO pin */
++
++/*
++ * Bits in the EMBCMD register
++ */
++
++#define EMBCMD_EDONE 0x80
++#define EMBCMD_EWDIS 0x08
++#define EMBCMD_EWEN 0x04
++#define EMBCMD_EWR 0x02
++#define EMBCMD_ERD 0x01
++
++/*
++ * Bits in TESTCFG register
++ */
++
++#define TESTCFG_HBDIS 0x80
++
++/*
++ * Bits in CHIPGCR register
++ */
++
++#define CHIPGCR_FCGMII 0x80
++#define CHIPGCR_FCFDX 0x40
++#define CHIPGCR_FCRESV 0x20
++#define CHIPGCR_FCMODE 0x10
++#define CHIPGCR_LPSOPT 0x08
++#define CHIPGCR_TM1US 0x04
++#define CHIPGCR_TM0US 0x02
++#define CHIPGCR_PHYINTEN 0x01
++
++/*
++ * Bits in WOLCR0
++ */
++
++#define WOLCR_MSWOLEN7 0x0080 /* enable pattern match filtering */
++#define WOLCR_MSWOLEN6 0x0040
++#define WOLCR_MSWOLEN5 0x0020
++#define WOLCR_MSWOLEN4 0x0010
++#define WOLCR_MSWOLEN3 0x0008
++#define WOLCR_MSWOLEN2 0x0004
++#define WOLCR_MSWOLEN1 0x0002
++#define WOLCR_MSWOLEN0 0x0001
++#define WOLCR_ARP_EN 0x0001
++
++/*
++ * Bits in WOLCR1
++ */
++
++#define WOLCR_LINKOFF_EN 0x0800 /* link off detected enable */
++#define WOLCR_LINKON_EN 0x0400 /* link on detected enable */
++#define WOLCR_MAGIC_EN 0x0200 /* magic packet filter enable */
++#define WOLCR_UNICAST_EN 0x0100 /* unicast filter enable */
++
++
++/*
++ * Bits in PWCFG
++ */
++
++#define PWCFG_PHYPWOPT 0x80 /* internal MII I/F timing */
++#define PWCFG_PCISTICK 0x40 /* PCI sticky R/W enable */
++#define PWCFG_WOLTYPE 0x20 /* pulse(1) or button (0) */
++#define PWCFG_LEGCY_WOL 0x10
++#define PWCFG_PMCSR_PME_SR 0x08
++#define PWCFG_PMCSR_PME_EN 0x04 /* control by PCISTICK */
++#define PWCFG_LEGACY_WOLSR 0x02 /* Legacy WOL_SR shadow */
++#define PWCFG_LEGACY_WOLEN 0x01 /* Legacy WOL_EN shadow */
++
++/*
++ * Bits in WOLCFG
++ */
++
++#define WOLCFG_PMEOVR 0x80 /* for legacy use, force PMEEN always */
++#define WOLCFG_SAM 0x20 /* accept multicast case reset, default=0 */
++#define WOLCFG_SAB 0x10 /* accept broadcast case reset, default=0 */
++#define WOLCFG_SMIIACC 0x08 /* ?? */
++#define WOLCFG_SGENWH 0x02
++#define WOLCFG_PHYINTEN 0x01 /* 0:PHYINT trigger enable, 1:use internal MII
++ to report status change */
++/*
++ * Bits in WOLSR1
++ */
++
++#define WOLSR_LINKOFF_INT 0x0800
++#define WOLSR_LINKON_INT 0x0400
++#define WOLSR_MAGIC_INT 0x0200
++#define WOLSR_UNICAST_INT 0x0100
++
++/*
++ * Ethernet address filter type
++ */
++
++#define PKT_TYPE_NONE 0x0000 /* Turn off receiver */
++#define PKT_TYPE_DIRECTED 0x0001 /* obsolete, directed address is always accepted */
++#define PKT_TYPE_MULTICAST 0x0002
++#define PKT_TYPE_ALL_MULTICAST 0x0004
++#define PKT_TYPE_BROADCAST 0x0008
++#define PKT_TYPE_PROMISCUOUS 0x0020
++#define PKT_TYPE_LONG 0x2000 /* NOTE.... the definition of LONG is >2048 bytes in our chip */
++#define PKT_TYPE_RUNT 0x4000
++#define PKT_TYPE_ERROR 0x8000 /* Accept error packets, e.g. CRC error */
++
++/*
++ * Loopback mode
++ */
++
++#define MAC_LB_NONE 0x00
++#define MAC_LB_INTERNAL 0x01
++#define MAC_LB_EXTERNAL 0x02
++
++/*
++ * Enabled mask value of irq
++ */
++
++#if defined(_SIM)
++#define IMR_MASK_VALUE 0x0033FF0FUL /* initial value of IMR
++ set IMR0 to 0x0F according to spec */
++
++#else
++#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
++ ignore MIBFI,RACEI to
++ reduce intr. frequency
++ NOTE.... do not enable NoBuf int mask at driver
++ when (1) NoBuf -> RxThreshold = SF
++ (2) OK -> RxThreshold = original value
++ */
++#endif
++
++/*
++ * Revision id
++ */
++
++#define REV_ID_VT3119_A0 0x00
++#define REV_ID_VT3119_A1 0x01
++#define REV_ID_VT3216_A0 0x10
++
++/*
++ * Max time out delay time
++ */
++
++#define W_MAX_TIMEOUT 0x0FFFU
++
++
++/*
++ * MAC registers as a structure. Cannot be directly accessed this
++ * way but generates offsets for readl/writel() calls
++ */
++
++struct mac_regs {
++ volatile u8 PAR[6]; /* 0x00 */
++ volatile u8 RCR;
++ volatile u8 TCR;
++
++ volatile u32 CR0Set; /* 0x08 */
++ volatile u32 CR0Clr; /* 0x0C */
++
++ volatile u8 MARCAM[8]; /* 0x10 */
++
++ volatile u32 DecBaseHi; /* 0x18 */
++ volatile u16 DbfBaseHi; /* 0x1C */
++ volatile u16 reserved_1E;
++
++ volatile u16 ISRCTL; /* 0x20 */
++ volatile u8 TXESR;
++ volatile u8 RXESR;
++
++ volatile u32 ISR; /* 0x24 */
++ volatile u32 IMR;
++
++ volatile u32 TDStatusPort; /* 0x2C */
++
++ volatile u16 TDCSRSet; /* 0x30 */
++ volatile u8 RDCSRSet;
++ volatile u8 reserved_33;
++ volatile u16 TDCSRClr;
++ volatile u8 RDCSRClr;
++ volatile u8 reserved_37;
++
++ volatile u32 RDBaseLo; /* 0x38 */
++ volatile u16 RDIdx; /* 0x3C */
++ volatile u16 reserved_3E;
++
++ volatile u32 TDBaseLo[4]; /* 0x40 */
++
++ volatile u16 RDCSize; /* 0x50 */
++ volatile u16 TDCSize; /* 0x52 */
++ volatile u16 TDIdx[4]; /* 0x54 */
++ volatile u16 tx_pause_timer; /* 0x5C */
++ volatile u16 RBRDU; /* 0x5E */
++
++ volatile u32 FIFOTest0; /* 0x60 */
++ volatile u32 FIFOTest1; /* 0x64 */
++
++ volatile u8 CAMADDR; /* 0x68 */
++ volatile u8 CAMCR; /* 0x69 */
++ volatile u8 GFTEST; /* 0x6A */
++ volatile u8 FTSTCMD; /* 0x6B */
++
++ volatile u8 MIICFG; /* 0x6C */
++ volatile u8 MIISR;
++ volatile u8 PHYSR0;
++ volatile u8 PHYSR1;
++ volatile u8 MIICR;
++ volatile u8 MIIADR;
++ volatile u16 MIIDATA;
++
++ volatile u16 SoftTimer0; /* 0x74 */
++ volatile u16 SoftTimer1; /* 0x76 */
++
++ volatile u8 CFGA; /* 0x78 */
++ volatile u8 CFGB;
++ volatile u8 CFGC;
++ volatile u8 CFGD;
++
++ volatile u16 DCFG; /* 0x7C */
++ volatile u16 MCFG;
++
++ volatile u8 TBIST; /* 0x80 */
++ volatile u8 RBIST;
++ volatile u8 PMCPORT;
++ volatile u8 STICKHW;
++
++ volatile u8 MIBCR; /* 0x84 */
++ volatile u8 reserved_85;
++ volatile u8 rev_id;
++ volatile u8 PORSTS;
++
++ volatile u32 MIBData; /* 0x88 */
++
++ volatile u16 EEWrData; /* 0x8C */
++
++ volatile u8 reserved_8E;
++ volatile u8 BPMDWr; /* 0x8F */
++ volatile u8 BPCMD; /* 0x90 */
++ volatile u8 BPMDRd; /* 0x91 */
++
++ volatile u8 EECHKSUM; /* 0x92 */
++ volatile u8 EECSR;
++
++ volatile u16 EERdData; /* 0x94 */
++ volatile u8 EADDR;
++ volatile u8 EMBCMD;
++
++
++ volatile u8 JMPSR0; /* 0x98 */
++ volatile u8 JMPSR1;
++ volatile u8 JMPSR2;
++ volatile u8 JMPSR3;
++ volatile u8 CHIPGSR; /* 0x9C */
++ volatile u8 TESTCFG;
++ volatile u8 DEBUG;
++ volatile u8 CHIPGCR;
++
++ volatile u16 WOLCRSet; /* 0xA0 */
++ volatile u8 PWCFGSet;
++ volatile u8 WOLCFGSet;
++
++ volatile u16 WOLCRClr; /* 0xA4 */
++ volatile u8 PWCFGCLR;
++ volatile u8 WOLCFGClr;
++
++ volatile u16 WOLSRSet; /* 0xA8 */
++ volatile u16 reserved_AA;
++
++ volatile u16 WOLSRClr; /* 0xAC */
++ volatile u16 reserved_AE;
++
++ volatile u16 PatternCRC[8]; /* 0xB0 */
++ volatile u32 ByteMask[4][4]; /* 0xC0 */
++} __attribute__ ((__packed__));
++
++
++/* Index of each counter within the on-chip MIB block; used to walk
++ * successive MIBData reads (see mac_hw_mibs_read below). */
++enum hw_mib {
++ HW_MIB_ifRxAllPkts = 0,
++ HW_MIB_ifRxOkPkts,
++ HW_MIB_ifTxOkPkts,
++ HW_MIB_ifRxErrorPkts,
++ HW_MIB_ifRxRuntOkPkt,
++ HW_MIB_ifRxRuntErrPkt,
++ HW_MIB_ifRx64Pkts,
++ HW_MIB_ifTx64Pkts,
++ HW_MIB_ifRx65To127Pkts,
++ HW_MIB_ifTx65To127Pkts,
++ HW_MIB_ifRx128To255Pkts,
++ HW_MIB_ifTx128To255Pkts,
++ HW_MIB_ifRx256To511Pkts,
++ HW_MIB_ifTx256To511Pkts,
++ HW_MIB_ifRx512To1023Pkts,
++ HW_MIB_ifTx512To1023Pkts,
++ HW_MIB_ifRx1024To1518Pkts,
++ HW_MIB_ifTx1024To1518Pkts,
++ HW_MIB_ifTxEtherCollisions,
++ HW_MIB_ifRxPktCRCE,
++ HW_MIB_ifRxJumboPkts,
++ HW_MIB_ifTxJumboPkts,
++ HW_MIB_ifRxMacControlFrames,
++ HW_MIB_ifTxMacControlFrames,
++ HW_MIB_ifRxPktFAE,
++ HW_MIB_ifRxLongOkPkt,
++ HW_MIB_ifRxLongPktErrPkt,
++ HW_MIB_ifTXSQEErrors,
++ HW_MIB_ifRxNobuf,
++ HW_MIB_ifRxSymbolErrors,
++ HW_MIB_ifInRangeLengthErrors,
++ HW_MIB_ifLateCollisions,
++ HW_MIB_SIZE /* number of hardware MIB counters */
++};
++
++/* Chip identifier used to index the velocity_info_tbl entries. */
++enum chip_type {
++ CHIP_TYPE_VT6110 = 1,
++};
++
++/* Static per-chip description: marketing name, I/O window size,
++ * number of TX queues and capability flags. */
++struct velocity_info_tbl {
++ enum chip_type chip_id;
++ char *name;
++ int io_size;
++ int txqueue;
++ u32 flags;
++};
++
++/* Points at the table entry for the chip actually probed. */
++struct velocity_info_tbl *info;
++
++/* Freeze the hardware MIB counters, clear them, busy-wait until the
++ * clear completes, then unfreeze. */
++#define mac_hw_mibs_init(regs) {\
++ BYTE_REG_BITS_ON(MIBCR_MIBFRZ,&((regs)->MIBCR));\
++ BYTE_REG_BITS_ON(MIBCR_MIBCLR,&((regs)->MIBCR));\
++ do {}\
++ while (BYTE_REG_BITS_IS_ON(MIBCR_MIBCLR,&((regs)->MIBCR)));\
++ BYTE_REG_BITS_OFF(MIBCR_MIBFRZ,&((regs)->MIBCR));\
++}
++
++/* Interrupt status register access helpers. */
++#define mac_read_isr(regs) readl(&((regs)->ISR))
++#define mac_write_isr(regs, x) writel((x),&((regs)->ISR))
++#define mac_clear_isr(regs) writel(0xffffffffL,&((regs)->ISR))
++
++/* No trailing semicolon here: the caller supplies it, and a stray one
++ * would break use of this macro inside an if/else. */
++#define mac_write_int_mask(mask, regs) writel((mask),&((regs)->IMR))
++#define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr))
++#define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set))
++
++/* Reset the MIB read pointer, then copy all HW_MIB_SIZE counters into
++ * the MIBs array via successive MIBData reads. */
++#define mac_hw_mibs_read(regs, MIBs) {\
++ int i;\
++ BYTE_REG_BITS_ON(MIBCR_MPTRINI,&((regs)->MIBCR));\
++ for (i=0;i<HW_MIB_SIZE;i++) {\
++ (MIBs)[i]=readl(&((regs)->MIBData));\
++ }\
++}
++
++/* Program the DMA length field (low 3 bits of DCFG). */
++#define mac_set_dma_length(regs, n) {\
++ BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\
++}
++
++/* Program the RX FIFO threshold bits of MCFG. */
++#define mac_set_rx_thresh(regs, n) {\
++ BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\
++}
++
++#define mac_rx_queue_run(regs) {\
++ writeb(TRDCSR_RUN, &((regs)->RDCSRSet));\
++}
++
++#define mac_rx_queue_wake(regs) {\
++ writeb(TRDCSR_WAK, &((regs)->RDCSRSet));\
++}
++
++/* Each TX queue owns a 4-bit field in TDCSR; (n) is parenthesized so
++ * an expression may safely be passed as the queue number. */
++#define mac_tx_queue_run(regs, n) {\
++ writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\
++}
++
++#define mac_tx_queue_wake(regs, n) {\
++ writew(TRDCSR_WAK<<((n)*4),&((regs)->TDCSRSet));\
++}
++
++/* Ask the EEPROM to reload its contents and poll in 10us steps,
++ * bailing out after 0x1000 iterations, until RELOAD self-clears. */
++#define mac_eeprom_reload(regs) {\
++ int i=0;\
++ BYTE_REG_BITS_ON(EECSR_RELOAD,&((regs)->EECSR));\
++ do {\
++ udelay(10);\
++ if (i++>0x1000) {\
++ break;\
++ }\
++ }while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&((regs)->EECSR)));\
++}
++
++/* Selects which CAM bank an access targets: VLAN-id entries or
++ * multicast address entries. */
++enum velocity_cam_type {
++ VELOCITY_VLAN_ID_CAM = 0,
++ VELOCITY_MULTICAST_CAM
++};
++
++/**
++ * mac_get_cam_mask - Read a CAM mask
++ * @regs: register block for this velocity
++ * @mask: buffer to store mask (8 bytes)
++ * @cam_type: CAM to fetch
++ *
++ * Fetch the mask bits of the selected CAM and store them into the
++ * provided mask buffer.
++ */
++
++static inline void mac_get_cam_mask(struct mac_regs *regs, u8 * mask,
++ enum velocity_cam_type cam_type)
++{
++ int i;
++ /* Select CAM mask page */
++ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ writeb(CAMADDR_VCAMSL, &regs->CAMADDR);
++ else
++ writeb(0, &regs->CAMADDR);
++
++ /* read the 64 mask bits as 8 bytes */
++ for (i = 0; i < 8; i++)
++ *mask++ = readb(&(regs->MARCAM[i]));
++
++ /* disable CAMEN */
++ writeb(0, &regs->CAMADDR);
++
++ /* Select mar page again */
++ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++
++}
++
++/**
++ * mac_set_cam_mask - Set a CAM mask
++ * @regs: register block for this velocity
++ * @mask: CAM mask to load (8 bytes)
++ * @cam_type: CAM to store
++ *
++ * Store a new mask into a CAM
++ */
++
++static inline void mac_set_cam_mask(struct mac_regs *regs, u8 * mask,
++ enum velocity_cam_type cam_type)
++{
++ int i;
++ /* Select CAM mask page */
++ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
++ else
++ writeb(CAMADDR_CAMEN, &regs->CAMADDR);
++
++ /* write the 64 mask bits as 8 bytes */
++ for (i = 0; i < 8; i++) {
++ writeb(*mask++, &(regs->MARCAM[i]));
++ }
++ /* disable CAMEN */
++ writeb(0, &regs->CAMADDR);
++
++ /* Select mar page again */
++ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++}
++
++/**
++ * mac_set_cam - set CAM data
++ * @regs: register block of this velocity
++ * @idx: Cam index (0..63)
++ * @addr: 2 (VLAN tag) or 6 (MAC address) bytes of CAM data
++ * @cam_type: CAM to load
++ *
++ * Load an address or vlan tag into a CAM
++ */
++
++static inline void mac_set_cam(struct mac_regs *regs, int idx, u8 * addr,
++ enum velocity_cam_type cam_type)
++{
++ int i;
++
++ /* Select CAM data page */
++ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++
++ /* only 64 CAM entries exist */
++ idx &= (64 - 1);
++
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx,
++ &regs->CAMADDR);
++ else
++ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
++
++ /* NOTE(review): the u16 store assumes addr is 2-byte aligned for
++ * VLAN tags -- confirm at the call sites. */
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ writew(*((u16 *) addr), &regs->MARCAM[0]);
++ else {
++ for (i = 0; i < 6; i++) {
++ writeb(*addr++, &(regs->MARCAM[i]));
++ }
++ }
++ /* commit the entry and give the hardware time to latch it */
++ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
++
++ udelay(10);
++
++ writeb(0, &regs->CAMADDR);
++
++ /* Select mar page again */
++ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++}
++
++/**
++ * mac_get_cam - fetch CAM data
++ * @regs: register block of this velocity
++ * @idx: Cam index (0..63)
++ * @addr: buffer to hold up to 6 bytes of CAM data
++ * @cam_type: CAM to load
++ *
++ * Load an address or vlan tag from a CAM into the buffer provided by
++ * the caller. VLAN tags are 2 bytes the address cam entries are 6.
++ */
++
++static inline void mac_get_cam(struct mac_regs *regs, int idx, u8 * addr,
++ enum velocity_cam_type cam_type)
++{
++ int i;
++
++ /* Select CAM data page */
++ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++
++ /* only 64 CAM entries exist */
++ idx &= (64 - 1);
++
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx,
++ &regs->CAMADDR);
++ else
++ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
++
++ /* start the read and give the hardware time to respond */
++ BYTE_REG_BITS_ON(CAMCR_CAMRD, &regs->CAMCR);
++
++ udelay(10);
++
++ if (cam_type == VELOCITY_VLAN_ID_CAM)
++ *((u16 *) addr) = readw(&(regs->MARCAM[0]));
++ else
++ for (i = 0; i < 6; i++, addr++)
++ *((u8 *) addr) = readb(&(regs->MARCAM[i]));
++
++ writeb(0, &regs->CAMADDR);
++
++ /* Select mar page again */
++ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0,
++ &regs->CAMCR);
++}
++
++/**
++ * mac_wol_reset - reset WOL after exiting low power
++ * @regs: register block of this velocity
++ *
++ * Called after we drop out of wake on lan mode in order to
++ * reset the Wake on lan features. This function doesn't restore
++ * the rest of the logic from the result of sleep/wakeup
++ */
++
++static inline void mac_wol_reset(struct mac_regs *regs)
++{
++
++ /* Turn off SWPTAG right after leaving power mode */
++ BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
++ /* clear sticky bits */
++ BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
++
++ BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
++ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
++ /* disable force PME-enable */
++ writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
++ /* disable power-event config bit */
++ writew(0xFFFF, &regs->WOLCRClr);
++ /* clear power status */
++ writew(0xFFFF, &regs->WOLSRClr);
++}
++
++
++/*
++ * Header for WOL definitions. Used to compute hashes
++ */
++
++/* One multicast CAM entry: a MAC address. */
++typedef u8 MCAM_ADDR[ETH_ALEN];
++
++/* On-the-wire layout of an Ethernet ARP packet (Ethernet header
++ * included); packed so byte offsets match the wire format. */
++struct arp_packet {
++ u8 dest_mac[ETH_ALEN];
++ u8 src_mac[ETH_ALEN];
++ u16 type;
++ u16 ar_hrd;
++ u16 ar_pro;
++ u8 ar_hln;
++ u8 ar_pln;
++ u16 ar_op;
++ u8 ar_sha[ETH_ALEN];
++ u8 ar_sip[4];
++ u8 ar_tha[ETH_ALEN];
++ u8 ar_tip[4];
++} __attribute__ ((__packed__));
++
++/* Wake-on-LAN magic packet: 16 copies of the station MAC, optionally
++ * followed by a 6-byte password. */
++struct _magic_packet {
++ u8 dest_mac[6];
++ u8 src_mac[6];
++ u16 type;
++ u8 MAC[16][6];
++ u8 password[6];
++} __attribute__ ((__packed__));
++
++/*
++ * Store for chip context when saving and restoring status. Not
++ * all fields are saved/restored currently.
++ */
++
++struct velocity_context {
++ u8 mac_reg[256];
++ MCAM_ADDR cam_addr[MCAM_SIZE];
++ u16 vcam[VCAM_SIZE];
++ u32 cammask[2];
++ u32 patcrc[2];
++ u32 pattern[8];
++};
++
++
++/*
++ * MII registers.
++ */
++
++
++/*
++ * Registers in the MII (offset unit is WORD)
++ */
++
++#define MII_REG_BMCR 0x00 // physical address
++#define MII_REG_BMSR 0x01 //
++#define MII_REG_PHYID1 0x02 // OUI
++#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
++#define MII_REG_ANAR 0x04 //
++#define MII_REG_ANLPAR 0x05 //
++#define MII_REG_G1000CR 0x09 //
++#define MII_REG_G1000SR 0x0A //
++#define MII_REG_MODCFG 0x10 //
++#define MII_REG_TCSR 0x16 //
++#define MII_REG_PLED 0x1B //
++// NS, MYSON only
++#define MII_REG_PCR 0x17 //
++// ESI only
++#define MII_REG_PCSR 0x17 //
++#define MII_REG_AUXCR 0x1C //
++
++// Marvell 88E1000/88E1000S
++#define MII_REG_PSCR 0x10 // PHY specific control register
++
++//
++// Bits in the BMCR register
++//
++#define BMCR_RESET 0x8000 //
++#define BMCR_LBK 0x4000 //
++#define BMCR_SPEED100 0x2000 //
++#define BMCR_AUTO 0x1000 //
++#define BMCR_PD 0x0800 //
++#define BMCR_ISO 0x0400 //
++#define BMCR_REAUTO 0x0200 //
++#define BMCR_FDX 0x0100 //
++#define BMCR_SPEED1G 0x0040 //
++//
++// Bits in the BMSR register
++//
++#define BMSR_AUTOCM 0x0020 //
++#define BMSR_LNK 0x0004 //
++
++//
++// Bits in the ANAR register
++//
++#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
++#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
++#define ANAR_T4 0x0200 //
++#define ANAR_TXFD 0x0100 //
++#define ANAR_TX 0x0080 //
++#define ANAR_10FD 0x0040 //
++#define ANAR_10 0x0020 //
++//
++// Bits in the ANLPAR register
++//
++#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
++#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
++#define ANLPAR_T4 0x0200 //
++#define ANLPAR_TXFD 0x0100 //
++#define ANLPAR_TX 0x0080 //
++#define ANLPAR_10FD 0x0040 //
++#define ANLPAR_10 0x0020 //
++
++//
++// Bits in the G1000CR register
++//
++#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
++#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
++
++//
++// Bits in the G1000SR register
++//
++#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
++#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
++
++#define TCSR_ECHODIS 0x2000 //
++#define AUXCR_MDPPS 0x0004 //
++
++// Bits in the PLED register
++#define PLED_LALBE 0x0004 //
++
++// Marvell 88E1000/88E1000S Bits in the PHY specific control register (10h)
++#define PSCR_ACRSTX 0x0800 // Assert CRS on Transmit
++
++#define PHYID_CICADA_CS8201 0x000FC410UL
++#define PHYID_VT3216_32BIT 0x000FC610UL
++#define PHYID_VT3216_64BIT 0x000FC600UL
++#define PHYID_MARVELL_1000 0x01410C50UL
++#define PHYID_MARVELL_1000S 0x01410C40UL
++
++#define PHYID_REV_ID_MASK 0x0000000FUL
++
++#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
++#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
++
++/* Read-modify-write helpers for MII registers: (x) = bit mask,
++ * (i) = MII register index, (p) = PHY address. */
++#define MII_REG_BITS_ON(x,i,p) do {\
++ u16 w;\
++ velocity_mii_read((p),(i),&(w));\
++ (w)|=(x);\
++ velocity_mii_write((p),(i),(w));\
++} while (0)
++
++#define MII_REG_BITS_OFF(x,i,p) do {\
++ u16 w;\
++ velocity_mii_read((p),(i),&(w));\
++ (w)&=(~(x));\
++ velocity_mii_write((p),(i),(w));\
++} while (0)
++
++/* Statement-expression: evaluates non-zero when any of bits (x) are
++ * set in MII register (i). */
++#define MII_REG_BITS_IS_ON(x,i,p) ({\
++ u16 w;\
++ velocity_mii_read((p),(i),&(w));\
++ ((int) ((w) & (x)));})
++
++/* Assemble the 32-bit PHY id from PHYID1/PHYID2.
++ * NOTE(review): stores two u16 reads through casts into halves of a
++ * u32, which bakes in a little-endian layout -- confirm before use on
++ * big-endian targets. */
++#define MII_GET_PHY_ID(p) ({\
++ u32 id;\
++ velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
++ velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
++ (id);})
++
++#ifdef LINUX
++/*
++ * Inline debug routine
++ */
++
++
++/* Verbosity levels compared against the global msglevel by
++ * VELOCITY_PRT(). */
++enum velocity_msg_level {
++ MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation.
++ MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified.
++ MSG_LEVEL_INFO = 2, //Normal message.
++ MSG_LEVEL_VERBOSE = 3, //Will report all trivial errors.
++ MSG_LEVEL_DEBUG = 4 //Only for debug purpose.
++};
++
++#ifdef VELOCITY_DEBUG
++/* Debug-only assertion: log the failing expression and BUG(). */
++#define ASSERT(x) { \
++ if (!(x)) { \
++ printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
++ __FUNCTION__, __LINE__);\
++ BUG(); \
++ }\
++}
++#define VELOCITY_DBG(p,args...) printk(p, ##args)
++#else
++#define ASSERT(x)
++/* The stub must accept the same variadic argument list as the debug
++ * version, or multi-argument call sites fail to compile when
++ * VELOCITY_DEBUG is disabled. */
++#define VELOCITY_DBG(p,args...)
++#endif
++
++/* Print p/args only when level l is at or below the global msglevel. */
++#define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printf( p ,##args);} while (0)
++
++/* Dump the multicast or VLAN CAM mask of velocity (p) in hex.
++ * NOTE(review): uses printk while VELOCITY_PRT above uses printf --
++ * looks like a partial port; confirm which is intended here. */
++#define VELOCITY_PRT_CAMMASK(p,t) {\
++ int i;\
++ if ((t)==VELOCITY_MULTICAST_CAM) {\
++ for (i=0;i<(MCAM_SIZE/8);i++)\
++ printk("%02X",(p)->mCAMmask[i]);\
++ }\
++ else {\
++ for (i=0;i<(VCAM_SIZE/8);i++)\
++ printk("%02X",(p)->vCAMmask[i]);\
++ }\
++ printk("\n");\
++}
++
++#endif
++
++#define VELOCITY_WOL_MAGIC 0x00000000UL
++#define VELOCITY_WOL_PHY 0x00000001UL
++#define VELOCITY_WOL_ARP 0x00000002UL
++#define VELOCITY_WOL_UCAST 0x00000004UL
++#define VELOCITY_WOL_BCAST 0x00000010UL
++#define VELOCITY_WOL_MCAST 0x00000020UL
++#define VELOCITY_WOL_MAGIC_SEC 0x00000040UL
++
++/*
++ * Flags for options
++ */
++
++#define VELOCITY_FLAGS_TAGGING 0x00000001UL
++#define VELOCITY_FLAGS_TX_CSUM 0x00000002UL
++#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL
++#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL
++#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL
++
++#define VELOCITY_FLAGS_FLOW_CTRL 0x01000000UL
++
++/*
++ * Flags for driver status
++ */
++
++#define VELOCITY_FLAGS_OPENED 0x00010000UL
++#define VELOCITY_FLAGS_VMNS_CONNECTED 0x00020000UL
++#define VELOCITY_FLAGS_VMNS_COMMITTED 0x00040000UL
++#define VELOCITY_FLAGS_WOL_ENABLED 0x00080000UL
++
++/*
++ * Flags for MII status
++ */
++
++#define VELOCITY_LINK_FAIL 0x00000001UL
++#define VELOCITY_SPEED_10 0x00000002UL
++#define VELOCITY_SPEED_100 0x00000004UL
++#define VELOCITY_SPEED_1000 0x00000008UL
++#define VELOCITY_DUPLEX_FULL 0x00000010UL
++#define VELOCITY_AUTONEG_ENABLE 0x00000020UL
++#define VELOCITY_FORCED_BY_EEPROM 0x00000040UL
++
++/*
++ * For velocity_set_media_duplex
++ */
++
++#define VELOCITY_LINK_CHANGE 0x00000001UL
++
++/* Forced media speed/duplex selection (module option). */
++enum speed_opt {
++ SPD_DPX_AUTO = 0,
++ SPD_DPX_100_HALF = 1,
++ SPD_DPX_100_FULL = 2,
++ SPD_DPX_10_HALF = 3,
++ SPD_DPX_10_FULL = 4
++};
++
++/* Why the chip is being (re)initialised. */
++enum velocity_init_type {
++ VELOCITY_INIT_COLD = 0,
++ VELOCITY_INIT_RESET,
++ VELOCITY_INIT_WOL
++};
++
++/* Flow control policy selection. */
++enum velocity_flow_cntl_type {
++ FLOW_CNTL_DEFAULT = 1,
++ FLOW_CNTL_TX,
++ FLOW_CNTL_RX,
++ FLOW_CNTL_TX_RX,
++ FLOW_CNTL_DISABLE,
++};
++
++/* User-tunable driver options gathered in one place. */
++struct velocity_opt {
++ int numrx; /* Number of RX descriptors */
++ int numtx; /* Number of TX descriptors */
++ enum speed_opt spd_dpx; /* Media link mode */
++ int vid; /* vlan id */
++ int DMA_length; /* DMA length */
++ int rx_thresh; /* RX_THRESH */
++ int flow_cntl; /* enum velocity_flow_cntl_type */
++ int wol_opts; /* Wake on lan options */
++ int td_int_count;
++ int int_works;
++ int rx_bandwidth_hi;
++ int rx_bandwidth_lo;
++ int rx_bandwidth_en;
++ u32 flags; /* VELOCITY_FLAGS_* */
++};
++
++#define RX_DESC_MIN 4
++#define RX_DESC_MAX 255
++#define RX_DESC_DEF 64
++
++#define TX_DESC_MIN 1
++#define TX_DESC_MAX 256
++#define TX_DESC_DEF 4
++
++/* Per-adapter driver state: hardware handles, descriptor rings,
++ * option block, CAM masks and link status. */
++struct velocity_info {
++// struct list_head list;
++
++ struct pci_device *pdev;
++// struct net_device *dev;
++// struct net_device_stats stats;
++
++#ifdef CONFIG_PM
++ u32 pci_state[16];
++#endif
++
++// dma_addr_t rd_pool_dma;
++// dma_addr_t td_pool_dma[TX_QUEUE_NO];
++
++// dma_addr_t tx_bufs_dma;
++ u8 *tx_bufs;
++
++ u8 ip_addr[4];
++ enum chip_type chip_id;
++
++ struct mac_regs *mac_regs;
++ unsigned long memaddr;
++ unsigned long ioaddr;
++ u32 io_size;
++
++ u8 rev_id;
++
++/* Number of free TX descriptors left in queue q of velocity p. */
++#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)]))
++
++ int num_txq;
++
++ volatile int td_used[TX_QUEUE_NO];
++ int td_curr;
++ int td_tail[TX_QUEUE_NO];
++ unsigned char *TxDescArrays; /* Index of Tx Descriptor buffer */
++ unsigned char *RxDescArrays; /* Index of Rx Descriptor buffer */
++ unsigned char *tx_buffs;
++ unsigned char *rx_buffs;
++
++ unsigned char *txb;
++ unsigned char *rxb;
++ struct tx_desc *td_rings;
++ struct velocity_td_info *td_infos[TX_QUEUE_NO];
++
++ int rd_curr;
++ int rd_dirty;
++ u32 rd_filled;
++ struct rx_desc *rd_ring;
++ struct velocity_rd_info *rd_info; /* It's an array */
++
++#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
++ u32 mib_counter[MAX_HW_MIB_COUNTER];
++ struct velocity_opt options;
++
++ u32 int_mask;
++
++ u32 flags;
++
++ int rx_buf_sz;
++ u32 mii_status;
++ u32 phy_id;
++ int multicast_limit;
++
++ u8 vCAMmask[(VCAM_SIZE / 8)];
++ u8 mCAMmask[(MCAM_SIZE / 8)];
++
++// spinlock_t lock;
++
++ int wol_opts;
++ u8 wol_passwd[6];
++
++ struct velocity_context context;
++
++ u32 ticks;
++ u32 rx_bytes;
++
++// NOTE(review): 'vptx' defines a file-scope instance right here in
++// the header; 'vptr' below is the pointer the driver actually uses.
++} vptx;
++
++static struct velocity_info *vptr;
++
++#ifdef LINUX
++/**
++ * velocity_get_ip - find an IP address for the device
++ * @vptr: Velocity to query
++ *
++ * Dig out an IP address for this interface so that we can
++ * configure wakeup with WOL for ARP. If there are multiple IP
++ * addresses on this chain then we use the first - multi-IP WOL is not
++ * supported.
++ *
++ * CHECK ME: locking
++ *
++ * NOTE(review): references vptr->dev, but the 'dev' member of
++ * struct velocity_info is commented out above -- this LINUX-only
++ * helper will not compile if LINUX is ever defined; confirm intent.
++ */
++
++inline static int velocity_get_ip(struct velocity_info *vptr)
++{
++ struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
++ struct in_ifaddr *ifa;
++
++ if (in_dev != NULL) {
++ ifa = (struct in_ifaddr *) in_dev->ifa_list;
++ if (ifa != NULL) {
++ /* take the first address on the chain */
++ memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
++ return 0;
++ }
++ }
++ return -ENOENT;
++}
++
++/**
++ * velocity_update_hw_mibs - fetch MIB counters from chip
++ * @vptr: velocity to update
++ *
++ * The velocity hardware keeps certain counters in the hardware
++ * side. We need to read these when the user asks for statistics
++ * or when they overflow (causing an interrupt). The read of the
++ * statistic clears it, so we keep running master counters in user
++ * space.
++ */
++
++static inline void velocity_update_hw_mibs(struct velocity_info *vptr)
++{
++ u32 tmp;
++ int i;
++ /* flush the hardware counters, then wait for the flush to finish */
++ BYTE_REG_BITS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR));
++
++ while (BYTE_REG_BITS_IS_ON
++ (MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR)));
++
++ /* reset the read pointer and accumulate each counter; only the
++ low 24 bits of MIBData carry the counter value */
++ BYTE_REG_BITS_ON(MIBCR_MPTRINI, &(vptr->mac_regs->MIBCR));
++ for (i = 0; i < HW_MIB_SIZE; i++) {
++ tmp = readl(&(vptr->mac_regs->MIBData)) & 0x00FFFFFFUL;
++ vptr->mib_counter[i] += tmp;
++ }
++}
++#endif
++/**
++ * init_flow_control_register - set up flow control
++ * @vptr: velocity to configure
++ *
++ * Configure the flow control registers for this velocity device.
++ */
++
++static inline void init_flow_control_register(struct velocity_info *vptr)
++{
++ struct mac_regs *regs = vptr->mac_regs;
++
++ /* Set {XHITH1, XHITH0, XLTH1, XLTH0} in FlowCR1 to {1, 0, 1, 1}
++ depend on RD=64, and Turn on XNOEN in FlowCR1 */
++ writel((CR0_XONEN | CR0_XHITH1 | CR0_XLTH1 | CR0_XLTH0),
++ &regs->CR0Set);
++ writel((CR0_FDXTFCEN | CR0_FDXRFCEN | CR0_HDXFCEN | CR0_XHITH0),
++ &regs->CR0Clr);
++
++ /* Set TxPauseTimer to 0xFFFF */
++ writew(0xFFFF, &regs->tx_pause_timer);
++
++ /* Initialize RBRDU to Rx buffer count. */
++ writew(vptr->options.numrx, &regs->RBRDU);
++}
++
++
++#endif