2 * gPXE driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Derived from Linux skge driver (v1.13), which was
4 * based on earlier sk98lin, e100 and FreeBSD if_sk drivers.
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
12 * Modified for gPXE, July 2008 by Michael Decker and in
13 * December 2009 by Thomas Miletich <thomas.miletich@gmail.com>
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License.
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 FILE_LICENCE ( GPL2_ONLY );
35 #include <gpxe/netdevice.h>
36 #include <gpxe/ethernet.h>
37 #include <gpxe/if_ether.h>
38 #include <gpxe/iobuf.h>
39 #include <gpxe/malloc.h>
/* PCI IDs of supported boards: SysKonnect SK-98xx (Genesis "GE" and
 * Yukon "YU" variants) plus OEM boards from 3Com, D-Link, Marvell,
 * CNET and Linksys built around the same chipsets. */
static struct pci_device_id skge_id_table[] = {
	PCI_ROM(0x10b7, 0x1700, "3C940", "3COM 3C940", 0),
	PCI_ROM(0x10b7, 0x80eb, "3C940B", "3COM 3C940", 0),
	PCI_ROM(0x1148, 0x4300, "GE", "Syskonnect GE", 0),
	PCI_ROM(0x1148, 0x4320, "YU", "Syskonnect YU", 0),
	PCI_ROM(0x1186, 0x4C00, "DGE510T", "DLink DGE-510T", 0),
	PCI_ROM(0x1186, 0x4b01, "DGE530T", "DLink DGE-530T", 0),
	PCI_ROM(0x11ab, 0x4320, "id4320", "Marvell id4320", 0),
	PCI_ROM(0x11ab, 0x5005, "id5005", "Marvell id5005", 0), /* Belkin */
	PCI_ROM(0x1371, 0x434e, "Gigacard", "CNET Gigacard", 0),
	PCI_ROM(0x1737, 0x1064, "EG1064", "Linksys EG1064", 0),
	PCI_ROM(0x1737, 0xffff, "id_any", "Linksys [any]", 0)
/* Forward declarations: open/close, ring management, PHY access and
 * link handling routines referenced before their definitions. */
static int skge_up(struct net_device *dev);
static void skge_down(struct net_device *dev);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
static void skge_phyirq(struct skge_hw *hw);
static void skge_poll(struct net_device *dev);
static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob);
static void skge_net_irq ( struct net_device *dev, int enable );
static void skge_rx_refill(struct net_device *dev);
/* gPXE net-device operations table.  Only .transmit is visible in this
 * listing; the remaining members (.open/.close/.irq/.poll, presumably)
 * appear to be missing — TODO confirm against the full source. */
static struct net_device_operations skge_operations = {
	.transmit = skge_xmit_frame,
/* Avoid conditionals by using array */
/* Per-port lookup tables, indexed by port number (0 or 1) */
static const int txqaddr[] = { Q_XA1, Q_XA2 };		/* tx queue address */
static const int rxqaddr[] = { Q_R1, Q_R2 };		/* rx queue address */
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };	/* rx complete irq */
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };	/* tx complete irq */
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
	/* copper media: start from the full 10/100/1000 feature set */
	supported = SUPPORTED_10baseT_Half
		| SUPPORTED_10baseT_Full
		| SUPPORTED_100baseT_Half
		| SUPPORTED_100baseT_Full
		| SUPPORTED_1000baseT_Half
		| SUPPORTED_1000baseT_Full
		| SUPPORTED_Autoneg| SUPPORTED_TP;

	/* Genesis (XMAC) copper boards do not support 10/100 Mbit */
	if (hw->chip_id == CHIP_ID_GENESIS)
		supported &= ~(SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full);

	/* Yukon copper cannot do 1000 Mbit half duplex */
	else if (hw->chip_id == CHIP_ID_YUKON)
		supported &= ~SUPPORTED_1000baseT_Half;

	/* fiber media: gigabit only.  NOTE(review): the branch selecting
	 * this arm (presumably on hw->copper) is missing from this
	 * listing — confirm against the full source. */
	supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
		| SUPPORTED_FIBRE | SUPPORTED_Autoneg;
121 /* Chip internal frequency for clock calculations */
122 static inline u32 hwkhz(const struct skge_hw *hw)
124 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
127 /* Microseconds to chip HZ */
128 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
130 return hwkhz(hw) * usec / 1000;
/* LED operating modes: off, steady on, and test (force everything on) */
enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };

/* Drive the port LEDs into the requested mode.  Genesis (XMAC) boards
 * use per-port LED registers (plus the Broadcom PHY LED control when an
 * external BCOM PHY is fitted); Yukon boards drive the LEDs through the
 * Marvell PHY LED control/override registers.
 * NOTE(review): the switch/case and brace lines appear to be missing
 * from this listing; the register-write groups below correspond to the
 * OFF, ON and TST modes respectively — confirm against full source. */
static void skge_led(struct skge_port *skge, enum led_mode mode)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* (LED_MODE_OFF) darken link/tx/rx LEDs, stop timers */
		if (hw->phy_type == SK_PHY_BCOM)
			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
		skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
		skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
		/* (LED_MODE_ON) link LED on, start rx/tx activity timers */
		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
		/* (LED_MODE_TST) force LEDs on via the test registers */
		skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
		skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
		if (hw->phy_type == SK_PHY_BCOM)
			xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
		skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
		skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
		/* Yukon: LEDs controlled via the Marvell PHY.
		 * (LED_MODE_OFF) kill all LED overrides */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_DUP(MO_LED_OFF) |
			     PHY_M_LED_MO_10(MO_LED_OFF) |
			     PHY_M_LED_MO_100(MO_LED_OFF) |
			     PHY_M_LED_MO_1000(MO_LED_OFF) |
			     PHY_M_LED_MO_RX(MO_LED_OFF));
		/* (LED_MODE_ON) pulse/blink timing; light the 100 Mbit
		 * LED only when actually running at 100 Mbit */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
			     PHY_M_LED_PULS_DUR(PULS_170MS) |
			     PHY_M_LED_BLINK_RT(BLINK_84MS) |
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_RX(MO_LED_OFF) |
			     (skge->speed == SPEED_100 ?
			      PHY_M_LED_MO_100(MO_LED_ON) : 0));
		/* (LED_MODE_TST) force every LED on */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_DUP(MO_LED_ON) |
			     PHY_M_LED_MO_10(MO_LED_ON) |
			     PHY_M_LED_MO_100(MO_LED_ON) |
			     PHY_M_LED_MO_1000(MO_LED_ON) |
			     PHY_M_LED_MO_RX(MO_LED_ON));
212 * I've left in these EEPROM and VPD functions, as someone may desire to
213 * integrate them in the future. -mdeck
215 * static int skge_get_eeprom_len(struct net_device *dev)
217 * struct skge_port *skge = netdev_priv(dev);
 * pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
221 * return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
224 * static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
228 * pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
231 * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
232 * } while (!(offset & PCI_VPD_ADDR_F));
234 * pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
238 * static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
240 * pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
241 * pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
242 * offset | PCI_VPD_ADDR_F);
245 * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
246 * } while (offset & PCI_VPD_ADDR_F);
249 * static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
252 * struct skge_port *skge = netdev_priv(dev);
253 * struct pci_dev *pdev = skge->hw->pdev;
254 * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
255 * int length = eeprom->len;
256 * u16 offset = eeprom->offset;
261 * eeprom->magic = SKGE_EEPROM_MAGIC;
263 * while (length > 0) {
264 * u32 val = skge_vpd_read(pdev, cap, offset);
265 * int n = min_t(int, length, sizeof(val));
267 * memcpy(data, &val, n);
275 * static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
278 * struct skge_port *skge = netdev_priv(dev);
279 * struct pci_dev *pdev = skge->hw->pdev;
280 * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
281 * int length = eeprom->len;
282 * u16 offset = eeprom->offset;
287 * if (eeprom->magic != SKGE_EEPROM_MAGIC)
290 * while (length > 0) {
292 * int n = min_t(int, length, sizeof(val));
294 * if (n < sizeof(val))
295 * val = skge_vpd_read(pdev, cap, offset);
296 * memcpy(&val, data, n);
298 * skge_vpd_write(pdev, cap, offset, val);
/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 *
 * vaddr is the start of the hardware descriptor area, base its bus
 * address.  NOTE(review): the element-count parameter, the allocation
 * failure check and the loop's if/else lines are missing from this
 * listing — confirm against the full source.
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base,
	struct skge_tx_desc *d;
	struct skge_element *e;

	/* one software element per hardware descriptor, zero-initialised */
	ring->start = zalloc(num*sizeof(*e));

	for (i = 0, e = ring->start, d = vaddr; i < num; i++, e++, d++) {
		/* last element: wrap back to the start of the ring */
		e->next = ring->start;
		d->next_offset = base;
		/* otherwise: chain to the next hardware descriptor */
		d->next_offset = base + (i+1) * sizeof(*d);

	/* ring starts empty: producer and consumer at the first element */
	ring->to_use = ring->to_clean = ring->start;
/* Allocate and setup a new buffer for receiving */
/* Program receive descriptor 'e' with the bus address of iob's data
 * (or a null mapping when iob is NULL) and hand it to the hardware. */
static void skge_rx_setup(struct skge_port *skge __unused,
			  struct skge_element *e,
			  struct io_buffer *iob, unsigned int bufsize)
	struct skge_rx_desc *rd = e->desc;

	map = ( iob != NULL ) ? virt_to_bus(iob->data) : 0;
	/* high 32 bits of the 64-bit DMA address */
	rd->dma_hi = map >> 32;
	/* hardware checksumming starts just past the Ethernet header */
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;

	/* setting BMU_OWN hands the descriptor to the hardware, so this
	 * store must come after all other descriptor fields are written */
	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
/* Resume receiving using existing skb,
 * Note: DMA address is not changed by chip.
 * MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
	struct skge_rx_desc *rd = e->desc;

	/* reset checksum start; DMA address fields are left untouched */
	rd->csum2_start = ETH_HLEN;

	/* give the descriptor back to the hardware (BMU_OWN last) */
	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	/* walk the circular ring exactly once, reclaiming each
	 * descriptor (reclaim statements elided in this listing) */
		struct skge_rx_desc *rd = e->desc;
	} while ((e = e->next) != ring->start);
/* Bring the link up: light the link LED, tell the net core, and log
 * the negotiated speed/duplex. */
static void skge_link_up(struct skge_port *skge)
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);

	netdev_link_up(skge->netdev);

	DBG2(PFX "%s: Link is up at %d Mbps, %s duplex\n",
	     skge->netdev->name, skge->speed,
	     skge->duplex == DUPLEX_FULL ? "full" : "half");
407 static void skge_link_down(struct skge_port *skge)
409 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
410 netdev_link_down(skge->netdev);
412 DBG2(PFX "%s: Link is down.\n", skge->netdev->name);
416 static void xm_link_down(struct skge_hw *hw, int port)
418 struct net_device *dev = hw->dev[port];
419 struct skge_port *skge = netdev_priv(dev);
421 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
423 if (netdev_link_ok(dev))
424 skge_link_down(skge);
/* Read PHY register 'reg' through the XMAC MDIO interface into *val.
 * The internal XMAC PHY answers immediately; external PHYs require
 * polling XM_MMU_CMD for the ready bit before the data is valid.
 * NOTE(review): return statements and delay lines are missing from
 * this listing — presumably returns 0 on success, negative on timeout. */
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	/* internal XMAC PHY: first read is already valid */
	if (hw->phy_type == SK_PHY_XMAC)

	/* external PHY: wait for the MMU to signal ready */
	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
	/* re-read now that the PHY has completed the transfer */
	*val = xm_read16(hw, port, XM_PHY_DATA);
/* Convenience wrapper around __xm_phy_read(): returns the register
 * value directly and merely logs a timeout instead of propagating it. */
static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
	if (__xm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timed out\n",
		    hw->dev[port]->name);
/* Write 'val' to PHY register 'reg' through the XMAC MDIO interface.
 * Waits for the PHY to be non-busy both before and after the data
 * write.  NOTE(review): timeout returns and delay lines are missing
 * from this listing. */
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	/* wait until any previous transaction has completed */
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))

	xm_write16(hw, port, XM_PHY_DATA, val);
	/* wait for this write to be accepted */
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
481 static void genesis_init(struct skge_hw *hw)
483 /* set blink source counter */
484 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
485 skge_write8(hw, B2_BSC_CTRL, BSC_START);
487 /* configure mac arbiter */
488 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
490 /* configure mac arbiter timeout values */
491 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
492 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
493 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
494 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
496 skge_write8(hw, B3_MA_RCINI_RX1, 0);
497 skge_write8(hw, B3_MA_RCINI_RX2, 0);
498 skge_write8(hw, B3_MA_RCINI_TX1, 0);
499 skge_write8(hw, B3_MA_RCINI_TX2, 0);
501 /* configure packet arbiter timeout */
502 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
503 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
504 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
505 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
506 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
509 static void genesis_reset(struct skge_hw *hw, int port)
511 const u8 zero[8] = { 0 };
514 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
516 /* reset the statistics module */
517 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
518 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
519 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
520 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
521 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
523 /* disable Broadcom PHY IRQ */
524 if (hw->phy_type == SK_PHY_BCOM)
525 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
527 xm_outhash(hw, port, XM_HSM, zero);
529 /* Flush TX and RX fifo */
530 reg = xm_read32(hw, port, XM_MODE);
531 xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
532 xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
/* Convert mode to MII values */
/* Copper: map flow-control mode to 802.3 autonegotiation
 * advertisement bits */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] = 0,
	[FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
/* special defines for FIBER (88E1011S only) */
/* Fiber: same flow-control modes, different advertisement encoding */
static const u16 fiber_pause_map[] = {
	[FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE,
	[FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD,
	[FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD,
	[FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
/* Check status of Broadcom phy link */
/* Reads the BCOM PHY status and, when autonegotiation has completed,
 * extracts duplex and flow-control results before reporting link up.
 * NOTE(review): break/return/case-label and brace lines are missing
 * from this listing — the switch arms below are incomplete. */
static void bcom_check_link(struct skge_hw *hw, int port)
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	/* no link sync: report link down and bail out */
	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);

	if (skge->autoneg == AUTONEG_ENABLE) {
		/* autonegotiation still in progress: nothing to do yet */
		if (!(status & PHY_ST_AN_OVER))

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",

		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

		/* Check Duplex mismatch */
		switch (aux & PHY_B_AS_AN_RES_MSK) {
		case PHY_B_RES_1000FD:
			skge->duplex = DUPLEX_FULL;
		case PHY_B_RES_1000HD:
			skge->duplex = DUPLEX_HALF;
			DBG(PFX "%s: duplex mismatch\n",

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (aux & PHY_B_AS_PAUSE_MSK) {
		case PHY_B_AS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			skge->flow_status = FLOW_STAT_REM_SEND;
			skge->flow_status = FLOW_STAT_LOC_SEND;
			skge->flow_status = FLOW_STAT_NONE;

		/* BCOM 5400 is gigabit-only (see bcom_phy_init) */
		skge->speed = SPEED_1000;

	/* report link up only on a down-to-up transition */
	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
 * Phy on for 100 or 10Mbit operation
 */
/* Initialize an external Broadcom PHY: apply the errata workarounds
 * for the detected silicon revision, program autonegotiation (or
 * forced duplex) and enable the link status change interrupt. */
static void bcom_phy_init(struct skge_port *skge)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		/* C0 revision patterns */
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
		/* A1 revision patterns */
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	xm_write16(hw, port, XM_MMU_CMD,r);

	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* dummy read to clear pending interrupts */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
		/* forced mode: set duplex, advertise master/slave enable */
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
/* Initialize the internal XMAC PHY: program the autonegotiation
 * advertisement (or forced duplex) and arm the poll timer, since the
 * internal PHY cannot raise a carrier-detect interrupt. */
static void xm_phy_init(struct skge_port *skge)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (skge->advertising & ADVERTISED_1000baseT_Half)
		if (skge->advertising & ADVERTISED_1000baseT_Full)

		/* advertise flow-control capability (fiber encoding) */
		ctrl |= fiber_pause_map[skge->flow_control];

		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

		/* Restart Auto-negotiation */
		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;

		/* Set DuplexMode in Config register */
		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;
		/*
		 * Do NOT enable Auto-negotiation here. This would hold
		 * the link down because no IDLEs are transmitted
		 */

	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

	/* Poll PHY for status changes */
	skge->use_xm_link_timer = 1;
/* Poll the internal XMAC PHY link status.  On autoneg completion,
 * extract duplex and flow-control results and report link up.
 * NOTE(review): return statements, case labels and brace lines are
 * missing from this listing — presumably returns nonzero when the
 * link is up (see the caller xm_link_timer). */
static int xm_check_link(struct net_device *dev)
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_XMAC_STAT);
	status = xm_phy_read(hw, port, PHY_XMAC_STAT);

	/* no link sync: report link down */
	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);

	if (skge->autoneg == AUTONEG_ENABLE) {
		/* autonegotiation still in progress */
		if (!(status & PHY_ST_AN_OVER))

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",

		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

		/* Check Duplex mismatch */
		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
			skge->duplex = DUPLEX_FULL;
			skge->duplex = DUPLEX_HALF;
			DBG(PFX "%s: duplex mismatch\n",

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
		    (lpa & PHY_X_P_SYM_MD))
			skge->flow_status = FLOW_STAT_SYMMETRIC;
		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
			/* Enable PAUSE receive, disable PAUSE transmit */
			skge->flow_status = FLOW_STAT_REM_SEND;
		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
			/* Disable PAUSE receive, enable PAUSE transmit */
			skge->flow_status = FLOW_STAT_LOC_SEND;
			skge->flow_status = FLOW_STAT_NONE;

		/* XMAC fiber is gigabit-only */
		skge->speed = SPEED_1000;

	/* report link up only on a down-to-up transition */
	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
/* Poll to check for link coming up.
 *
 * Since internal PHY is wired to a level triggered pin, can't
 * get an interrupt when carrier is detected, need to poll for
 * link coming up.
 */
static void xm_link_timer(struct skge_port *skge)
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/*
	 * Verify that the link by checking GPIO register three times.
	 * This pin has the signal from the link_sync pin connected to it.
	 */
	for (i = 0; i < 3; i++) {
		/* input asserted means no link; give up early */
		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)

	/* Re-enable interrupt to detect link down */
	if (xm_check_link(dev)) {
		u16 msk = xm_read16(hw, port, XM_IMSK);
		msk &= ~XM_IS_INP_ASS;
		xm_write16(hw, port, XM_IMSK, msk);
		/* dummy read clears any pending interrupt */
		xm_read16(hw, port, XM_ISRC);
/* Bring up one Genesis (XMAC) port: reset the MAC, initialize the
 * internal or external PHY, program station address, counters, FIFO
 * watermarks and the MAC/packet arbiters. */
static void genesis_mac_init(struct skge_hw *hw, int port)
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	const u8 zero[6]  = { 0 };

	/* assert MAC reset and wait for it to take effect */
	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)

	DBG(PFX "%s: genesis reset failed\n", dev->name);

	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take external Phy out of reset */
		r = skge_read32(hw, B2_GP_IO);
			r |= GP_DIR_0|GP_IO_0;
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);

		/* Enable GMII interface */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);

	/* dispatch PHY-specific initialization */
	switch(hw->phy_type) {
		bcom_check_link(hw, port);

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->ll_addr);

	/* We don't use match addresses so clear */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 */

	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	/* tx request threshold */
	xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);

	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* enable timeout timers */
	skge_write16(hw, B3_PA_CTRL,
		     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
/* Stop one Genesis (XMAC) port: disable rx/tx, reset the MAC (polling
 * until the reset bit sticks), and for external PHYs drive the GPIO
 * reset lines. */
static void genesis_stop(struct skge_port *skge)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned retries = 1000;

	/* Disable Tx and Rx */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	xm_write16(hw, port, XM_MMU_CMD, cmd);

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/* toggle the MAC reset until the set-reset state is observed */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
		if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
	} while (--retries > 0);

	/* For external PHYs there must be special handling */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		skge_write32(hw, B2_GP_IO, reg);
		/* read back to flush the posted write */
		skge_read32(hw, B2_GP_IO);

	/* make sure rx/tx really are disabled */
	xm_write16(hw, port, XM_MMU_CMD,
		   xm_read16(hw, port, XM_MMU_CMD)
		   & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);
/* Complete link bring-up on a Genesis (XMAC) port: program pause-frame
 * handling from the negotiated flow-control status, enable Tx-underrun
 * detection, and finally enable the receiver and transmitter. */
static void genesis_link_up(struct skge_port *skge)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_status == FLOW_STAT_NONE ||
	    skge->flow_status == FLOW_STAT_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_status== FLOW_STAT_SYMMETRIC ||
	    skge->flow_status == FLOW_STAT_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal oder external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
		/*
		 * disable pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);

	xm_write32(hw, port, XM_MODE, mode);

	/* Turn on detection of Tx underrun */
	msk = xm_read16(hw, port, XM_IMSK);
	msk &= ~XM_IS_TXF_UR;
	xm_write16(hw, port, XM_IMSK, msk);

	/* dummy read clears pending interrupts */
	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	if (hw->phy_type == SK_PHY_BCOM) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
			     & ~PHY_B_AC_DIS_PM);
		xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
		   cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
/* Service a Broadcom PHY interrupt: log pair-swap errors, apply the
 * "NO HCD" loopback-toggle errata workaround, and re-check the link
 * on autoneg-complete or link-state-change events. */
static inline void bcom_phy_intr(struct skge_port *skge)
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/* reading the interrupt status register also clears it */
	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	DBGIO(PFX "%s: phy interrupt status 0x%x\n",
	      skge->netdev->name, isrc);

	if (isrc & PHY_B_IS_PSE)
		DBG(PFX "%s: uncorrectable pair swap error\n",
		    hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl & ~PHY_CT_LOOP);

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);
/* Write 'val' to Marvell PHY register 'reg' via the GMAC SMI interface,
 * polling until the controller reports not-busy.
 * NOTE(review): the success return and delay lines are missing from
 * this listing — presumably returns 0 on success, negative on timeout. */
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))

	DBG(PFX "%s: phy write timeout port %x reg %x val %x\n",
	    hw->dev[port]->name,
/* Read Marvell PHY register 'reg' into *val via the GMAC SMI
 * interface, polling until the read-valid bit is set.
 * NOTE(review): timeout return and delay lines are missing from this
 * listing — presumably returns 0 on success, negative on timeout. */
static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
	/* issue the read command for phy_addr/reg */
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)

	*val = gma_read16(hw, port, GM_SMI_DATA);
/* Convenience wrapper around __gm_phy_read(): returns the register
 * value directly and merely logs a timeout instead of propagating it. */
static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
	if (__gm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timeout port %x reg %x val %x\n",
		    hw->dev[port]->name,
/* Marvell Phy Initialization */
/* Program the Marvell PHY for autonegotiation (advertising the modes
 * in skge->advertising plus flow control) or for forced speed/duplex,
 * then enable the appropriate PHY interrupt mask. */
static void yukon_init(struct skge_hw *hw, int port)
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		/* reprogram master/slave downshift and MAC clock */
		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	/* reset the PHY so the new configuration takes effect */
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	if (skge->autoneg == AUTONEG_ENABLE) {
		/* copper: build 10/100/1000 advertisement */
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			ct1000 |= PHY_M_1000C_AFD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			ct1000 |= PHY_M_1000C_AHD;
		if (skge->advertising & ADVERTISED_100baseT_Full)
			adv |= PHY_M_AN_100_FD;
		if (skge->advertising & ADVERTISED_100baseT_Half)
			adv |= PHY_M_AN_100_HD;
		if (skge->advertising & ADVERTISED_10baseT_Full)
			adv |= PHY_M_AN_10_FD;
		if (skge->advertising & ADVERTISED_10baseT_Half)
			adv |= PHY_M_AN_10_HD;

		/* Set Flow-control capabilities */
		adv |= phy_pause_map[skge->flow_control];

		/* fiber: gigabit advertisement only */
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_M_AN_1000X_AFD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_M_AN_1000X_AHD;

		adv |= fiber_pause_map[skge->flow_control];

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;

		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
			ctrl |= PHY_CT_SP1000;
			ctrl |= PHY_CT_SP100;

		/* another reset to apply the forced configuration */
		ctrl |= PHY_CT_RESET;

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
1298 static void yukon_reset(struct skge_hw *hw, int port)
1300 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1301 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1302 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1303 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1304 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1306 gma_write16(hw, port, GM_RX_CTRL,
1307 gma_read16(hw, port, GM_RX_CTRL)
1308 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1311 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1312 static int is_yukon_lite_a0(struct skge_hw *hw)
1317 if (hw->chip_id != CHIP_ID_YUKON)
1320 reg = skge_read32(hw, B2_FAR);
1321 skge_write8(hw, B2_FAR + 3, 0xff);
1322 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1323 skge_write32(hw, B2_FAR, reg);
1327 static void yukon_mac_init(struct skge_hw *hw, int port)
1329 struct skge_port *skge = netdev_priv(hw->dev[port]);
1332 const u8 *addr = hw->dev[port]->ll_addr;
1334 /* WA code for COMA mode -- set PHY reset */
1335 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1336 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1337 reg = skge_read32(hw, B2_GP_IO);
1338 reg |= GP_DIR_9 | GP_IO_9;
1339 skge_write32(hw, B2_GP_IO, reg);
1343 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1344 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1346 /* WA code for COMA mode -- clear PHY reset */
1347 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1348 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1349 reg = skge_read32(hw, B2_GP_IO);
1352 skge_write32(hw, B2_GP_IO, reg);
1355 /* Set hardware config mode */
1356 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1357 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1358 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1360 /* Clear GMC reset */
1361 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1362 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1363 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1365 if (skge->autoneg == AUTONEG_DISABLE) {
1366 reg = GM_GPCR_AU_ALL_DIS;
1367 gma_write16(hw, port, GM_GP_CTRL,
1368 gma_read16(hw, port, GM_GP_CTRL) | reg);
1370 switch (skge->speed) {
1372 reg &= ~GM_GPCR_SPEED_100;
1373 reg |= GM_GPCR_SPEED_1000;
1376 reg &= ~GM_GPCR_SPEED_1000;
1377 reg |= GM_GPCR_SPEED_100;
1380 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1384 if (skge->duplex == DUPLEX_FULL)
1385 reg |= GM_GPCR_DUP_FULL;
1387 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1389 switch (skge->flow_control) {
1390 case FLOW_MODE_NONE:
1391 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1392 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1394 case FLOW_MODE_LOC_SEND:
1395 /* disable Rx flow-control */
1396 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1398 case FLOW_MODE_SYMMETRIC:
1399 case FLOW_MODE_SYM_OR_REM:
1400 /* enable Tx & Rx flow-control */
1404 gma_write16(hw, port, GM_GP_CTRL, reg);
1405 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
1407 yukon_init(hw, port);
1410 reg = gma_read16(hw, port, GM_PHY_ADDR);
1411 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1413 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1414 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1415 gma_write16(hw, port, GM_PHY_ADDR, reg);
1417 /* transmit control */
1418 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1420 /* receive control reg: unicast + multicast + no FCS */
1421 gma_write16(hw, port, GM_RX_CTRL,
1422 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1424 /* transmit flow control */
1425 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1427 /* transmit parameter */
1428 gma_write16(hw, port, GM_TX_PARAM,
1429 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1430 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1431 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
1433 /* configure the Serial Mode Register */
1434 reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
1436 | IPG_DATA_VAL(IPG_DATA_DEF);
1438 gma_write16(hw, port, GM_SERIAL_MODE, reg);
1440 /* physical address: used for pause frames */
1441 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1442 /* virtual address for data */
1443 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1445 /* enable interrupt mask for counter overflows */
1446 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1447 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1448 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1450 /* Initialize Mac Fifo */
1452 /* Configure Rx MAC FIFO */
1453 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1454 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1456 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1457 if (is_yukon_lite_a0(hw))
1458 reg &= ~GMF_RX_F_FL_ON;
1460 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1461 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1463 * because Pause Packet Truncation in GMAC is not working
1464 * we have to increase the Flush Threshold to 64 bytes
1465 * in order to flush pause packets in Rx FIFO on Yukon-1
1467 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
1469 /* Configure Tx MAC FIFO */
1470 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1471 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1474 /* Go into power down mode */
1475 static void yukon_suspend(struct skge_hw *hw, int port)
1479 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
1480 ctrl |= PHY_M_PC_POL_R_DIS;
1481 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
1483 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1484 ctrl |= PHY_CT_RESET;
1485 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1487 /* switch IEEE compatible power down mode on */
1488 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1489 ctrl |= PHY_CT_PDOWN;
1490 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1493 static void yukon_stop(struct skge_port *skge)
1495 struct skge_hw *hw = skge->hw;
1496 int port = skge->port;
1498 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1499 yukon_reset(hw, port);
1501 gma_write16(hw, port, GM_GP_CTRL,
1502 gma_read16(hw, port, GM_GP_CTRL)
1503 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
1504 gma_read16(hw, port, GM_GP_CTRL);
1506 yukon_suspend(hw, port);
1508 /* set GPHY Control reset */
1509 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1510 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1513 static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
1515 switch (aux & PHY_M_PS_SPEED_MSK) {
1516 case PHY_M_PS_SPEED_1000:
1518 case PHY_M_PS_SPEED_100:
1525 static void yukon_link_up(struct skge_port *skge)
1527 struct skge_hw *hw = skge->hw;
1528 int port = skge->port;
1531 /* Enable Transmit FIFO Underrun */
1532 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1534 reg = gma_read16(hw, port, GM_GP_CTRL);
1535 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1536 reg |= GM_GPCR_DUP_FULL;
1539 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1540 gma_write16(hw, port, GM_GP_CTRL, reg);
1542 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
1546 static void yukon_link_down(struct skge_port *skge)
1548 struct skge_hw *hw = skge->hw;
1549 int port = skge->port;
1552 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1553 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1554 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1556 if (skge->flow_status == FLOW_STAT_REM_SEND) {
1557 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
1558 ctrl |= PHY_M_AN_ASP;
1559 /* restore Asymmetric Pause bit */
1560 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
1563 skge_link_down(skge);
1565 yukon_init(hw, port);
1568 static void yukon_phy_intr(struct skge_port *skge)
1570 struct skge_hw *hw = skge->hw;
1571 int port = skge->port;
1572 const char *reason = NULL;
1573 u16 istatus, phystat;
1575 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1576 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1578 DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
1579 skge->netdev->name, istatus, phystat);
1581 if (istatus & PHY_M_IS_AN_COMPL) {
1582 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
1584 reason = "remote fault";
1588 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1589 reason = "master/slave fault";
1593 if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1594 reason = "speed/duplex";
1598 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
1599 ? DUPLEX_FULL : DUPLEX_HALF;
1600 skge->speed = yukon_speed(hw, phystat);
1602 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1603 switch (phystat & PHY_M_PS_PAUSE_MSK) {
1604 case PHY_M_PS_PAUSE_MSK:
1605 skge->flow_status = FLOW_STAT_SYMMETRIC;
1607 case PHY_M_PS_RX_P_EN:
1608 skge->flow_status = FLOW_STAT_REM_SEND;
1610 case PHY_M_PS_TX_P_EN:
1611 skge->flow_status = FLOW_STAT_LOC_SEND;
1614 skge->flow_status = FLOW_STAT_NONE;
1617 if (skge->flow_status == FLOW_STAT_NONE ||
1618 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
1619 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1621 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1622 yukon_link_up(skge);
1626 if (istatus & PHY_M_IS_LSP_CHANGE)
1627 skge->speed = yukon_speed(hw, phystat);
1629 if (istatus & PHY_M_IS_DUP_CHANGE)
1630 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1631 if (istatus & PHY_M_IS_LST_CHANGE) {
1632 if (phystat & PHY_M_PS_LINK_UP)
1633 yukon_link_up(skge);
1635 yukon_link_down(skge);
1639 DBG(PFX "%s: autonegotiation failed (%s)\n",
1640 skge->netdev->name, reason);
1642 /* XXX restart autonegotiation? */
1645 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
1651 end = start + len - 1;
1653 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
1654 skge_write32(hw, RB_ADDR(q, RB_START), start);
1655 skge_write32(hw, RB_ADDR(q, RB_WP), start);
1656 skge_write32(hw, RB_ADDR(q, RB_RP), start);
1657 skge_write32(hw, RB_ADDR(q, RB_END), end);
1659 if (q == Q_R1 || q == Q_R2) {
1660 /* Set thresholds on receive queue's */
1661 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
1663 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
1666 /* Enable store & forward on Tx queue's because
1667 * Tx FIFO is only 4K on Genesis and 1K on Yukon
1669 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
1672 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
1675 /* Setup Bus Memory Interface */
1676 static void skge_qset(struct skge_port *skge, u16 q,
1677 const struct skge_element *e)
1679 struct skge_hw *hw = skge->hw;
1680 u32 watermark = 0x600;
1681 u64 base = skge->dma + (e->desc - skge->mem);
1683 /* optimization to reduce window on 32bit/33mhz */
1684 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
1687 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
1688 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
1689 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
1690 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
1693 void skge_free(struct net_device *dev)
1695 struct skge_port *skge = netdev_priv(dev);
1697 free(skge->rx_ring.start);
1698 skge->rx_ring.start = NULL;
1700 free(skge->tx_ring.start);
1701 skge->tx_ring.start = NULL;
1703 free_dma(skge->mem, RING_SIZE);
1708 static int skge_up(struct net_device *dev)
1710 struct skge_port *skge = netdev_priv(dev);
1711 struct skge_hw *hw = skge->hw;
1712 int port = skge->port;
1713 u32 chunk, ram_addr;
1716 DBG2(PFX "%s: enabling interface\n", dev->name);
1718 skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
1719 skge->dma = virt_to_bus(skge->mem);
1722 memset(skge->mem, 0, RING_SIZE);
1724 assert(!(skge->dma & 7));
1726 /* FIXME: find out whether 64 bit gPXE will be loaded > 4GB */
1727 if ((u64)skge->dma >> 32 != ((u64) skge->dma + RING_SIZE) >> 32) {
1728 DBG(PFX "pci_alloc_consistent region crosses 4G boundary\n");
1733 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma, NUM_RX_DESC);
1737 /* this call relies on e->iob and d->control to be 0
1738 * This is assured by calling memset() on skge->mem and using zalloc()
1739 * for the skge_element structures.
1741 skge_rx_refill(dev);
1743 err = skge_ring_alloc(&skge->tx_ring, skge->mem + RX_RING_SIZE,
1744 skge->dma + RX_RING_SIZE, NUM_TX_DESC);
1748 /* Initialize MAC */
1749 if (hw->chip_id == CHIP_ID_GENESIS)
1750 genesis_mac_init(hw, port);
1752 yukon_mac_init(hw, port);
1754 /* Configure RAMbuffers - equally between ports and tx/rx */
1755 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
1756 ram_addr = hw->ram_offset + 2 * chunk * port;
1758 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
1759 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
1761 assert(!(skge->tx_ring.to_use != skge->tx_ring.to_clean));
1762 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
1763 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
1765 /* Start receiver BMU */
1767 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
1768 skge_led(skge, LED_MODE_ON);
1770 hw->intr_mask |= portmask[port];
1771 skge_write32(hw, B0_IMSK, hw->intr_mask);
1776 skge_rx_clean(skge);
1783 static void skge_rx_stop(struct skge_hw *hw, int port)
1785 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
1786 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
1787 RB_RST_SET|RB_DIS_OP_MD);
1788 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
1791 static void skge_down(struct net_device *dev)
1793 struct skge_port *skge = netdev_priv(dev);
1794 struct skge_hw *hw = skge->hw;
1795 int port = skge->port;
1797 if (skge->mem == NULL)
1800 DBG2(PFX "%s: disabling interface\n", dev->name);
1802 if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
1803 skge->use_xm_link_timer = 0;
1805 netdev_link_down(dev);
1807 hw->intr_mask &= ~portmask[port];
1808 skge_write32(hw, B0_IMSK, hw->intr_mask);
1810 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
1811 if (hw->chip_id == CHIP_ID_GENESIS)
1816 /* Stop transmitter */
1817 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
1818 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1819 RB_RST_SET|RB_DIS_OP_MD);
1822 /* Disable Force Sync bit and Enable Alloc bit */
1823 skge_write8(hw, SK_REG(port, TXA_CTRL),
1824 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1826 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1827 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1828 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1830 /* Reset PCI FIFO */
1831 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
1832 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1834 /* Reset the RAM Buffer async Tx queue */
1835 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
1837 skge_rx_stop(hw, port);
1839 if (hw->chip_id == CHIP_ID_GENESIS) {
1840 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
1841 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
1843 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1844 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1847 skge_led(skge, LED_MODE_OFF);
1851 skge_rx_clean(skge);
1857 static inline int skge_tx_avail(const struct skge_ring *ring)
1860 return ((ring->to_clean > ring->to_use) ? 0 : NUM_TX_DESC)
1861 + (ring->to_clean - ring->to_use) - 1;
1864 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
1866 struct skge_port *skge = netdev_priv(dev);
1867 struct skge_hw *hw = skge->hw;
1868 struct skge_element *e;
1869 struct skge_tx_desc *td;
1873 if (skge_tx_avail(&skge->tx_ring) < 1)
1876 e = skge->tx_ring.to_use;
1878 assert(!(td->control & BMU_OWN));
1881 map = virt_to_bus(iob->data);
1884 td->dma_hi = map >> 32;
1886 control = BMU_CHECK;
1888 control |= BMU_EOF| BMU_IRQ_EOF;
1889 /* Make sure all the descriptors written */
1891 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
1894 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
1896 DBGIO(PFX "%s: tx queued, slot %td, len %d\n",
1897 dev->name, e - skge->tx_ring.start, (unsigned int)len);
1899 skge->tx_ring.to_use = e->next;
1902 if (skge_tx_avail(&skge->tx_ring) <= 1) {
1903 DBG(PFX "%s: transmit queue full\n", dev->name);
1909 /* Free all buffers in transmit ring */
1910 static void skge_tx_clean(struct net_device *dev)
1912 struct skge_port *skge = netdev_priv(dev);
1913 struct skge_element *e;
1915 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
1916 struct skge_tx_desc *td = e->desc;
1920 skge->tx_ring.to_clean = e;
/* IEEE 802.3 PAUSE-frame multicast destination 01:80:c2:00:00:01 */
static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
1925 static inline u16 phy_length(const struct skge_hw *hw, u32 status)
1927 if (hw->chip_id == CHIP_ID_GENESIS)
1928 return status >> XMR_FS_LEN_SHIFT;
1930 return status >> GMR_FS_LEN_SHIFT;
1933 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
1935 if (hw->chip_id == CHIP_ID_GENESIS)
1936 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
1938 return (status & GMR_FS_ANY_ERR) ||
1939 (status & GMR_FS_RX_OK) == 0;
1942 /* Free all buffers in Tx ring which are no longer owned by device */
1943 static void skge_tx_done(struct net_device *dev)
1945 struct skge_port *skge = netdev_priv(dev);
1946 struct skge_ring *ring = &skge->tx_ring;
1947 struct skge_element *e;
1949 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
1951 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
1952 u32 control = ((const struct skge_tx_desc *) e->desc)->control;
1954 if (control & BMU_OWN)
1957 netdev_tx_complete(dev, e->iob);
1959 skge->tx_ring.to_clean = e;
1961 /* Can run lockless until we need to synchronize to restart queue. */
1965 static void skge_rx_refill(struct net_device *dev)
1967 struct skge_port *skge = netdev_priv(dev);
1968 struct skge_ring *ring = &skge->rx_ring;
1969 struct skge_element *e;
1970 struct io_buffer *iob;
1971 struct skge_rx_desc *rd;
1975 for (i = 0; i < NUM_RX_DESC; i++) {
1979 control = rd->control;
1981 /* nothing to do here */
1982 if (iob || (control & BMU_OWN))
1985 DBG2("refilling rx desc %d: ", (ring->to_clean - ring->start));
1987 iob = alloc_iob(RX_BUF_SIZE);
1989 skge_rx_setup(skge, e, iob, RX_BUF_SIZE);
1991 DBG("descr %d: alloc_iob() failed\n",
1992 (ring->to_clean - ring->start));
1993 /* We pass the descriptor to the NIC even if the
1994 * allocation failed. The card will stop as soon as it
1995 * encounters a descriptor with the OWN bit set to 0,
1996 * thus never getting to the next descriptor that might
1997 * contain a valid io_buffer. This would effectively
1998 * stall the receive.
2000 skge_rx_setup(skge, e, NULL, 0);
2003 ring->to_clean = e->next;
2007 static void skge_rx_done(struct net_device *dev)
2009 struct skge_port *skge = netdev_priv(dev);
2010 struct skge_ring *ring = &skge->rx_ring;
2011 struct skge_rx_desc *rd;
2012 struct skge_element *e;
2013 struct io_buffer *iob;
2019 for (i = 0; i < NUM_RX_DESC; i++) {
2024 control = rd->control;
2026 if ((control & BMU_OWN))
2032 len = control & BMU_BBC;
2034 /* catch RX errors */
2035 if ((bad_phy_status(skge->hw, rd->status)) ||
2036 (phy_length(skge->hw, rd->status) != len)) {
2037 /* report receive errors */
2039 netdev_rx_err(dev, iob, -EIO);
2041 DBG2("received packet, len %d\n", len);
2043 netdev_rx(dev, iob);
2046 /* io_buffer passed to core, make sure we don't reuse it */
2051 skge_rx_refill(dev);
2054 static void skge_poll(struct net_device *dev)
2056 struct skge_port *skge = netdev_priv(dev);
2057 struct skge_hw *hw = skge->hw;
2060 /* reading this register ACKs interrupts */
2061 status = skge_read32(hw, B0_SP_ISRC);
2064 if (status & IS_EXT_REG) {
2066 if (skge->use_xm_link_timer)
2067 xm_link_timer(skge);
2072 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2076 /* restart receiver */
2078 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
2080 skge_read32(hw, B0_IMSK);
2085 static void skge_phyirq(struct skge_hw *hw)
2089 for (port = 0; port < hw->ports; port++) {
2090 struct net_device *dev = hw->dev[port];
2091 struct skge_port *skge = netdev_priv(dev);
2093 if (hw->chip_id != CHIP_ID_GENESIS)
2094 yukon_phy_intr(skge);
2095 else if (hw->phy_type == SK_PHY_BCOM)
2096 bcom_phy_intr(skge);
2099 hw->intr_mask |= IS_EXT_REG;
2100 skge_write32(hw, B0_IMSK, hw->intr_mask);
2101 skge_read32(hw, B0_IMSK);
2104 static const struct {
2108 { CHIP_ID_GENESIS, "Genesis" },
2109 { CHIP_ID_YUKON, "Yukon" },
2110 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2111 { CHIP_ID_YUKON_LP, "Yukon-LP"},
2114 static const char *skge_board_name(const struct skge_hw *hw)
2117 static char buf[16];
2119 for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
2120 if (skge_chips[i].id == hw->chip_id)
2121 return skge_chips[i].name;
2123 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
2129 * Setup the board data structure, but don't bring up
2132 static int skge_reset(struct skge_hw *hw)
2135 u16 ctst, pci_status;
2136 u8 t8, mac_cfg, pmd_type;
2139 ctst = skge_read16(hw, B0_CTST);
2142 skge_write8(hw, B0_CTST, CS_RST_SET);
2143 skge_write8(hw, B0_CTST, CS_RST_CLR);
2145 /* clear PCI errors, if any */
2146 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2147 skge_write8(hw, B2_TST_CTRL2, 0);
2149 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2150 pci_write_config_word(hw->pdev, PCI_STATUS,
2151 pci_status | PCI_STATUS_ERROR_BITS);
2152 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2153 skge_write8(hw, B0_CTST, CS_MRST_CLR);
2155 /* restore CLK_RUN bits (for Yukon-Lite) */
2156 skge_write16(hw, B0_CTST,
2157 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2159 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2160 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2161 pmd_type = skge_read8(hw, B2_PMD_TYP);
2162 hw->copper = (pmd_type == 'T' || pmd_type == '1');
2164 switch (hw->chip_id) {
2165 case CHIP_ID_GENESIS:
2166 switch (hw->phy_type) {
2168 hw->phy_addr = PHY_ADDR_XMAC;
2171 hw->phy_addr = PHY_ADDR_BCOM;
2174 DBG(PFX "unsupported phy type 0x%x\n",
2181 case CHIP_ID_YUKON_LITE:
2182 case CHIP_ID_YUKON_LP:
2183 if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2186 hw->phy_addr = PHY_ADDR_MARV;
2190 DBG(PFX "unsupported chip type 0x%x\n",
2195 mac_cfg = skge_read8(hw, B2_MAC_CFG);
2196 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2197 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2199 /* read the adapters RAM size */
2200 t8 = skge_read8(hw, B2_E_0);
2201 if (hw->chip_id == CHIP_ID_GENESIS) {
2203 /* special case: 4 x 64k x 36, offset = 0x80000 */
2204 hw->ram_size = 0x100000;
2205 hw->ram_offset = 0x80000;
2207 hw->ram_size = t8 * 512;
2210 hw->ram_size = 0x20000;
2212 hw->ram_size = t8 * 4096;
2214 hw->intr_mask = IS_HW_ERR;
2216 /* Use PHY IRQ for all but fiber based Genesis board */
2217 if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
2218 hw->intr_mask |= IS_EXT_REG;
2220 if (hw->chip_id == CHIP_ID_GENESIS)
2223 /* switch power to VCC (WA for VAUX problem) */
2224 skge_write8(hw, B0_POWER_CTRL,
2225 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
2227 /* avoid boards with stuck Hardware error bits */
2228 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2229 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
2230 DBG(PFX "stuck hardware sensor bit\n");
2231 hw->intr_mask &= ~IS_HW_ERR;
2234 /* Clear PHY COMA */
2235 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2236 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®);
2237 reg &= ~PCI_PHY_COMA;
2238 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
2239 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2242 for (i = 0; i < hw->ports; i++) {
2243 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2244 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2248 /* turn off hardware timer (unused) */
2249 skge_write8(hw, B2_TI_CTRL, TIM_STOP);
2250 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2251 skge_write8(hw, B0_LED, LED_STAT_ON);
2253 /* enable the Tx Arbiters */
2254 for (i = 0; i < hw->ports; i++)
2255 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2257 /* Initialize ram interface */
2258 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
2260 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
2261 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
2262 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
2263 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
2264 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
2265 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
2266 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
2267 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
2268 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
2269 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
2270 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
2271 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
2273 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
2275 /* Set interrupt moderation for Transmit only
2276 * Receive interrupts avoided by NAPI
2278 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
2279 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
2280 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
2282 skge_write32(hw, B0_IMSK, hw->intr_mask);
2284 for (i = 0; i < hw->ports; i++) {
2285 if (hw->chip_id == CHIP_ID_GENESIS)
2286 genesis_reset(hw, i);
2294 /* Initialize network device */
2295 static struct net_device *skge_devinit(struct skge_hw *hw, int port,
2296 int highmem __unused)
2298 struct skge_port *skge;
2299 struct net_device *dev = alloc_etherdev(sizeof(*skge));
2302 DBG(PFX "etherdev alloc failed\n");
2306 dev->dev = &hw->pdev->dev;
2308 skge = netdev_priv(dev);
2312 /* Auto speed and flow control */
2313 skge->autoneg = AUTONEG_ENABLE;
2314 skge->flow_control = FLOW_MODE_SYM_OR_REM;
2317 skge->advertising = skge_supported_modes(hw);
2319 hw->dev[port] = dev;
2323 /* read the mac address */
2324 memcpy(dev->hw_addr, (void *) (hw->regs + B2_MAC_1 + port*8), ETH_ALEN);
2326 /* device is off until link detection */
2327 netdev_link_down(dev);
2332 static void skge_show_addr(struct net_device *dev)
2334 DBG2(PFX "%s: addr %s\n",
2335 dev->name, netdev_addr(dev));
2338 static int skge_probe(struct pci_device *pdev,
2339 const struct pci_device_id *ent __unused)
2341 struct net_device *dev, *dev1;
2343 int err, using_dac = 0;
2345 adjust_pci_device(pdev);
2348 hw = zalloc(sizeof(*hw));
2350 DBG(PFX "cannot allocate hardware struct\n");
2351 goto err_out_free_regions;
2356 hw->regs = (u32)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0),
2359 DBG(PFX "cannot map device registers\n");
2360 goto err_out_free_hw;
2363 err = skge_reset(hw);
2365 goto err_out_iounmap;
2367 DBG(PFX " addr 0x%llx irq %d chip %s rev %d\n",
2368 (unsigned long long)pdev->ioaddr, pdev->irq,
2369 skge_board_name(hw), hw->chip_rev);
2371 dev = skge_devinit(hw, 0, using_dac);
2373 goto err_out_led_off;
2375 netdev_init ( dev, &skge_operations );
2377 err = register_netdev(dev);
2379 DBG(PFX "cannot register net device\n");
2380 goto err_out_free_netdev;
2383 skge_show_addr(dev);
2385 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
2386 if (register_netdev(dev1) == 0)
2387 skge_show_addr(dev1);
2389 /* Failure to register second port need not be fatal */
2390 DBG(PFX "register of second port failed\n");
2392 netdev_nullify(dev1);
2396 pci_set_drvdata(pdev, hw);
2400 err_out_free_netdev:
2401 netdev_nullify(dev);
2404 skge_write16(hw, B0_LED, LED_STAT_OFF);
2406 iounmap((void*)hw->regs);
2409 err_out_free_regions:
2410 pci_set_drvdata(pdev, NULL);
2414 static void skge_remove(struct pci_device *pdev)
2416 struct skge_hw *hw = pci_get_drvdata(pdev);
2417 struct net_device *dev0, *dev1;
2422 if ((dev1 = hw->dev[1]))
2423 unregister_netdev(dev1);
2425 unregister_netdev(dev0);
2428 skge_write32(hw, B0_IMSK, 0);
2429 skge_read32(hw, B0_IMSK);
2431 skge_write16(hw, B0_LED, LED_STAT_OFF);
2432 skge_write8(hw, B0_CTST, CS_RST_SET);
2435 netdev_nullify(dev1);
2438 netdev_nullify(dev0);
2441 iounmap((void*)hw->regs);
2443 pci_set_drvdata(pdev, NULL);
2447 * Enable or disable IRQ masking.
2449 * @v netdev Device to control.
2450 * @v enable Zero to mask off IRQ, non-zero to enable IRQ.
2452 * This is a gPXE Network Driver API function.
2454 static void skge_net_irq ( struct net_device *dev, int enable ) {
2455 struct skge_port *skge = netdev_priv(dev);
2456 struct skge_hw *hw = skge->hw;
2459 hw->intr_mask |= portmask[skge->port];
2461 hw->intr_mask &= ~portmask[skge->port];
2462 skge_write32(hw, B0_IMSK, hw->intr_mask);
2465 struct pci_driver skge_driver __pci_driver = {
2466 .ids = skge_id_table,
2467 .id_count = ( sizeof (skge_id_table) / sizeof (skge_id_table[0]) ),
2468 .probe = skge_probe,
2469 .remove = skge_remove