1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "mytg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68 {
69         return test_bit(flag, bits);
70 }
71
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         set_bit(flag, bits);
75 }
76
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         clear_bit(flag, bits);
80 }
81
82 #define tg3_flag(tp, flag)                              \
83         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag)                          \
85         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag)                        \
87         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
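/* Usage sketch (illustrative only; the flag combination below is arbitrary):
 * callers test and update feature bits through these wrappers rather than
 * open-coding bit operations, e.g.
 *
 *     if (tg3_flag(tp, ENABLE_APE))
 *             tg3_flag_set(tp, TAGGED_STATUS);
 *     else
 *             tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * The TG3_FLAG_ prefix is pasted on by the macros, so callers name only
 * the suffix of the enum value.
 */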
88
89 #define DRV_MODULE_NAME         "mytg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     119
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "May 18, 2011"
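/* For reference, with the values above DRV_MODULE_VERSION expands to the
 * concatenated string literal "3" "." "119", i.e. "3.119", so the version[]
 * banner defined below reads "mytg3.c:v3.119 (May 18, 2011)".
 */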
95
96 #define TG3_DEF_MAC_MODE        0
97 #define TG3_DEF_RX_MODE         0
98 #define TG3_DEF_TX_MODE         0
99 #define TG3_DEF_MSG_ENABLE        \
100         (NETIF_MSG_DRV          | \
101          NETIF_MSG_PROBE        | \
102          NETIF_MSG_LINK         | \
103          NETIF_MSG_TIMER        | \
104          NETIF_MSG_IFDOWN       | \
105          NETIF_MSG_IFUP         | \
106          NETIF_MSG_RX_ERR       | \
107          NETIF_MSG_TX_ERR)
108
109 /* length of time before we decide the hardware is borked,
110  * and dev->tx_timeout() should be called to fix the problem
111  */
112
113 #define TG3_TX_TIMEOUT                  (5 * HZ)
114
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU                     60
117 #define TG3_MAX_MTU(tp)                 1500
118
119 /* These numbers seem to be hard coded in the NIC firmware somehow.
120  * You can't change the ring sizes, but you can change where you place
121  * them in the NIC onboard memory.
122  */
123 #define TG3_RX_STD_RING_SIZE(tp) \
124         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
125          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
126 #define TG3_DEF_RX_RING_PENDING         200
127 #define TG3_RX_JMB_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
130 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
131 #define TG3_RSS_INDIR_TBL_SIZE          128
132
133 /* Do not place this n-ring entries value into the tp struct itself;
134  * we really want to expose these constants to GCC so that modulo et
135  * al.  operations are done with shifts and masks instead of with
136  * hw multiply/modulo instructions.  Another solution would be to
137  * replace things like '% foo' with '& (foo - 1)'.
138  */
139
140 #define TG3_TX_RING_SIZE                512
141 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
142
143 #define TG3_RX_STD_RING_BYTES(tp) \
144         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
145 #define TG3_RX_JMB_RING_BYTES(tp) \
146         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
147 #define TG3_RX_RCB_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
149 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
150                                  TG3_TX_RING_SIZE)
151 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
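/* Illustrative sketch (not part of the driver logic): keeping
 * TG3_TX_RING_SIZE a power-of-two compile-time constant is what lets
 * NEXT_TX() advance an index with a mask instead of a modulo, e.g.
 *
 *     next = (idx + 1) % TG3_TX_RING_SIZE;        // division/remainder
 *     next = (idx + 1) & (TG3_TX_RING_SIZE - 1);  // single AND, same result
 *
 * The two forms agree only while the ring size remains a power of two.
 */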
152
153 #define TG3_DMA_BYTE_ENAB               64
154
155 #define TG3_RX_STD_DMA_SZ               1536
156 #define TG3_RX_JMB_DMA_SZ               9046
157
158 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
159
160 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
161 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
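/* Worked out with the constants above: TG3_RX_STD_MAP_SZ comes to
 * 1536 + 64 = 1600 bytes and TG3_RX_JMB_MAP_SZ to 9046 + 64 = 9110 bytes
 * of DMA mapping per receive buffer.
 */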
162
163 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
164         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
165
166 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
167         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
168
169 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
170  * that are at least dword aligned when used in PCIX mode.  The driver
171  * works around this bug by double copying the packet.  This workaround
172  * is built into the normal double copy length check for efficiency.
173  *
174  * However, the double copy is only necessary on those architectures
175  * where unaligned memory accesses are inefficient.  For those architectures
176  * where unaligned memory accesses incur little penalty, we can reintegrate
177  * the 5701 in the normal rx path.  Doing so saves a device structure
178  * dereference by hardcoding the double copy threshold in place.
179  */
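/* Illustrative sketch (not part of the driver; rx_copy_thresh is a
 * hypothetical name for the cut-off discussed above): the double-copy
 * decision in the rx path conceptually looks like
 *
 *     if (len > rx_copy_thresh) {
 *             // hand the DMA-mapped buffer to the stack as-is
 *     } else {
 *             // allocate a small skb and memcpy the frame into it,
 *             // which also sidesteps the 5701 PCIX alignment bug
 *     }
 *
 * Hardcoding the threshold lets the compiler fold the comparison instead
 * of dereferencing the device structure on every packet.
 */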
180
181 /* minimum number of free TX descriptors required to wake up TX process */
182 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
183
184 #define TG3_RAW_IP_ALIGN 2
185
186 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
187
188 #define FIRMWARE_TG3            "tigon/tg3.bin"
189 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
190 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
191
192 static char version[] __devinitdata =
193         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
194
195 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
196 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
197 MODULE_LICENSE("GPL");
198 MODULE_VERSION(DRV_MODULE_VERSION);
199 MODULE_FIRMWARE(FIRMWARE_TG3);
200 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
201 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
202
203 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
204 module_param(tg3_debug, int, 0);
205 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
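/* Usage sketch (illustrative only): the bitmap follows the NETIF_MSG_*
 * values listed above, so for example
 *
 *     modprobe mytg3 tg3_debug=0x7
 *
 * enables the DRV, PROBE and LINK messages, while the default of -1 keeps
 * TG3_DEF_MSG_ENABLE.
 */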
206
207 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
214         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
215         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
281         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
282         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
283         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
284         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
285         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
286         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
287         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
288         {}
289 };
290
291 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
292
293
294 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
295 {
296         writel(val, tp->regs + off);
297 }
298
299 static u32 tg3_read32(struct tg3 *tp, u32 off)
300 {
301         return readl(tp->regs + off);
302 }
303
304 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
305 {
306         writel(val, tp->aperegs + off);
307 }
308
309 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
310 {
311         return readl(tp->aperegs + off);
312 }
313
314 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
315 {
316         unsigned long flags;
317
318         spin_lock_irqsave(&tp->indirect_lock, flags);
319         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
320         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
321         spin_unlock_irqrestore(&tp->indirect_lock, flags);
322 }
323
324 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
325 {
326         writel(val, tp->regs + off);
327         readl(tp->regs + off);
328 }
329
330 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
331 {
332         unsigned long flags;
333         u32 val;
334
335         spin_lock_irqsave(&tp->indirect_lock, flags);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
337         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
338         spin_unlock_irqrestore(&tp->indirect_lock, flags);
339         return val;
340 }
341
342 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
343 {
344         unsigned long flags;
345
346         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
347                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
348                                        TG3_64BIT_REG_LOW, val);
349                 return;
350         }
351         if (off == TG3_RX_STD_PROD_IDX_REG) {
352                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
353                                        TG3_64BIT_REG_LOW, val);
354                 return;
355         }
356
357         spin_lock_irqsave(&tp->indirect_lock, flags);
358         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
359         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
360         spin_unlock_irqrestore(&tp->indirect_lock, flags);
361
362         /* In indirect mode when disabling interrupts, we also need
363          * to clear the interrupt bit in the GRC local ctrl register.
364          */
365         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
366             (val == 0x1)) {
367                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
368                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
369         }
370 }
371
372 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
373 {
374         unsigned long flags;
375         u32 val;
376
377         spin_lock_irqsave(&tp->indirect_lock, flags);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
379         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
380         spin_unlock_irqrestore(&tp->indirect_lock, flags);
381         return val;
382 }
383
384 /* usec_wait specifies the wait time in usec when writing to certain registers
385  * where it is unsafe to read back the register without some delay.
386  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power;
387  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
388  */
389 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
390 {
391         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
392                 /* Non-posted methods */
393                 tp->write32(tp, off, val);
394         else {
395                 /* Posted method */
396                 tg3_write32(tp, off, val);
397                 if (usec_wait)
398                         udelay(usec_wait);
399                 tp->read32(tp, off);
400         }
401         /* Wait again after the read for the posted method to guarantee that
402          * the wait time is met.
403          */
404         if (usec_wait)
405                 udelay(usec_wait);
406 }
407
408 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
409 {
410         tp->write32_mbox(tp, off, val);
411         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
412                 tp->read32_mbox(tp, off);
413 }
414
415 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
416 {
417         void __iomem *mbox = tp->regs + off;
418         writel(val, mbox);
419         if (tg3_flag(tp, TXD_MBOX_HWBUG))
420                 writel(val, mbox);
421         if (tg3_flag(tp, MBOX_WRITE_REORDER))
422                 readl(mbox);
423 }
424
425 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
426 {
427         return readl(tp->regs + off + GRCMBOX_BASE);
428 }
429
430 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
431 {
432         writel(val, tp->regs + off + GRCMBOX_BASE);
433 }
434
435 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
436 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
437 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
438 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
439 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
440
441 #define tw32(reg, val)                  tp->write32(tp, reg, val)
442 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
443 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
444 #define tr32(reg)                       tp->read32(tp, reg)
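/* Usage sketch (illustrative only, using calls that appear later in this
 * file): writes that must reach the chip before the driver proceeds go
 * through the flushing helpers, e.g.
 *
 *     tw32_f(MAC_MI_COM, frame_val);                  // write, then read back
 *     tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); // also wait 40us around
 *                                                     // the flushing read
 *
 * Plain tw32() merely posts the write; a later read flushes it out.
 */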
445
446 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
447 {
448         unsigned long flags;
449
450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
451             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
452                 return;
453
454         spin_lock_irqsave(&tp->indirect_lock, flags);
455         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
456                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
457                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
458
459                 /* Always leave this as zero. */
460                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
461         } else {
462                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
463                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
464
465                 /* Always leave this as zero. */
466                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
467         }
468         spin_unlock_irqrestore(&tp->indirect_lock, flags);
469 }
470
471 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
472 {
473         unsigned long flags;
474
475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
476             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
477                 *val = 0;
478                 return;
479         }
480
481         spin_lock_irqsave(&tp->indirect_lock, flags);
482         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
483                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
484                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
485
486                 /* Always leave this as zero. */
487                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
488         } else {
489                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
490                 *val = tr32(TG3PCI_MEM_WIN_DATA);
491
492                 /* Always leave this as zero. */
493                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
494         }
495         spin_unlock_irqrestore(&tp->indirect_lock, flags);
496 }
497
498 static void tg3_ape_lock_init(struct tg3 *tp)
499 {
500         int i;
501         u32 regbase;
502
503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
504                 regbase = TG3_APE_LOCK_GRANT;
505         else
506                 regbase = TG3_APE_PER_LOCK_GRANT;
507
508         /* Make sure the driver doesn't hold any stale locks. */
509         for (i = 0; i < 8; i++)
510                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
511 }
512
513 static int tg3_ape_lock(struct tg3 *tp, int locknum)
514 {
515         int i, off;
516         int ret = 0;
517         u32 status, req, gnt;
518
519         if (!tg3_flag(tp, ENABLE_APE))
520                 return 0;
521
522         switch (locknum) {
523         case TG3_APE_LOCK_GRC:
524         case TG3_APE_LOCK_MEM:
525                 break;
526         default:
527                 return -EINVAL;
528         }
529
530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
531                 req = TG3_APE_LOCK_REQ;
532                 gnt = TG3_APE_LOCK_GRANT;
533         } else {
534                 req = TG3_APE_PER_LOCK_REQ;
535                 gnt = TG3_APE_PER_LOCK_GRANT;
536         }
537
538         off = 4 * locknum;
539
540         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
541
542         /* Wait for up to 1 millisecond to acquire lock. */
543         for (i = 0; i < 100; i++) {
544                 status = tg3_ape_read32(tp, gnt + off);
545                 if (status == APE_LOCK_GRANT_DRIVER)
546                         break;
547                 udelay(10);
548         }
549
550         if (status != APE_LOCK_GRANT_DRIVER) {
551                 /* Revoke the lock request. */
552                 tg3_ape_write32(tp, gnt + off,
553                                 APE_LOCK_GRANT_DRIVER);
554
555                 ret = -EBUSY;
556         }
557
558         return ret;
559 }
560
561 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
562 {
563         u32 gnt;
564
565         if (!tg3_flag(tp, ENABLE_APE))
566                 return;
567
568         switch (locknum) {
569         case TG3_APE_LOCK_GRC:
570         case TG3_APE_LOCK_MEM:
571                 break;
572         default:
573                 return;
574         }
575
576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
577                 gnt = TG3_APE_LOCK_GRANT;
578         else
579                 gnt = TG3_APE_PER_LOCK_GRANT;
580
581         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
582 }
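/* Usage sketch (illustrative only): resources shared with the APE firmware
 * are bracketed by the helpers above, e.g.
 *
 *     if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *             // ... touch the shared resource ...
 *             tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *     }
 *
 * tg3_ape_lock() returns 0 on success (and also when no APE is present, in
 * which case unlock is a no-op), so the unlock pairs with a successful lock.
 */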
583
584 static void tg3_disable_ints(struct tg3 *tp)
585 {
586         int i;
587
588         tw32(TG3PCI_MISC_HOST_CTRL,
589              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
590         for (i = 0; i < tp->irq_max; i++)
591                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
592 }
593
594 static void tg3_enable_ints(struct tg3 *tp)
595 {
596         int i;
597
598         tp->irq_sync = 0;
599         wmb();
600
601         tw32(TG3PCI_MISC_HOST_CTRL,
602              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
603
604         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
605         for (i = 0; i < tp->irq_cnt; i++) {
606                 struct tg3_napi *tnapi = &tp->napi[i];
607
608                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
609                 if (tg3_flag(tp, 1SHOT_MSI))
610                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
611
612                 tp->coal_now |= tnapi->coal_now;
613         }
614
615         /* Force an initial interrupt */
616         if (!tg3_flag(tp, TAGGED_STATUS) &&
617             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
618                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
619         else
620                 tw32(HOSTCC_MODE, tp->coal_now);
621
622         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
623 }
624
625 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
626 {
627         struct tg3 *tp = tnapi->tp;
628         struct tg3_hw_status *sblk = tnapi->hw_status;
629         unsigned int work_exists = 0;
630
631         /* check for phy events */
632         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
633                 if (sblk->status & SD_STATUS_LINK_CHG)
634                         work_exists = 1;
635         }
636         /* check for RX/TX work to do */
637         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
638             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
639                 work_exists = 1;
640
641         return work_exists;
642 }
643
644 /* tg3_int_reenable
645  *  similar to tg3_enable_ints, but it accurately determines whether there
646  *  is new work pending and can return without flushing the PIO write
647  *  which reenables interrupts
648  */
649 static void tg3_int_reenable(struct tg3_napi *tnapi)
650 {
651         struct tg3 *tp = tnapi->tp;
652
653         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
654         mmiowb();
655
656         /* When doing tagged status, this work check is unnecessary.
657          * The last_tag we write above tells the chip which piece of
658          * work we've completed.
659          */
660         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
661                 tw32(HOSTCC_MODE, tp->coalesce_mode |
662                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
663 }
664
665 static void tg3_switch_clocks(struct tg3 *tp)
666 {
667         u32 clock_ctrl;
668         u32 orig_clock_ctrl;
669
670         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
671                 return;
672
673         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
674
675         orig_clock_ctrl = clock_ctrl;
676         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
677                        CLOCK_CTRL_CLKRUN_OENABLE |
678                        0x1f);
679         tp->pci_clock_ctrl = clock_ctrl;
680
681         if (tg3_flag(tp, 5705_PLUS)) {
682                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
683                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
684                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
685                 }
686         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
687                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
688                             clock_ctrl |
689                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
690                             40);
691                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
692                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
693                             40);
694         }
695         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
696 }
697
698 #define PHY_BUSY_LOOPS  5000
699
700 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
701 {
702         u32 frame_val;
703         unsigned int loops;
704         int ret;
705
706         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
707                 tw32_f(MAC_MI_MODE,
708                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
709                 udelay(80);
710         }
711
712         *val = 0x0;
713
714         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
715                       MI_COM_PHY_ADDR_MASK);
716         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
717                       MI_COM_REG_ADDR_MASK);
718         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
719
720         tw32_f(MAC_MI_COM, frame_val);
721
722         loops = PHY_BUSY_LOOPS;
723         while (loops != 0) {
724                 udelay(10);
725                 frame_val = tr32(MAC_MI_COM);
726
727                 if ((frame_val & MI_COM_BUSY) == 0) {
728                         udelay(5);
729                         frame_val = tr32(MAC_MI_COM);
730                         break;
731                 }
732                 loops -= 1;
733         }
734
735         ret = -EBUSY;
736         if (loops != 0) {
737                 *val = frame_val & MI_COM_DATA_MASK;
738                 ret = 0;
739         }
740
741         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
742                 tw32_f(MAC_MI_MODE, tp->mi_mode);
743                 udelay(80);
744         }
745
746         return ret;
747 }
748
749 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
750 {
751         u32 frame_val;
752         unsigned int loops;
753         int ret;
754
755         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
756             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
757                 return 0;
758
759         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
760                 tw32_f(MAC_MI_MODE,
761                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
762                 udelay(80);
763         }
764
765         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
766                       MI_COM_PHY_ADDR_MASK);
767         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
768                       MI_COM_REG_ADDR_MASK);
769         frame_val |= (val & MI_COM_DATA_MASK);
770         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
771
772         tw32_f(MAC_MI_COM, frame_val);
773
774         loops = PHY_BUSY_LOOPS;
775         while (loops != 0) {
776                 udelay(10);
777                 frame_val = tr32(MAC_MI_COM);
778                 if ((frame_val & MI_COM_BUSY) == 0) {
779                         udelay(5);
780                         frame_val = tr32(MAC_MI_COM);
781                         break;
782                 }
783                 loops -= 1;
784         }
785
786         ret = -EBUSY;
787         if (loops != 0)
788                 ret = 0;
789
790         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
791                 tw32_f(MAC_MI_MODE, tp->mi_mode);
792                 udelay(80);
793         }
794
795         return ret;
796 }
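/* Usage sketch (illustrative only): higher layers treat these as plain MII
 * accessors, e.g.
 *
 *     u32 bmsr;
 *
 *     if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *             tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
 *
 * Both return 0 on success and -EBUSY if MI_COM stays busy for longer than
 * PHY_BUSY_LOOPS polls.
 */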
797
798 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
799 {
800         int err;
801
802         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
803         if (err)
804                 goto done;
805
806         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
807         if (err)
808                 goto done;
809
810         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
811                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
812         if (err)
813                 goto done;
814
815         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
816
817 done:
818         return err;
819 }
820
821 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
822 {
823         int err;
824
825         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
826         if (err)
827                 goto done;
828
829         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
830         if (err)
831                 goto done;
832
833         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
834                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
835         if (err)
836                 goto done;
837
838         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
839
840 done:
841         return err;
842 }
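/* Usage sketch (illustrative only): clause-45 registers are reached through
 * the two-step MMD sequence above; the EEE code later in this file does
 *
 *     tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val);
 *
 * which selects the MDIO_MMD_AN device, latches the register address, then
 * reads the data back through MII_TG3_MMD_ADDRESS in no-increment mode.
 */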
843
844 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
845 {
846         int err;
847
848         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
849         if (!err)
850                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
851
852         return err;
853 }
854
855 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
856 {
857         int err;
858
859         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
860         if (!err)
861                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
862
863         return err;
864 }
865
866 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
867 {
868         int err;
869
870         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
871                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
872                            MII_TG3_AUXCTL_SHDWSEL_MISC);
873         if (!err)
874                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
875
876         return err;
877 }
878
879 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
880 {
881         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
882                 set |= MII_TG3_AUXCTL_MISC_WREN;
883
884         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
885 }
886
887 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
888         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
889                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
890                              MII_TG3_AUXCTL_ACTL_TX_6DB)
891
892 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
893         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
894                              MII_TG3_AUXCTL_ACTL_TX_6DB)
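/* Usage sketch (illustrative only; val is a placeholder): DSP accesses are
 * bracketed by the SMDSP helpers above, mirroring how tg3_phy_apply_otp()
 * below uses them:
 *
 *     if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *             tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, val);
 *             TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *     }
 *
 * ENABLE returns 0 on success, so the DSP is only touched (and later
 * released) when the auxctl write actually went through.
 */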
895
896 static int tg3_bmcr_reset(struct tg3 *tp)
897 {
898         u32 phy_control;
899         int limit, err;
900
901         /* OK, reset it, and poll the BMCR_RESET bit until it
902          * clears or we time out.
903          */
904         phy_control = BMCR_RESET;
905         err = tg3_writephy(tp, MII_BMCR, phy_control);
906         if (err != 0)
907                 return -EBUSY;
908
909         limit = 5000;
910         while (limit--) {
911                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
912                 if (err != 0)
913                         return -EBUSY;
914
915                 if ((phy_control & BMCR_RESET) == 0) {
916                         udelay(40);
917                         break;
918                 }
919                 udelay(10);
920         }
921         if (limit < 0)
922                 return -EBUSY;
923
924         return 0;
925 }
926
927 static void tg3_mdio_init(struct tg3 *tp)
928 {
929         if (tg3_flag(tp, 5717_PLUS)) {
930                 u32 is_serdes;
931
932                 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
933
934                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
935                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
936                 else
937                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
938                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
939                 if (is_serdes)
940                         tp->phy_addr += 7;
941         } else
942                 tp->phy_addr = TG3_PHY_MII_ADDR;
943 }
944
945 /* tp->lock is held. */
946 static inline void tg3_generate_fw_event(struct tg3 *tp)
947 {
948         u32 val;
949
950         val = tr32(GRC_RX_CPU_EVENT);
951         val |= GRC_RX_CPU_DRIVER_EVENT;
952         tw32_f(GRC_RX_CPU_EVENT, val);
953
954         tp->last_event_jiffies = jiffies;
955 }
956
957 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
958
959 /* tp->lock is held. */
960 static void tg3_wait_for_event_ack(struct tg3 *tp)
961 {
962         int i;
963         unsigned int delay_cnt;
964         long time_remain;
965
966         /* If enough time has passed, no wait is necessary. */
967         time_remain = (long)(tp->last_event_jiffies + 1 +
968                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
969                       (long)jiffies;
970         if (time_remain < 0)
971                 return;
972
973         /* Check if we can shorten the wait time. */
974         delay_cnt = jiffies_to_usecs(time_remain);
975         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
976                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
977         delay_cnt = (delay_cnt >> 3) + 1;
978
979         for (i = 0; i < delay_cnt; i++) {
980                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
981                         break;
982                 udelay(8);
983         }
984 }
985
986 /* tp->lock is held. */
987 static void tg3_ump_link_report(struct tg3 *tp)
988 {
989         u32 reg;
990         u32 val;
991
992         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
993                 return;
994
995         tg3_wait_for_event_ack(tp);
996
997         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
998
999         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1000
1001         val = 0;
1002         if (!tg3_readphy(tp, MII_BMCR, &reg))
1003                 val = reg << 16;
1004         if (!tg3_readphy(tp, MII_BMSR, &reg))
1005                 val |= (reg & 0xffff);
1006         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1007
1008         val = 0;
1009         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1010                 val = reg << 16;
1011         if (!tg3_readphy(tp, MII_LPA, &reg))
1012                 val |= (reg & 0xffff);
1013         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1014
1015         val = 0;
1016         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1017                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1018                         val = reg << 16;
1019                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1020                         val |= (reg & 0xffff);
1021         }
1022         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1023
1024         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1025                 val = reg << 16;
1026         else
1027                 val = 0;
1028         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1029
1030         tg3_generate_fw_event(tp);
1031 }
1032
1033 static void tg3_link_report(struct tg3 *tp)
1034 {
1035         if (!netif_carrier_ok(tp->dev)) {
1036                 netif_info(tp, link, tp->dev, "Link is down\n");
1037                 tg3_ump_link_report(tp);
1038         } else if (netif_msg_link(tp)) {
1039                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1040                             (tp->link_config.active_speed == SPEED_1000 ?
1041                              1000 :
1042                              (tp->link_config.active_speed == SPEED_100 ?
1043                               100 : 10)),
1044                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1045                              "full" : "half"));
1046
1047                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1048                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1049                             "on" : "off",
1050                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1051                             "on" : "off");
1052
1053                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1054                         netdev_info(tp->dev, "EEE is %s\n",
1055                                     tp->setlpicnt ? "enabled" : "disabled");
1056
1057                 tg3_ump_link_report(tp);
1058         }
1059 }
1060
1061 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1062 {
1063         u16 miireg;
1064
1065         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1066                 miireg = ADVERTISE_PAUSE_CAP;
1067         else if (flow_ctrl & FLOW_CTRL_TX)
1068                 miireg = ADVERTISE_PAUSE_ASYM;
1069         else if (flow_ctrl & FLOW_CTRL_RX)
1070                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1071         else
1072                 miireg = 0;
1073
1074         return miireg;
1075 }
1076
1077 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1078 {
1079         u16 miireg;
1080
1081         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1082                 miireg = ADVERTISE_1000XPAUSE;
1083         else if (flow_ctrl & FLOW_CTRL_TX)
1084                 miireg = ADVERTISE_1000XPSE_ASYM;
1085         else if (flow_ctrl & FLOW_CTRL_RX)
1086                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1087         else
1088                 miireg = 0;
1089
1090         return miireg;
1091 }
1092
1093 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1094 {
1095         u8 cap = 0;
1096
1097         if (lcladv & ADVERTISE_1000XPAUSE) {
1098                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1099                         if (rmtadv & LPA_1000XPAUSE)
1100                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1101                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1102                                 cap = FLOW_CTRL_RX;
1103                 } else {
1104                         if (rmtadv & LPA_1000XPAUSE)
1105                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1106                 }
1107         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1108                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1109                         cap = FLOW_CTRL_TX;
1110         }
1111
1112         return cap;
1113 }
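/* Illustrative restatement of the pause resolution handled above:
 *
 *     local PAUSE      + remote PAUSE           -> TX and RX pause
 *     local PAUSE+ASYM + remote ASYM only       -> RX pause only
 *     local ASYM only  + remote PAUSE+ASYM      -> TX pause only
 *     any other combination                     -> no pause
 */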
1114
1115 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1116 {
1117         u8 autoneg;
1118         u8 flowctrl = 0;
1119         u32 old_rx_mode = tp->rx_mode;
1120         u32 old_tx_mode = tp->tx_mode;
1121
1122         autoneg = tp->link_config.autoneg;
1123
1124         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1125                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1126                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1127                 else
1128                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1129         } else
1130                 flowctrl = tp->link_config.flowctrl;
1131
1132         tp->link_config.active_flowctrl = flowctrl;
1133
1134         if (flowctrl & FLOW_CTRL_RX)
1135                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1136         else
1137                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1138
1139         if (old_rx_mode != tp->rx_mode)
1140                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1141
1142         if (flowctrl & FLOW_CTRL_TX)
1143                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1144         else
1145                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1146
1147         if (old_tx_mode != tp->tx_mode)
1148                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1149 }
1150
1151 static void tg3_phy_start(struct tg3 *tp)
1152 {
1153         struct phy_device *phydev;
1154
1155         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1156                 return;
1157
1158         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1159
1160         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1161                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1162                 phydev->speed = tp->link_config.orig_speed;
1163                 phydev->duplex = tp->link_config.orig_duplex;
1164                 phydev->autoneg = tp->link_config.orig_autoneg;
1165                 phydev->advertising = tp->link_config.orig_advertising;
1166         }
1167
1168         phy_start(phydev);
1169
1170         phy_start_aneg(phydev);
1171 }
1172
1173 static void tg3_phy_stop(struct tg3 *tp)
1174 {
1175         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1176                 return;
1177
1178         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1179 }
1180
1181 static void tg3_phy_fini(struct tg3 *tp)
1182 {
1183         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1184                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1185                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1186         }
1187 }
1188
1189 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1190 {
1191         u32 phytest;
1192
1193         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1194                 u32 phy;
1195
1196                 tg3_writephy(tp, MII_TG3_FET_TEST,
1197                              phytest | MII_TG3_FET_SHADOW_EN);
1198                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1199                         if (enable)
1200                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1201                         else
1202                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1203                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1204                 }
1205                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1206         }
1207 }
1208
1209 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1210 {
1211         u32 reg;
1212
1213         if (!tg3_flag(tp, 5705_PLUS) ||
1214             (tg3_flag(tp, 5717_PLUS) &&
1215              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1216                 return;
1217
1218         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1219                 tg3_phy_fet_toggle_apd(tp, enable);
1220                 return;
1221         }
1222
1223         reg = MII_TG3_MISC_SHDW_WREN |
1224               MII_TG3_MISC_SHDW_SCR5_SEL |
1225               MII_TG3_MISC_SHDW_SCR5_LPED |
1226               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1227               MII_TG3_MISC_SHDW_SCR5_SDTL |
1228               MII_TG3_MISC_SHDW_SCR5_C125OE;
1229         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1230                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1231
1232         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1233
1234
1235         reg = MII_TG3_MISC_SHDW_WREN |
1236               MII_TG3_MISC_SHDW_APD_SEL |
1237               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1238         if (enable)
1239                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1240
1241         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1242 }
1243
1244 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1245 {
1246         u32 phy;
1247
1248         if (!tg3_flag(tp, 5705_PLUS) ||
1249             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1250                 return;
1251
1252         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1253                 u32 ephy;
1254
1255                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1256                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1257
1258                         tg3_writephy(tp, MII_TG3_FET_TEST,
1259                                      ephy | MII_TG3_FET_SHADOW_EN);
1260                         if (!tg3_readphy(tp, reg, &phy)) {
1261                                 if (enable)
1262                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1263                                 else
1264                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1265                                 tg3_writephy(tp, reg, phy);
1266                         }
1267                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1268                 }
1269         } else {
1270                 int ret;
1271
1272                 ret = tg3_phy_auxctl_read(tp,
1273                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1274                 if (!ret) {
1275                         if (enable)
1276                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1277                         else
1278                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1279                         tg3_phy_auxctl_write(tp,
1280                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1281                 }
1282         }
1283 }
1284
1285 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1286 {
1287         int ret;
1288         u32 val;
1289
1290         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1291                 return;
1292
1293         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1294         if (!ret)
1295                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1296                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1297 }
1298
1299 static void tg3_phy_apply_otp(struct tg3 *tp)
1300 {
1301         u32 otp, phy;
1302
1303         if (!tp->phy_otp)
1304                 return;
1305
1306         otp = tp->phy_otp;
1307
1308         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1309                 return;
1310
1311         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1312         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1313         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1314
1315         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1316               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1317         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1318
1319         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1320         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1321         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1322
1323         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1324         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1325
1326         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1327         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1328
1329         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1330               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1331         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1332
1333         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1334 }
1335
1336 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1337 {
1338         u32 val;
1339
1340         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1341                 return;
1342
1343         tp->setlpicnt = 0;
1344
1345         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1346             current_link_up == 1 &&
1347             tp->link_config.active_duplex == DUPLEX_FULL &&
1348             (tp->link_config.active_speed == SPEED_100 ||
1349              tp->link_config.active_speed == SPEED_1000)) {
1350                 u32 eeectl;
1351
1352                 if (tp->link_config.active_speed == SPEED_1000)
1353                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1354                 else
1355                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1356
1357                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1358
1359                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1360                                   TG3_CL45_D7_EEERES_STAT, &val);
1361
1362                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1363                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1364                         tp->setlpicnt = 2;
1365         }
1366
1367         if (!tp->setlpicnt) {
1368                 val = tr32(TG3_CPMU_EEE_MODE);
1369                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1370         }
1371 }
1372
1373 static void tg3_phy_eee_enable(struct tg3 *tp)
1374 {
1375         u32 val;
1376
1377         if (tp->link_config.active_speed == SPEED_1000 &&
1378             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1379              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1380              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1381             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1382                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1383                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1384         }
1385
1386         val = tr32(TG3_CPMU_EEE_MODE);
1387         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1388 }
1389
1390 static int tg3_wait_macro_done(struct tg3 *tp)
1391 {
1392         int limit = 100;
1393
1394         while (limit--) {
1395                 u32 tmp32;
1396
1397                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1398                         if ((tmp32 & 0x1000) == 0)
1399                                 break;
1400                 }
1401         }
1402         if (limit < 0)
1403                 return -EBUSY;
1404
1405         return 0;
1406 }
1407
1408 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1409 {
1410         static const u32 test_pat[4][6] = {
1411         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1412         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1413         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1414         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1415         };
1416         int chan;
1417
1418         for (chan = 0; chan < 4; chan++) {
1419                 int i;
1420
1421                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1422                              (chan * 0x2000) | 0x0200);
1423                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1424
1425                 for (i = 0; i < 6; i++)
1426                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1427                                      test_pat[chan][i]);
1428
1429                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1430                 if (tg3_wait_macro_done(tp)) {
1431                         *resetp = 1;
1432                         return -EBUSY;
1433                 }
1434
1435                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1436                              (chan * 0x2000) | 0x0200);
1437                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1438                 if (tg3_wait_macro_done(tp)) {
1439                         *resetp = 1;
1440                         return -EBUSY;
1441                 }
1442
1443                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1444                 if (tg3_wait_macro_done(tp)) {
1445                         *resetp = 1;
1446                         return -EBUSY;
1447                 }
1448
1449                 for (i = 0; i < 6; i += 2) {
1450                         u32 low, high;
1451
1452                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1453                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1454                             tg3_wait_macro_done(tp)) {
1455                                 *resetp = 1;
1456                                 return -EBUSY;
1457                         }
1458                         low &= 0x7fff;
1459                         high &= 0x000f;
1460                         if (low != test_pat[chan][i] ||
1461                             high != test_pat[chan][i+1]) {
1462                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1463                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1464                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1465
1466                                 return -EBUSY;
1467                         }
1468                 }
1469         }
1470
1471         return 0;
1472 }
1473
1474 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1475 {
1476         int chan;
1477
1478         for (chan = 0; chan < 4; chan++) {
1479                 int i;
1480
1481                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1482                              (chan * 0x2000) | 0x0200);
1483                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1484                 for (i = 0; i < 6; i++)
1485                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1487                 if (tg3_wait_macro_done(tp))
1488                         return -EBUSY;
1489         }
1490
1491         return 0;
1492 }
1493
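/* PHY reset workaround for the 5703/5704/5705: force 1000-full
 * master mode, block DSP control access at 0x8005, and retry the
 * channel test pattern up to 10 times (re-resetting the PHY whenever
 * the pattern check asks for it) before restoring the original
 * EXT_CTRL and 1000BASE-T control settings.
 */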
1494 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1495 {
1496         u32 reg32, phy9_orig;
1497         int retries, do_phy_reset, err;
1498
1499         retries = 10;
1500         do_phy_reset = 1;
1501         do {
1502                 if (do_phy_reset) {
1503                         err = tg3_bmcr_reset(tp);
1504                         if (err)
1505                                 return err;
1506                         do_phy_reset = 0;
1507                 }
1508
1509                 /* Disable transmitter and interrupt.  */
1510                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1511                         continue;
1512
1513                 reg32 |= 0x3000;
1514                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1515
1516                 /* Set full-duplex, 1000 Mbps.  */
1517                 tg3_writephy(tp, MII_BMCR,
1518                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1519
1520                 /* Set to master mode.  */
1521                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1522                         continue;
1523
1524                 tg3_writephy(tp, MII_TG3_CTRL,
1525                              (MII_TG3_CTRL_AS_MASTER |
1526                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1527
1528                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1529                 if (err)
1530                         return err;
1531
1532                 /* Block the PHY control access.  */
1533                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1534
1535                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1536                 if (!err)
1537                         break;
1538         } while (--retries);
1539
1540         err = tg3_phy_reset_chanpat(tp);
1541         if (err)
1542                 return err;
1543
1544         tg3_phydsp_write(tp, 0x8005, 0x0000);
1545
1546         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1547         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1548
1549         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1550
1551         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1552
1553         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1554                 reg32 &= ~0x3000;
1555                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1556         } else if (!err)
1557                 err = -EBUSY;
1558
1559         return err;
1560 }
1561
1562 /* Fully reset the tigon3 PHY and re-apply the chip- and
1563  * PHY-specific workarounds that must follow a reset.
1564  */
1565 static int tg3_phy_reset(struct tg3 *tp)
1566 {
1567         u32 val, cpmuctrl;
1568         int err;
1569
1570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1571                 val = tr32(GRC_MISC_CFG);
1572                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1573                 udelay(40);
1574         }
1575         err  = tg3_readphy(tp, MII_BMSR, &val);
1576         err |= tg3_readphy(tp, MII_BMSR, &val);
1577         if (err != 0)
1578                 return -EBUSY;
1579
1580         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1581                 netif_carrier_off(tp->dev);
1582                 tg3_link_report(tp);
1583         }
1584
1585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1586             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1588                 err = tg3_phy_reset_5703_4_5(tp);
1589                 if (err)
1590                         return err;
1591                 goto out;
1592         }
1593
1594         cpmuctrl = 0;
1595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1596             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1597                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1598                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1599                         tw32(TG3_CPMU_CTRL,
1600                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1601         }
1602
1603         err = tg3_bmcr_reset(tp);
1604         if (err)
1605                 return err;
1606
1607         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1608                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1609                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
1610
1611                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1612         }
1613
1614         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1615             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1616                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1617                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1618                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1619                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1620                         udelay(40);
1621                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1622                 }
1623         }
1624
1625         if (tg3_flag(tp, 5717_PLUS) &&
1626             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
1627                 return 0;
1628
1629         tg3_phy_apply_otp(tp);
1630
1631         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
1632                 tg3_phy_toggle_apd(tp, true);
1633         else
1634                 tg3_phy_toggle_apd(tp, false);
1635
1636 out:
1637         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
1638             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1639                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
1640                 tg3_phydsp_write(tp, 0x000a, 0x0323);
1641                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1642         }
1643
1644         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
1645                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1646                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1647         }
1648
1649         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1650                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1651                         tg3_phydsp_write(tp, 0x000a, 0x310b);
1652                         tg3_phydsp_write(tp, 0x201f, 0x9506);
1653                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
1654                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1655                 }
1656         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1657                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1658                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1659                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
1660                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1661                                 tg3_writephy(tp, MII_TG3_TEST1,
1662                                              MII_TG3_TEST1_TRIM_EN | 0x4);
1663                         } else
1664                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1665
1666                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1667                 }
1668         }
1669
1670         /* Set Extended packet length bit (bit 14) on all chips
1671          * that support jumbo frames.  */
1672         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1673                 /* Cannot do read-modify-write on 5401 */
1674                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1675         }
1676
1677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1678                 /* adjust output voltage */
1679                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
1680         }
1681
1682         tg3_phy_toggle_automdix(tp, 1);
1683         tg3_phy_set_wirespeed(tp);
1684         return 0;
1685 }
1686
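/* Work out whether auxiliary (Vaux) power must stay up -- because
 * this port or its 5704/5714/5717/5720 peer needs WoL or ASF -- and
 * drive the GRC GPIOs accordingly.  The GPIO sequence is chip
 * specific: 5700/5701, the 5761 non-E parts (which swap GPIO 0 and
 * GPIO 2), and the 5753-style parts without a usable GPIO2 each get
 * their own variant.  Nothing is done on non-NIC boards or on the
 * 5719/57765, where the GPIOs mean something else entirely.
 */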
1687 static void tg3_frob_aux_power(struct tg3 *tp)
1688 {
1689         bool need_vaux = false;
1690
1691         /* The GPIOs do something completely different on 57765. */
1692         if (!tg3_flag(tp, IS_NIC) ||
1693             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1695                 return;
1696
1697         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1698              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
1699              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1700              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
1701             tp->pdev_peer != tp->pdev) {
1702                 struct net_device *dev_peer;
1703
1704                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1705
1706                 /* remove_one() may have been run on the peer. */
1707                 if (dev_peer) {
1708                         struct tg3 *tp_peer = netdev_priv(dev_peer);
1709
1710                         if (tg3_flag(tp_peer, INIT_COMPLETE))
1711                                 return;
1712
1713                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
1714                             tg3_flag(tp_peer, ENABLE_ASF))
1715                                 need_vaux = true;
1716                 }
1717         }
1718
1719         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
1720                 need_vaux = true;
1721
1722         if (need_vaux) {
1723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1724                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1725                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1726                                     (GRC_LCLCTRL_GPIO_OE0 |
1727                                      GRC_LCLCTRL_GPIO_OE1 |
1728                                      GRC_LCLCTRL_GPIO_OE2 |
1729                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1730                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1731                                     100);
1732                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
1733                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
1734                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1735                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1736                                              GRC_LCLCTRL_GPIO_OE1 |
1737                                              GRC_LCLCTRL_GPIO_OE2 |
1738                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1739                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1740                                              tp->grc_local_ctrl;
1741                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1742
1743                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1744                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1745
1746                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1747                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1748                 } else {
1749                         u32 no_gpio2;
1750                         u32 grc_local_ctrl = 0;
1751
1752                         /* Workaround to prevent overdrawing Amps. */
1753                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1754                             ASIC_REV_5714) {
1755                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1756                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1757                                             grc_local_ctrl, 100);
1758                         }
1759
1760                         /* On 5753 and variants, GPIO2 cannot be used. */
1761                         no_gpio2 = tp->nic_sram_data_cfg &
1762                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1763
1764                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1765                                          GRC_LCLCTRL_GPIO_OE1 |
1766                                          GRC_LCLCTRL_GPIO_OE2 |
1767                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1768                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1769                         if (no_gpio2) {
1770                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1771                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1772                         }
1773                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1774                                                     grc_local_ctrl, 100);
1775
1776                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1777
1778                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1779                                                     grc_local_ctrl, 100);
1780
1781                         if (!no_gpio2) {
1782                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1783                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1784                                             grc_local_ctrl, 100);
1785                         }
1786                 }
1787         } else {
1788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1789                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1790                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1791                                     (GRC_LCLCTRL_GPIO_OE1 |
1792                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1793
1794                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1795                                     GRC_LCLCTRL_GPIO_OE1, 100);
1796
1797                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1798                                     (GRC_LCLCTRL_GPIO_OE1 |
1799                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1800                 }
1801         }
1802 }
1803
1804 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1805 {
1806         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1807                 return 1;
1808         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
1809                 if (speed != SPEED_10)
1810                         return 1;
1811         } else if (speed == SPEED_10)
1812                 return 1;
1813
1814         return 0;
1815 }
1816
1817 static int tg3_setup_phy(struct tg3 *, int);
1818
1819 #define RESET_KIND_SHUTDOWN     0
1820 #define RESET_KIND_INIT         1
1821 #define RESET_KIND_SUSPEND      2
1822
1823 static void tg3_write_sig_post_reset(struct tg3 *, int);
1824 static int tg3_halt_cpu(struct tg3 *, u32);
1825
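/* Drop the PHY into its lowest safe power state.  SerDes, 5906
 * (EPHY IDDQ) and FET-style PHYs each take their own early-return
 * path; chips with known power-down bugs (5700, 5704, and 5780 with
 * an MII SerDes) are deliberately left powered, and everything else
 * ends with BMCR_PDOWN.
 */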
1826 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
1827 {
1828         u32 val;
1829
1830         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1831                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1832                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1833                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1834
1835                         sg_dig_ctrl |=
1836                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1837                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1838                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1839                 }
1840                 return;
1841         }
1842
1843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1844                 tg3_bmcr_reset(tp);
1845                 val = tr32(GRC_MISC_CFG);
1846                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1847                 udelay(40);
1848                 return;
1849         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1850                 u32 phytest;
1851                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1852                         u32 phy;
1853
1854                         tg3_writephy(tp, MII_ADVERTISE, 0);
1855                         tg3_writephy(tp, MII_BMCR,
1856                                      BMCR_ANENABLE | BMCR_ANRESTART);
1857
1858                         tg3_writephy(tp, MII_TG3_FET_TEST,
1859                                      phytest | MII_TG3_FET_SHADOW_EN);
1860                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
1861                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
1862                                 tg3_writephy(tp,
1863                                              MII_TG3_FET_SHDW_AUXMODE4,
1864                                              phy);
1865                         }
1866                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1867                 }
1868                 return;
1869         } else if (do_low_power) {
1870                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1871                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1872
1873                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
1874                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
1875                       MII_TG3_AUXCTL_PCTL_VREG_11V;
1876                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
1877         }
1878
1879         /* The PHY should not be powered down on some chips because
1880          * of bugs.
1881          */
1882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1883             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1884             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1885              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1886                 return;
1887
1888         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1889             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1890                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1891                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1892                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1893                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1894         }
1895
1896         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1897 }
1898
1899 /* tp->lock is held. */
1900 static int tg3_nvram_lock(struct tg3 *tp)
1901 {
1902         if (tg3_flag(tp, NVRAM)) {
1903                 int i;
1904
1905                 if (tp->nvram_lock_cnt == 0) {
1906                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1907                         for (i = 0; i < 8000; i++) {
1908                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
1909                                         break;
1910                                 udelay(20);
1911                         }
1912                         if (i == 8000) {
1913                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
1914                                 return -ENODEV;
1915                         }
1916                 }
1917                 tp->nvram_lock_cnt++;
1918         }
1919         return 0;
1920 }
1921
1922 /* tp->lock is held. */
1923 static void tg3_nvram_unlock(struct tg3 *tp)
1924 {
1925         if (tg3_flag(tp, NVRAM)) {
1926                 if (tp->nvram_lock_cnt > 0)
1927                         tp->nvram_lock_cnt--;
1928                 if (tp->nvram_lock_cnt == 0)
1929                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
1930         }
1931 }
1932
1933 /* tp->lock is held. */
1934 static void tg3_enable_nvram_access(struct tg3 *tp)
1935 {
1936         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1937                 u32 nvaccess = tr32(NVRAM_ACCESS);
1938
1939                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
1940         }
1941 }
1942
1943 /* tp->lock is held. */
1944 static void tg3_disable_nvram_access(struct tg3 *tp)
1945 {
1946         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1947                 u32 nvaccess = tr32(NVRAM_ACCESS);
1948
1949                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
1950         }
1951 }
1952
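/* Fallback read path for boards without NVRAM flash: program
 * GRC_EEPROM_ADDR with a 4-byte-aligned offset plus the READ/START
 * bits, poll for EEPROM_ADDR_COMPLETE (up to 1000 x msleep(1)), then
 * byteswap the data register as described in the comment below.
 */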
1953 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
1954                                         u32 offset, u32 *val)
1955 {
1956         u32 tmp;
1957         int i;
1958
1959         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
1960                 return -EINVAL;
1961
1962         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
1963                                         EEPROM_ADDR_DEVID_MASK |
1964                                         EEPROM_ADDR_READ);
1965         tw32(GRC_EEPROM_ADDR,
1966              tmp |
1967              (0 << EEPROM_ADDR_DEVID_SHIFT) |
1968              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
1969               EEPROM_ADDR_ADDR_MASK) |
1970              EEPROM_ADDR_READ | EEPROM_ADDR_START);
1971
1972         for (i = 0; i < 1000; i++) {
1973                 tmp = tr32(GRC_EEPROM_ADDR);
1974
1975                 if (tmp & EEPROM_ADDR_COMPLETE)
1976                         break;
1977                 msleep(1);
1978         }
1979         if (!(tmp & EEPROM_ADDR_COMPLETE))
1980                 return -EBUSY;
1981
1982         tmp = tr32(GRC_EEPROM_DATA);
1983
1984         /*
1985          * The data will always be opposite the native endian
1986          * format.  Perform a blind byteswap to compensate.
1987          */
1988         *val = swab32(tmp);
1989
1990         return 0;
1991 }
1992
1993 #define NVRAM_CMD_TIMEOUT 10000
1994
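/* Issue an NVRAM command and busy-wait in 10us steps, up to
 * NVRAM_CMD_TIMEOUT iterations, for NVRAM_CMD_DONE.  Returns -EBUSY
 * if the controller never signals completion.
 */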
1995 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
1996 {
1997         int i;
1998
1999         tw32(NVRAM_CMD, nvram_cmd);
2000         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2001                 udelay(10);
2002                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2003                         udelay(10);
2004                         break;
2005                 }
2006         }
2007
2008         if (i == NVRAM_CMD_TIMEOUT)
2009                 return -EBUSY;
2010
2011         return 0;
2012 }
2013
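/* Convert a linear NVRAM offset into the page:byte form used by
 * Atmel AT45DB0x1B-style buffered flash, whose page size is not a
 * power of two; tg3_nvram_logical_addr() below is the inverse.  For
 * example, assuming the usual 264-byte page for this part, offset
 * 600 maps to page 2, byte 72 (600 = 2 * 264 + 72).
 */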
2014 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2015 {
2016         if (tg3_flag(tp, NVRAM) &&
2017             tg3_flag(tp, NVRAM_BUFFERED) &&
2018             tg3_flag(tp, FLASH) &&
2019             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2020             (tp->nvram_jedecnum == JEDEC_ATMEL))
2021
2022                 addr = ((addr / tp->nvram_pagesize) <<
2023                         ATMEL_AT45DB0X1B_PAGE_POS) +
2024                        (addr % tp->nvram_pagesize);
2025
2026         return addr;
2027 }
2028
2029 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2030 {
2031         if (tg3_flag(tp, NVRAM) &&
2032             tg3_flag(tp, NVRAM_BUFFERED) &&
2033             tg3_flag(tp, FLASH) &&
2034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2035             (tp->nvram_jedecnum == JEDEC_ATMEL))
2036
2037                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2038                         tp->nvram_pagesize) +
2039                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2040
2041         return addr;
2042 }
2043
2044 /* NOTE: Data read in from NVRAM is byteswapped according to
2045  * the byteswapping settings for all other register accesses.
2046  * tg3 devices are BE devices, so on a BE machine, the data
2047  * returned will be exactly as it is seen in NVRAM.  On a LE
2048  * machine, the 32-bit value will be byteswapped.
2049  */
2050 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2051 {
2052         int ret;
2053
2054         if (!tg3_flag(tp, NVRAM))
2055                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2056
2057         offset = tg3_nvram_phys_addr(tp, offset);
2058
2059         if (offset > NVRAM_ADDR_MSK)
2060                 return -EINVAL;
2061
2062         ret = tg3_nvram_lock(tp);
2063         if (ret)
2064                 return ret;
2065
2066         tg3_enable_nvram_access(tp);
2067
2068         tw32(NVRAM_ADDR, offset);
2069         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2070                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2071
2072         if (ret == 0)
2073                 *val = tr32(NVRAM_RDDATA);
2074
2075         tg3_disable_nvram_access(tp);
2076
2077         tg3_nvram_unlock(tp);
2078
2079         return ret;
2080 }
2081
2082 /* Ensures NVRAM data is in bytestream format. */
2083 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2084 {
2085         u32 v;
2086         int res = tg3_nvram_read(tp, offset, &v);
2087         if (!res)
2088                 *val = cpu_to_be32(v);
2089         return res;
2090 }
2091
2092 /* tp->lock is held. */
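/* Program the station address into all four MAC_ADDR slots
 * (optionally skipping slot 1) and, on the 5703/5704, into the
 * twelve extended-address slots as well, then seed the transmit
 * backoff generator from the byte sum of the address.
 */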
2093 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2094 {
2095         u32 addr_high, addr_low;
2096         int i;
2097
2098         addr_high = ((tp->dev->dev_addr[0] << 8) |
2099                      tp->dev->dev_addr[1]);
2100         addr_low = ((tp->dev->dev_addr[2] << 24) |
2101                     (tp->dev->dev_addr[3] << 16) |
2102                     (tp->dev->dev_addr[4] <<  8) |
2103                     (tp->dev->dev_addr[5] <<  0));
2104         for (i = 0; i < 4; i++) {
2105                 if (i == 1 && skip_mac_1)
2106                         continue;
2107                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2108                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2109         }
2110
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2113                 for (i = 0; i < 12; i++) {
2114                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2115                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2116                 }
2117         }
2118
2119         addr_high = (tp->dev->dev_addr[0] +
2120                      tp->dev->dev_addr[1] +
2121                      tp->dev->dev_addr[2] +
2122                      tp->dev->dev_addr[3] +
2123                      tp->dev->dev_addr[4] +
2124                      tp->dev->dev_addr[5]) &
2125                 TX_BACKOFF_SEED_MASK;
2126         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2127 }
2128
2129 static void tg3_enable_register_access(struct tg3 *tp)
2130 {
2131         /*
2132          * Make sure register accesses (indirect or otherwise) will function
2133          * correctly.
2134          */
2135         pci_write_config_dword(tp->pdev,
2136                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2137 }
2138
2139 static int tg3_power_up(struct tg3 *tp)
2140 {
2141         tg3_enable_register_access(tp);
2142
2143         pci_set_power_state(tp->pdev, PCI_D0);
2144
2145         /* Switch out of Vaux if it is a NIC */
2146         if (tg3_flag(tp, IS_NIC))
2147                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2148
2149         return 0;
2150 }
2151
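/* Get the chip ready for D3/power-off: restore CLKREQ where needed,
 * mask PCI interrupts, drop the copper link to 10/half, post the WoL
 * driver-state mailbox, build a minimal WoL MAC/RX configuration
 * when a wake source is armed, gate the core clocks where the chip
 * allows it, power down the PHY if neither wake-up nor ASF needs it,
 * and hand Vaux/GPIO handling to tg3_frob_aux_power().
 */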
2152 static int tg3_power_down_prepare(struct tg3 *tp)
2153 {
2154         u32 misc_host_ctrl;
2155         bool device_should_wake, do_low_power;
2156
2157         tg3_enable_register_access(tp);
2158
2159         /* Restore the CLKREQ setting. */
2160         if (tg3_flag(tp, CLKREQ_BUG)) {
2161                 u16 lnkctl;
2162
2163                 pci_read_config_word(tp->pdev,
2164                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2165                                      &lnkctl);
2166                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2167                 pci_write_config_word(tp->pdev,
2168                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2169                                       lnkctl);
2170         }
2171
2172         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2173         tw32(TG3PCI_MISC_HOST_CTRL,
2174              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2175
2176         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2177                              tg3_flag(tp, WOL_ENABLE);
2178
2180         do_low_power = true;
2181
2182         if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2183                 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2184                 tp->link_config.orig_speed = tp->link_config.speed;
2185                 tp->link_config.orig_duplex = tp->link_config.duplex;
2186                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2187         }
2188
2189         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2190                 tp->link_config.speed = SPEED_10;
2191                 tp->link_config.duplex = DUPLEX_HALF;
2192                 tp->link_config.autoneg = AUTONEG_ENABLE;
2193                 tg3_setup_phy(tp, 0);
2194         }
2195
2196         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2197                 u32 val;
2198
2199                 val = tr32(GRC_VCPU_EXT_CTRL);
2200                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2201         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2202                 int i;
2203                 u32 val;
2204
2205                 for (i = 0; i < 200; i++) {
2206                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2207                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2208                                 break;
2209                         msleep(1);
2210                 }
2211         }
2212         if (tg3_flag(tp, WOL_CAP))
2213                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2214                                                      WOL_DRV_STATE_SHUTDOWN |
2215                                                      WOL_DRV_WOL |
2216                                                      WOL_SET_MAGIC_PKT);
2217
2218         if (device_should_wake) {
2219                 u32 mac_mode;
2220
2221                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2222                         if (do_low_power &&
2223                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2224                                 tg3_phy_auxctl_write(tp,
2225                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2226                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2227                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2228                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2229                                 udelay(40);
2230                         }
2231
2232                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2233                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2234                         else
2235                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2236
2237                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2238                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2239                             ASIC_REV_5700) {
2240                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2241                                              SPEED_100 : SPEED_10;
2242                                 if (tg3_5700_link_polarity(tp, speed))
2243                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2244                                 else
2245                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2246                         }
2247                 } else {
2248                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2249                 }
2250
2251                 if (!tg3_flag(tp, 5750_PLUS))
2252                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2253
2254                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2255                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2256                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2257                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2258
2259                 if (tg3_flag(tp, ENABLE_APE))
2260                         mac_mode |= MAC_MODE_APE_TX_EN |
2261                                     MAC_MODE_APE_RX_EN |
2262                                     MAC_MODE_TDE_ENABLE;
2263
2264                 tw32_f(MAC_MODE, mac_mode);
2265                 udelay(100);
2266
2267                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2268                 udelay(10);
2269         }
2270
2271         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2272             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2274                 u32 base_val;
2275
2276                 base_val = tp->pci_clock_ctrl;
2277                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2278                              CLOCK_CTRL_TXCLK_DISABLE);
2279
2280                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2281                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2282         } else if (tg3_flag(tp, 5780_CLASS) ||
2283                    tg3_flag(tp, CPMU_PRESENT) ||
2284                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2285                 /* do nothing */
2286         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2287                 u32 newbits1, newbits2;
2288
2289                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2290                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2291                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2292                                     CLOCK_CTRL_TXCLK_DISABLE |
2293                                     CLOCK_CTRL_ALTCLK);
2294                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2295                 } else if (tg3_flag(tp, 5705_PLUS)) {
2296                         newbits1 = CLOCK_CTRL_625_CORE;
2297                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2298                 } else {
2299                         newbits1 = CLOCK_CTRL_ALTCLK;
2300                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2301                 }
2302
2303                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2304                             40);
2305
2306                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2307                             40);
2308
2309                 if (!tg3_flag(tp, 5705_PLUS)) {
2310                         u32 newbits3;
2311
2312                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2313                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2314                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2315                                             CLOCK_CTRL_TXCLK_DISABLE |
2316                                             CLOCK_CTRL_44MHZ_CORE);
2317                         } else {
2318                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2319                         }
2320
2321                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2322                                     tp->pci_clock_ctrl | newbits3, 40);
2323                 }
2324         }
2325
2326         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2327                 tg3_power_down_phy(tp, do_low_power);
2328
2329         tg3_frob_aux_power(tp);
2330
2331         /* Workaround for unstable PLL clock */
2332         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2333             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2334                 u32 val = tr32(0x7d00);
2335
2336                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2337                 tw32(0x7d00, val);
2338                 if (!tg3_flag(tp, ENABLE_ASF)) {
2339                         int err;
2340
2341                         err = tg3_nvram_lock(tp);
2342                         tg3_halt_cpu(tp, RX_CPU_BASE);
2343                         if (!err)
2344                                 tg3_nvram_unlock(tp);
2345                 }
2346         }
2347
2348         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2349
2350         return 0;
2351 }
2352
2353 static void tg3_power_down(struct tg3 *tp)
2354 {
2355         tg3_power_down_prepare(tp);
2356
2357         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2358         pci_set_power_state(tp->pdev, PCI_D3hot);
2359 }
2360
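/* Decode the PHY AUX status word into speed and duplex.  FET-style
 * PHYs use a different encoding (separate 100Mb and full-duplex
 * bits); anything unrecognized becomes SPEED_INVALID/DUPLEX_INVALID.
 */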
2361 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2362 {
2363         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2364         case MII_TG3_AUX_STAT_10HALF:
2365                 *speed = SPEED_10;
2366                 *duplex = DUPLEX_HALF;
2367                 break;
2368
2369         case MII_TG3_AUX_STAT_10FULL:
2370                 *speed = SPEED_10;
2371                 *duplex = DUPLEX_FULL;
2372                 break;
2373
2374         case MII_TG3_AUX_STAT_100HALF:
2375                 *speed = SPEED_100;
2376                 *duplex = DUPLEX_HALF;
2377                 break;
2378
2379         case MII_TG3_AUX_STAT_100FULL:
2380                 *speed = SPEED_100;
2381                 *duplex = DUPLEX_FULL;
2382                 break;
2383
2384         case MII_TG3_AUX_STAT_1000HALF:
2385                 *speed = SPEED_1000;
2386                 *duplex = DUPLEX_HALF;
2387                 break;
2388
2389         case MII_TG3_AUX_STAT_1000FULL:
2390                 *speed = SPEED_1000;
2391                 *duplex = DUPLEX_FULL;
2392                 break;
2393
2394         default:
2395                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2396                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2397                                  SPEED_10;
2398                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2399                                   DUPLEX_HALF;
2400                         break;
2401                 }
2402                 *speed = SPEED_INVALID;
2403                 *duplex = DUPLEX_INVALID;
2404                 break;
2405         }
2406 }
2407
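/* Translate ethtool ADVERTISED_* bits plus the requested flow
 * control into MII_ADVERTISE and MII_TG3_CTRL writes (forcing master
 * mode on 5701 A0/B0), then, on EEE-capable PHYs, program the
 * clause-45 EEE advertisement with the SMDSP enabled around the
 * per-chip DSP fixups.
 */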
2408 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2409 {
2410         int err = 0;
2411         u32 val, new_adv;
2412
2413         new_adv = ADVERTISE_CSMA;
2414         if (advertise & ADVERTISED_10baseT_Half)
2415                 new_adv |= ADVERTISE_10HALF;
2416         if (advertise & ADVERTISED_10baseT_Full)
2417                 new_adv |= ADVERTISE_10FULL;
2418         if (advertise & ADVERTISED_100baseT_Half)
2419                 new_adv |= ADVERTISE_100HALF;
2420         if (advertise & ADVERTISED_100baseT_Full)
2421                 new_adv |= ADVERTISE_100FULL;
2422
2423         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2424
2425         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2426         if (err)
2427                 goto done;
2428
2429         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2430                 goto done;
2431
2432         new_adv = 0;
2433         if (advertise & ADVERTISED_1000baseT_Half)
2434                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2435         if (advertise & ADVERTISED_1000baseT_Full)
2436                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2437
2438         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2439             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2440                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2441                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2442
2443         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2444         if (err)
2445                 goto done;
2446
2447         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2448                 goto done;
2449
2450         tw32(TG3_CPMU_EEE_MODE,
2451              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2452
2453         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2454         if (!err) {
2455                 u32 err2;
2456
2457                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2458                 case ASIC_REV_5717:
2459                 case ASIC_REV_57765:
2460                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2461                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2462                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2463                         /* Fall through */
2464                 case ASIC_REV_5719:
2465                         val = MII_TG3_DSP_TAP26_ALNOKO |
2466                               MII_TG3_DSP_TAP26_RMRXSTO |
2467                               MII_TG3_DSP_TAP26_OPCSINPT;
2468                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2469                 }
2470
2471                 val = 0;
2472                 /* Advertise 100-BaseTX EEE ability */
2473                 if (advertise & ADVERTISED_100baseT_Full)
2474                         val |= MDIO_AN_EEE_ADV_100TX;
2475                 /* Advertise 1000-BaseT EEE ability */
2476                 if (advertise & ADVERTISED_1000baseT_Full)
2477                         val |= MDIO_AN_EEE_ADV_1000T;
2478                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
2479
2480                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2481                 if (!err)
2482                         err = err2;
2483         }
2484
2485 done:
2486         return err;
2487 }
2488
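/* Start copper link bring-up.  In low-power (WoL) state only 10Mb --
 * plus 100Mb when WOL_SPEED_100MB is set -- is advertised; with a
 * forced speed/duplex the BMCR is written directly, briefly using
 * loopback to make the old link drop first; otherwise autoneg is
 * restarted with the configured advertisement.
 */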
2489 static void tg3_phy_copper_begin(struct tg3 *tp)
2490 {
2491         u32 new_adv;
2492         int i;
2493
2494         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2495                 new_adv = ADVERTISED_10baseT_Half |
2496                           ADVERTISED_10baseT_Full;
2497                 if (tg3_flag(tp, WOL_SPEED_100MB))
2498                         new_adv |= ADVERTISED_100baseT_Half |
2499                                    ADVERTISED_100baseT_Full;
2500
2501                 tg3_phy_autoneg_cfg(tp, new_adv,
2502                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
2503         } else if (tp->link_config.speed == SPEED_INVALID) {
2504                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2505                         tp->link_config.advertising &=
2506                                 ~(ADVERTISED_1000baseT_Half |
2507                                   ADVERTISED_1000baseT_Full);
2508
2509                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
2510                                     tp->link_config.flowctrl);
2511         } else {
2512                 /* Asking for a specific link mode. */
2513                 if (tp->link_config.speed == SPEED_1000) {
2514                         if (tp->link_config.duplex == DUPLEX_FULL)
2515                                 new_adv = ADVERTISED_1000baseT_Full;
2516                         else
2517                                 new_adv = ADVERTISED_1000baseT_Half;
2518                 } else if (tp->link_config.speed == SPEED_100) {
2519                         if (tp->link_config.duplex == DUPLEX_FULL)
2520                                 new_adv = ADVERTISED_100baseT_Full;
2521                         else
2522                                 new_adv = ADVERTISED_100baseT_Half;
2523                 } else {
2524                         if (tp->link_config.duplex == DUPLEX_FULL)
2525                                 new_adv = ADVERTISED_10baseT_Full;
2526                         else
2527                                 new_adv = ADVERTISED_10baseT_Half;
2528                 }
2529
2530                 tg3_phy_autoneg_cfg(tp, new_adv,
2531                                     tp->link_config.flowctrl);
2532         }
2533
2534         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2535             tp->link_config.speed != SPEED_INVALID) {
2536                 u32 bmcr, orig_bmcr;
2537
2538                 tp->link_config.active_speed = tp->link_config.speed;
2539                 tp->link_config.active_duplex = tp->link_config.duplex;
2540
2541                 bmcr = 0;
2542                 switch (tp->link_config.speed) {
2543                 default:
2544                 case SPEED_10:
2545                         break;
2546
2547                 case SPEED_100:
2548                         bmcr |= BMCR_SPEED100;
2549                         break;
2550
2551                 case SPEED_1000:
2552                         bmcr |= TG3_BMCR_SPEED1000;
2553                         break;
2554                 }
2555
2556                 if (tp->link_config.duplex == DUPLEX_FULL)
2557                         bmcr |= BMCR_FULLDPLX;
2558
2559                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2560                     (bmcr != orig_bmcr)) {
2561                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2562                         for (i = 0; i < 1500; i++) {
2563                                 u32 tmp;
2564
2565                                 udelay(10);
2566                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2567                                     tg3_readphy(tp, MII_BMSR, &tmp))
2568                                         continue;
2569                                 if (!(tmp & BMSR_LSTATUS)) {
2570                                         udelay(40);
2571                                         break;
2572                                 }
2573                         }
2574                         tg3_writephy(tp, MII_BMCR, bmcr);
2575                         udelay(40);
2576                 }
2577         } else {
2578                 tg3_writephy(tp, MII_BMCR,
2579                              BMCR_ANENABLE | BMCR_ANRESTART);
2580         }
2581 }
2582
2583 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2584 {
2585         int err;
2586
2587         /* Turn off tap power management and set the
2588          * Extended packet length bit.  */
2589         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2590
2591         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
2592         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
2593         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
2594         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
2595         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
2596
2597         udelay(40);
2598
2599         return err;
2600 }
2601
2602 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2603 {
2604         u32 adv_reg, all_mask = 0;
2605
2606         if (mask & ADVERTISED_10baseT_Half)
2607                 all_mask |= ADVERTISE_10HALF;
2608         if (mask & ADVERTISED_10baseT_Full)
2609                 all_mask |= ADVERTISE_10FULL;
2610         if (mask & ADVERTISED_100baseT_Half)
2611                 all_mask |= ADVERTISE_100HALF;
2612         if (mask & ADVERTISED_100baseT_Full)
2613                 all_mask |= ADVERTISE_100FULL;
2614
2615         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2616                 return 0;
2617
2618         if ((adv_reg & all_mask) != all_mask)
2619                 return 0;
2620         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2621                 u32 tg3_ctrl;
2622
2623                 all_mask = 0;
2624                 if (mask & ADVERTISED_1000baseT_Half)
2625                         all_mask |= ADVERTISE_1000HALF;
2626                 if (mask & ADVERTISED_1000baseT_Full)
2627                         all_mask |= ADVERTISE_1000FULL;
2628
2629                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2630                         return 0;
2631
2632                 if ((tg3_ctrl & all_mask) != all_mask)
2633                         return 0;
2634         }
2635         return 1;
2636 }
2637
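/* Check that the pause bits currently in MII_ADVERTISE match the
 * requested flow-control mode, filling *lcladv/*rmtadv for the flow
 * control setup.  On a full-duplex link a mismatch fails the check;
 * on half duplex the advertisement is quietly rewritten for the next
 * negotiation and the check still passes.
 */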
2638 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2639 {
2640         u32 curadv, reqadv;
2641
2642         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2643                 return 1;
2644
2645         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2646         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2647
2648         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2649                 if (curadv != reqadv)
2650                         return 0;
2651
2652                 if (tg3_flag(tp, PAUSE_AUTONEG))
2653                         tg3_readphy(tp, MII_LPA, rmtadv);
2654         } else {
2655                 /* Reprogram the advertisement register, even if it
2656                  * does not affect the current link.  If the link
2657                  * gets renegotiated in the future, we can save an
2658                  * additional renegotiation cycle by advertising
2659                  * it correctly in the first place.
2660                  */
2661                 if (curadv != reqadv) {
2662                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2663                                      ADVERTISE_PAUSE_ASYM);
2664                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2665                 }
2666         }
2667
2668         return 1;
2669 }
2670
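/* Copper link state machine: optionally reset the PHY, apply the
 * 5401 and 5701 A0/B0 errata, poll BMSR and AUX_STAT to learn the
 * negotiated speed/duplex, restart negotiation via
 * tg3_phy_copper_begin() when the link is down, and finally program
 * MAC_MODE, flow control and the link-change event source to match.
 */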
2671 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2672 {
2673         int current_link_up;
2674         u32 bmsr, val;
2675         u32 lcl_adv, rmt_adv;
2676         u16 current_speed;
2677         u8 current_duplex;
2678         int i, err;
2679
2680         tw32(MAC_EVENT, 0);
2681
2682         tw32_f(MAC_STATUS,
2683              (MAC_STATUS_SYNC_CHANGED |
2684               MAC_STATUS_CFG_CHANGED |
2685               MAC_STATUS_MI_COMPLETION |
2686               MAC_STATUS_LNKSTATE_CHANGED));
2687         udelay(40);
2688
2689         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2690                 tw32_f(MAC_MI_MODE,
2691                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2692                 udelay(80);
2693         }
2694
2695         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
2696
2697         /* Some third-party PHYs need to be reset on link going
2698          * down.
2699          */
2700         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2701              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2702              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2703             netif_carrier_ok(tp->dev)) {
2704                 tg3_readphy(tp, MII_BMSR, &bmsr);
2705                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2706                     !(bmsr & BMSR_LSTATUS))
2707                         force_reset = 1;
2708         }
2709         if (force_reset)
2710                 tg3_phy_reset(tp);
2711
2712         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2713                 tg3_readphy(tp, MII_BMSR, &bmsr);
2714                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2715                     !tg3_flag(tp, INIT_COMPLETE))
2716                         bmsr = 0;
2717
2718                 if (!(bmsr & BMSR_LSTATUS)) {
2719                         err = tg3_init_5401phy_dsp(tp);
2720                         if (err)
2721                                 return err;
2722
2723                         tg3_readphy(tp, MII_BMSR, &bmsr);
2724                         for (i = 0; i < 1000; i++) {
2725                                 udelay(10);
2726                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2727                                     (bmsr & BMSR_LSTATUS)) {
2728                                         udelay(40);
2729                                         break;
2730                                 }
2731                         }
2732
2733                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
2734                             TG3_PHY_REV_BCM5401_B0 &&
2735                             !(bmsr & BMSR_LSTATUS) &&
2736                             tp->link_config.active_speed == SPEED_1000) {
2737                                 err = tg3_phy_reset(tp);
2738                                 if (!err)
2739                                         err = tg3_init_5401phy_dsp(tp);
2740                                 if (err)
2741                                         return err;
2742                         }
2743                 }
2744         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2745                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2746                 /* 5701 {A0,B0} CRC bug workaround */
2747                 tg3_writephy(tp, 0x15, 0x0a75);
2748                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
2749                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2750                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
2751         }
2752
2753         /* Clear pending interrupts... */
2754         tg3_readphy(tp, MII_TG3_ISTAT, &val);
2755         tg3_readphy(tp, MII_TG3_ISTAT, &val);
2756
2757         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
2758                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2759         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
2760                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2761
2762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2763             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2764                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2765                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2766                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2767                 else
2768                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2769         }
2770
2771         current_link_up = 0;
2772         current_speed = SPEED_INVALID;
2773         current_duplex = DUPLEX_INVALID;
2774
2775         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
2776                 err = tg3_phy_auxctl_read(tp,
2777                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
2778                                           &val);
2779                 if (!err && !(val & (1 << 10))) {
2780                         tg3_phy_auxctl_write(tp,
2781                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
2782                                              val | (1 << 10));
2783                         goto relink;
2784                 }
2785         }
2786
2787         bmsr = 0;
2788         for (i = 0; i < 100; i++) {
2789                 tg3_readphy(tp, MII_BMSR, &bmsr);
2790                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2791                     (bmsr & BMSR_LSTATUS))
2792                         break;
2793                 udelay(40);
2794         }
2795
2796         if (bmsr & BMSR_LSTATUS) {
2797                 u32 aux_stat, bmcr;
2798
2799                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2800                 for (i = 0; i < 2000; i++) {
2801                         udelay(10);
2802                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2803                             aux_stat)
2804                                 break;
2805                 }
2806
2807                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2808                                              &current_speed,
2809                                              &current_duplex);
2810
2811                 bmcr = 0;
2812                 for (i = 0; i < 200; i++) {
2813                         tg3_readphy(tp, MII_BMCR, &bmcr);
2814                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2815                                 continue;
2816                         if (bmcr && bmcr != 0x7fff)
2817                                 break;
2818                         udelay(10);
2819                 }
2820
2821                 lcl_adv = 0;
2822                 rmt_adv = 0;
2823
2824                 tp->link_config.active_speed = current_speed;
2825                 tp->link_config.active_duplex = current_duplex;
2826
2827                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2828                         if ((bmcr & BMCR_ANENABLE) &&
2829                             tg3_copper_is_advertising_all(tp,
2830                                                 tp->link_config.advertising)) {
2831                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2832                                                                   &rmt_adv))
2833                                         current_link_up = 1;
2834                         }
2835                 } else {
2836                         if (!(bmcr & BMCR_ANENABLE) &&
2837                             tp->link_config.speed == current_speed &&
2838                             tp->link_config.duplex == current_duplex &&
2839                             tp->link_config.flowctrl ==
2840                             tp->link_config.active_flowctrl) {
2841                                 current_link_up = 1;
2842                         }
2843                 }
2844
2845                 if (current_link_up == 1 &&
2846                     tp->link_config.active_duplex == DUPLEX_FULL)
2847                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2848         }
2849
2850 relink:
2851         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2852                 tg3_phy_copper_begin(tp);
2853
2854                 tg3_readphy(tp, MII_BMSR, &bmsr);
2855                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
2856                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
2857                         current_link_up = 1;
2858         }
2859
2860         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2861         if (current_link_up == 1) {
2862                 if (tp->link_config.active_speed == SPEED_100 ||
2863                     tp->link_config.active_speed == SPEED_10)
2864                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2865                 else
2866                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2867         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2868                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2869         else
2870                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2871
2872         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2873         if (tp->link_config.active_duplex == DUPLEX_HALF)
2874                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2875
2876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2877                 if (current_link_up == 1 &&
2878                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2879                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2880                 else
2881                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2882         }
2883
2884         /* Without this setting the Netgear GA302T PHY apparently
2885          * does not send or receive packets.
2886          */
2887         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
2888             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2889                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2890                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2891                 udelay(80);
2892         }
2893
2894         tw32_f(MAC_MODE, tp->mac_mode);
2895         udelay(40);
2896
2897         tg3_phy_eee_adjust(tp, current_link_up);
2898
2899         if (tg3_flag(tp, USE_LINKCHG_REG)) {
2900                 /* Polled via timer. */
2901                 tw32_f(MAC_EVENT, 0);
2902         } else {
2903                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2904         }
2905         udelay(40);
2906
2907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2908             current_link_up == 1 &&
2909             tp->link_config.active_speed == SPEED_1000 &&
2910             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
2911                 udelay(120);
2912                 tw32_f(MAC_STATUS,
2913                      (MAC_STATUS_SYNC_CHANGED |
2914                       MAC_STATUS_CFG_CHANGED));
2915                 udelay(40);
2916                 tg3_write_mem(tp,
2917                               NIC_SRAM_FIRMWARE_MBOX,
2918                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2919         }
2920
2921         /* Prevent send BD corruption. */
2922         if (tg3_flag(tp, CLKREQ_BUG)) {
2923                 u16 oldlnkctl, newlnkctl;
2924
2925                 pci_read_config_word(tp->pdev,
2926                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2927                                      &oldlnkctl);
2928                 if (tp->link_config.active_speed == SPEED_100 ||
2929                     tp->link_config.active_speed == SPEED_10)
2930                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2931                 else
2932                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2933                 if (newlnkctl != oldlnkctl)
2934                         pci_write_config_word(tp->pdev,
2935                                               tp->pcie_cap + PCI_EXP_LNKCTL,
2936                                               newlnkctl);
2937         }
2938
2939         if (current_link_up != netif_carrier_ok(tp->dev)) {
2940                 if (current_link_up)
2941                         netif_carrier_on(tp->dev);
2942                 else
2943                         netif_carrier_off(tp->dev);
2944                 tg3_link_report(tp);
2945         }
2946
2947         return 0;
2948 }
2949
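/* Software autonegotiation state for a fiber (1000BASE-X) link.  The
 * MR_* flags appear to mirror the management variables of the IEEE
 * 802.3 Clause 37 autoneg description; tg3_fiber_aneg_smachine()
 * below walks the ANEG_STATE_* values and decodes the link partner's
 * ability bits from the received configuration words.
 */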
2950 struct tg3_fiber_aneginfo {
2951         int state;
2952 #define ANEG_STATE_UNKNOWN              0
2953 #define ANEG_STATE_AN_ENABLE            1
2954 #define ANEG_STATE_RESTART_INIT         2
2955 #define ANEG_STATE_RESTART              3
2956 #define ANEG_STATE_DISABLE_LINK_OK      4
2957 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2958 #define ANEG_STATE_ABILITY_DETECT       6
2959 #define ANEG_STATE_ACK_DETECT_INIT      7
2960 #define ANEG_STATE_ACK_DETECT           8
2961 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2962 #define ANEG_STATE_COMPLETE_ACK         10
2963 #define ANEG_STATE_IDLE_DETECT_INIT     11
2964 #define ANEG_STATE_IDLE_DETECT          12
2965 #define ANEG_STATE_LINK_OK              13
2966 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2967 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2968
2969         u32 flags;
2970 #define MR_AN_ENABLE            0x00000001
2971 #define MR_RESTART_AN           0x00000002
2972 #define MR_AN_COMPLETE          0x00000004
2973 #define MR_PAGE_RX              0x00000008
2974 #define MR_NP_LOADED            0x00000010
2975 #define MR_TOGGLE_TX            0x00000020
2976 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2977 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2978 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2979 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2980 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2981 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2982 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2983 #define MR_TOGGLE_RX            0x00002000
2984 #define MR_NP_RX                0x00004000
2985
2986 #define MR_LINK_OK              0x80000000
2987
2988         unsigned long link_time, cur_time;
2989
2990         u32 ability_match_cfg;
2991         int ability_match_count;
2992
2993         char ability_match, idle_match, ack_match;
2994
2995         u32 txconfig, rxconfig;
2996 #define ANEG_CFG_NP             0x00000080
2997 #define ANEG_CFG_ACK            0x00000040
2998 #define ANEG_CFG_RF2            0x00000020
2999 #define ANEG_CFG_RF1            0x00000010
3000 #define ANEG_CFG_PS2            0x00000001
3001 #define ANEG_CFG_PS1            0x00008000
3002 #define ANEG_CFG_HD             0x00004000
3003 #define ANEG_CFG_FD             0x00002000
3004 #define ANEG_CFG_INVAL          0x00001f06
3005
3006 };
3007 #define ANEG_OK         0
3008 #define ANEG_DONE       1
3009 #define ANEG_TIMER_ENAB 2
3010 #define ANEG_FAILED     -1
3011
3012 #define ANEG_STATE_SETTLE_TIME  10000
3013
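/* Advance the software fiber autoneg state machine by one tick.
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while waiting out
 * a settle interval, ANEG_DONE on completion, or ANEG_FAILED.  The
 * caller, fiber_autoneg(), ticks this roughly once per microsecond.
 */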
3014 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3015                                    struct tg3_fiber_aneginfo *ap)
3016 {
3017         u16 flowctrl;
3018         unsigned long delta;
3019         u32 rx_cfg_reg;
3020         int ret;
3021
3022         if (ap->state == ANEG_STATE_UNKNOWN) {
3023                 ap->rxconfig = 0;
3024                 ap->link_time = 0;
3025                 ap->cur_time = 0;
3026                 ap->ability_match_cfg = 0;
3027                 ap->ability_match_count = 0;
3028                 ap->ability_match = 0;
3029                 ap->idle_match = 0;
3030                 ap->ack_match = 0;
3031         }
3032         ap->cur_time++;
3033
3034         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3035                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3036
3037                 if (rx_cfg_reg != ap->ability_match_cfg) {
3038                         ap->ability_match_cfg = rx_cfg_reg;
3039                         ap->ability_match = 0;
3040                         ap->ability_match_count = 0;
3041                 } else {
3042                         if (++ap->ability_match_count > 1) {
3043                                 ap->ability_match = 1;
3044                                 ap->ability_match_cfg = rx_cfg_reg;
3045                         }
3046                 }
3047                 if (rx_cfg_reg & ANEG_CFG_ACK)
3048                         ap->ack_match = 1;
3049                 else
3050                         ap->ack_match = 0;
3051
3052                 ap->idle_match = 0;
3053         } else {
3054                 ap->idle_match = 1;
3055                 ap->ability_match_cfg = 0;
3056                 ap->ability_match_count = 0;
3057                 ap->ability_match = 0;
3058                 ap->ack_match = 0;
3059
3060                 rx_cfg_reg = 0;
3061         }
3062
3063         ap->rxconfig = rx_cfg_reg;
3064         ret = ANEG_OK;
3065
3066         switch (ap->state) {
3067         case ANEG_STATE_UNKNOWN:
3068                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3069                         ap->state = ANEG_STATE_AN_ENABLE;
3070
3071                 /* fallthru */
3072         case ANEG_STATE_AN_ENABLE:
3073                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3074                 if (ap->flags & MR_AN_ENABLE) {
3075                         ap->link_time = 0;
3076                         ap->cur_time = 0;
3077                         ap->ability_match_cfg = 0;
3078                         ap->ability_match_count = 0;
3079                         ap->ability_match = 0;
3080                         ap->idle_match = 0;
3081                         ap->ack_match = 0;
3082
3083                         ap->state = ANEG_STATE_RESTART_INIT;
3084                 } else {
3085                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3086                 }
3087                 break;
3088
3089         case ANEG_STATE_RESTART_INIT:
3090                 ap->link_time = ap->cur_time;
3091                 ap->flags &= ~(MR_NP_LOADED);
3092                 ap->txconfig = 0;
3093                 tw32(MAC_TX_AUTO_NEG, 0);
3094                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3095                 tw32_f(MAC_MODE, tp->mac_mode);
3096                 udelay(40);
3097
3098                 ret = ANEG_TIMER_ENAB;
3099                 ap->state = ANEG_STATE_RESTART;
3100
3101                 /* fallthru */
3102         case ANEG_STATE_RESTART:
3103                 delta = ap->cur_time - ap->link_time;
3104                 if (delta > ANEG_STATE_SETTLE_TIME)
3105                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3106                 else
3107                         ret = ANEG_TIMER_ENAB;
3108                 break;
3109
3110         case ANEG_STATE_DISABLE_LINK_OK:
3111                 ret = ANEG_DONE;
3112                 break;
3113
3114         case ANEG_STATE_ABILITY_DETECT_INIT:
3115                 ap->flags &= ~(MR_TOGGLE_TX);
3116                 ap->txconfig = ANEG_CFG_FD;
3117                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3118                 if (flowctrl & ADVERTISE_1000XPAUSE)
3119                         ap->txconfig |= ANEG_CFG_PS1;
3120                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3121                         ap->txconfig |= ANEG_CFG_PS2;
3122                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3123                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3124                 tw32_f(MAC_MODE, tp->mac_mode);
3125                 udelay(40);
3126
3127                 ap->state = ANEG_STATE_ABILITY_DETECT;
3128                 break;
3129
3130         case ANEG_STATE_ABILITY_DETECT:
3131                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3132                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3133                 break;
3134
3135         case ANEG_STATE_ACK_DETECT_INIT:
3136                 ap->txconfig |= ANEG_CFG_ACK;
3137                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3138                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3139                 tw32_f(MAC_MODE, tp->mac_mode);
3140                 udelay(40);
3141
3142                 ap->state = ANEG_STATE_ACK_DETECT;
3143
3144                 /* fallthru */
3145         case ANEG_STATE_ACK_DETECT:
3146                 if (ap->ack_match != 0) {
3147                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3148                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3149                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3150                         } else {
3151                                 ap->state = ANEG_STATE_AN_ENABLE;
3152                         }
3153                 } else if (ap->ability_match != 0 &&
3154                            ap->rxconfig == 0) {
3155                         ap->state = ANEG_STATE_AN_ENABLE;
3156                 }
3157                 break;
3158
3159         case ANEG_STATE_COMPLETE_ACK_INIT:
3160                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3161                         ret = ANEG_FAILED;
3162                         break;
3163                 }
3164                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3165                                MR_LP_ADV_HALF_DUPLEX |
3166                                MR_LP_ADV_SYM_PAUSE |
3167                                MR_LP_ADV_ASYM_PAUSE |
3168                                MR_LP_ADV_REMOTE_FAULT1 |
3169                                MR_LP_ADV_REMOTE_FAULT2 |
3170                                MR_LP_ADV_NEXT_PAGE |
3171                                MR_TOGGLE_RX |
3172                                MR_NP_RX);
3173                 if (ap->rxconfig & ANEG_CFG_FD)
3174                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3175                 if (ap->rxconfig & ANEG_CFG_HD)
3176                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3177                 if (ap->rxconfig & ANEG_CFG_PS1)
3178                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3179                 if (ap->rxconfig & ANEG_CFG_PS2)
3180                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3181                 if (ap->rxconfig & ANEG_CFG_RF1)
3182                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3183                 if (ap->rxconfig & ANEG_CFG_RF2)
3184                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3185                 if (ap->rxconfig & ANEG_CFG_NP)
3186                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3187
3188                 ap->link_time = ap->cur_time;
3189
3190                 ap->flags ^= (MR_TOGGLE_TX);
3191                 if (ap->rxconfig & 0x0008)
3192                         ap->flags |= MR_TOGGLE_RX;
3193                 if (ap->rxconfig & ANEG_CFG_NP)
3194                         ap->flags |= MR_NP_RX;
3195                 ap->flags |= MR_PAGE_RX;
3196
3197                 ap->state = ANEG_STATE_COMPLETE_ACK;
3198                 ret = ANEG_TIMER_ENAB;
3199                 break;
3200
3201         case ANEG_STATE_COMPLETE_ACK:
3202                 if (ap->ability_match != 0 &&
3203                     ap->rxconfig == 0) {
3204                         ap->state = ANEG_STATE_AN_ENABLE;
3205                         break;
3206                 }
3207                 delta = ap->cur_time - ap->link_time;
3208                 if (delta > ANEG_STATE_SETTLE_TIME) {
3209                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3210                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3211                         } else {
3212                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3213                                     !(ap->flags & MR_NP_RX)) {
3214                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3215                                 } else {
3216                                         ret = ANEG_FAILED;
3217                                 }
3218                         }
3219                 }
3220                 break;
3221
3222         case ANEG_STATE_IDLE_DETECT_INIT:
3223                 ap->link_time = ap->cur_time;
3224                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3225                 tw32_f(MAC_MODE, tp->mac_mode);
3226                 udelay(40);
3227
3228                 ap->state = ANEG_STATE_IDLE_DETECT;
3229                 ret = ANEG_TIMER_ENAB;
3230                 break;
3231
3232         case ANEG_STATE_IDLE_DETECT:
3233                 if (ap->ability_match != 0 &&
3234                     ap->rxconfig == 0) {
3235                         ap->state = ANEG_STATE_AN_ENABLE;
3236                         break;
3237                 }
3238                 delta = ap->cur_time - ap->link_time;
3239                 if (delta > ANEG_STATE_SETTLE_TIME) {
3240                         /* XXX another gem from the Broadcom driver :( */
3241                         ap->state = ANEG_STATE_LINK_OK;
3242                 }
3243                 break;
3244
3245         case ANEG_STATE_LINK_OK:
3246                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3247                 ret = ANEG_DONE;
3248                 break;
3249
3250         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3251                 /* ??? unimplemented */
3252                 break;
3253
3254         case ANEG_STATE_NEXT_PAGE_WAIT:
3255                 /* ??? unimplemented */
3256                 break;
3257
3258         default:
3259                 ret = ANEG_FAILED;
3260                 break;
3261         }
3262
3263         return ret;
3264 }
3265
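/* Drive the software autoneg state machine to completion (or time it
 * out after ~195000 one-microsecond ticks).  On return, *txflags
 * holds the config word we transmitted and *rxflags the MR_* result
 * flags; the return value is 1 when autoneg finished and any of the
 * completion, link-ok or full-duplex-ability flags is set.
 */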
3266 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3267 {
3268         int res = 0;
3269         struct tg3_fiber_aneginfo aninfo;
3270         int status = ANEG_FAILED;
3271         unsigned int tick;
3272         u32 tmp;
3273
3274         tw32_f(MAC_TX_AUTO_NEG, 0);
3275
3276         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3277         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3278         udelay(40);
3279
3280         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3281         udelay(40);
3282
3283         memset(&aninfo, 0, sizeof(aninfo));
3284         aninfo.flags |= MR_AN_ENABLE;
3285         aninfo.state = ANEG_STATE_UNKNOWN;
3286         aninfo.cur_time = 0;
3287         tick = 0;
3288         while (++tick < 195000) {
3289                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3290                 if (status == ANEG_DONE || status == ANEG_FAILED)
3291                         break;
3292
3293                 udelay(1);
3294         }
3295
3296         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3297         tw32_f(MAC_MODE, tp->mac_mode);
3298         udelay(40);
3299
3300         *txflags = aninfo.txconfig;
3301         *rxflags = aninfo.flags;
3302
3303         if (status == ANEG_DONE &&
3304             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3305                              MR_LP_ADV_FULL_DUPLEX)))
3306                 res = 1;
3307
3308         return res;
3309 }
3310
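/* One-time bring-up sequence for the external BCM8002 SerDes PHY.
 * The raw register numbers written below are presumably vendor
 * shadow registers; per the comments the sequence sets the PLL lock
 * range, soft-resets the PHY, cycles POR, and finally deselects the
 * channel register so the PHY ID can be read later.
 */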
3311 static void tg3_init_bcm8002(struct tg3 *tp)
3312 {
3313         u32 mac_status = tr32(MAC_STATUS);
3314         int i;
3315
3316         /* Reset when initializing for the first time or when we have a link. */
3317         if (tg3_flag(tp, INIT_COMPLETE) &&
3318             !(mac_status & MAC_STATUS_PCS_SYNCED))
3319                 return;
3320
3321         /* Set PLL lock range. */
3322         tg3_writephy(tp, 0x16, 0x8007);
3323
3324         /* SW reset */
3325         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3326
3327         /* Wait for reset to complete. */
3328         /* XXX schedule_timeout() ... */
3329         for (i = 0; i < 500; i++)
3330                 udelay(10);
3331
3332         /* Config mode; select PMA/Ch 1 regs. */
3333         tg3_writephy(tp, 0x10, 0x8411);
3334
3335         /* Enable auto-lock and comdet, select txclk for tx. */
3336         tg3_writephy(tp, 0x11, 0x0a10);
3337
3338         tg3_writephy(tp, 0x18, 0x00a0);
3339         tg3_writephy(tp, 0x16, 0x41ff);
3340
3341         /* Assert and deassert POR. */
3342         tg3_writephy(tp, 0x13, 0x0400);
3343         udelay(40);
3344         tg3_writephy(tp, 0x13, 0x0000);
3345
3346         tg3_writephy(tp, 0x11, 0x0a50);
3347         udelay(40);
3348         tg3_writephy(tp, 0x11, 0x0a10);
3349
3350         /* Wait for signal to stabilize */
3351         /* XXX schedule_timeout() ... */
3352         for (i = 0; i < 15000; i++)
3353                 udelay(10);
3354
3355         /* Deselect the channel register so we can read the PHYID
3356          * later.
3357          */
3358         tg3_writephy(tp, 0x10, 0x8011);
3359 }
3360
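/* Fiber link setup when the on-chip SG_DIG block performs 1000BASE-X
 * autonegotiation in hardware.  Programs the expected SG_DIG_CTRL
 * value (restarting autoneg if it changed), picks up pause ability
 * from SG_DIG_STATUS, and falls back to parallel detection when the
 * partner never completes autoneg.  Returns nonzero if the link is
 * considered up.
 */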
3361 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3362 {
3363         u16 flowctrl;
3364         u32 sg_dig_ctrl, sg_dig_status;
3365         u32 serdes_cfg, expected_sg_dig_ctrl;
3366         int workaround, port_a;
3367         int current_link_up;
3368
3369         serdes_cfg = 0;
3370         expected_sg_dig_ctrl = 0;
3371         workaround = 0;
3372         port_a = 1;
3373         current_link_up = 0;
3374
3375         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3376             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3377                 workaround = 1;
3378                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3379                         port_a = 0;
3380
3381                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3382                 /* preserve bits 20-23 for voltage regulator */
3383                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3384         }
3385
3386         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3387
3388         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3389                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3390                         if (workaround) {
3391                                 u32 val = serdes_cfg;
3392
3393                                 if (port_a)
3394                                         val |= 0xc010000;
3395                                 else
3396                                         val |= 0x4010000;
3397                                 tw32_f(MAC_SERDES_CFG, val);
3398                         }
3399
3400                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3401                 }
3402                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3403                         tg3_setup_flow_control(tp, 0, 0);
3404                         current_link_up = 1;
3405                 }
3406                 goto out;
3407         }
3408
3409         /* Want auto-negotiation.  */
3410         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3411
3412         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3413         if (flowctrl & ADVERTISE_1000XPAUSE)
3414                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3415         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3416                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3417
3418         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3419                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3420                     tp->serdes_counter &&
3421                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3422                                     MAC_STATUS_RCVD_CFG)) ==
3423                      MAC_STATUS_PCS_SYNCED)) {
3424                         tp->serdes_counter--;
3425                         current_link_up = 1;
3426                         goto out;
3427                 }
3428 restart_autoneg:
3429                 if (workaround)
3430                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3431                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3432                 udelay(5);
3433                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3434
3435                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3436                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3437         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3438                                  MAC_STATUS_SIGNAL_DET)) {
3439                 sg_dig_status = tr32(SG_DIG_STATUS);
3440                 mac_status = tr32(MAC_STATUS);
3441
3442                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3443                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3444                         u32 local_adv = 0, remote_adv = 0;
3445
3446                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3447                                 local_adv |= ADVERTISE_1000XPAUSE;
3448                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3449                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3450
3451                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3452                                 remote_adv |= LPA_1000XPAUSE;
3453                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3454                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3455
3456                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3457                         current_link_up = 1;
3458                         tp->serdes_counter = 0;
3459                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3460                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3461                         if (tp->serdes_counter)
3462                                 tp->serdes_counter--;
3463                         else {
3464                                 if (workaround) {
3465                                         u32 val = serdes_cfg;
3466
3467                                         if (port_a)
3468                                                 val |= 0xc010000;
3469                                         else
3470                                                 val |= 0x4010000;
3471
3472                                         tw32_f(MAC_SERDES_CFG, val);
3473                                 }
3474
3475                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3476                                 udelay(40);
3477
3478                                 /* Parallel detection: the link is up only
3479                                  * if we have PCS_SYNC and are not
3480                                  * receiving config code words. */
3481                                 mac_status = tr32(MAC_STATUS);
3482                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3483                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3484                                         tg3_setup_flow_control(tp, 0, 0);
3485                                         current_link_up = 1;
3486                                         tp->phy_flags |=
3487                                                 TG3_PHYFLG_PARALLEL_DETECT;
3488                                         tp->serdes_counter =
3489                                                 SERDES_PARALLEL_DET_TIMEOUT;
3490                                 } else
3491                                         goto restart_autoneg;
3492                         }
3493                 }
3494         } else {
3495                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3496                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3497         }
3498
3499 out:
3500         return current_link_up;
3501 }
3502
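/* Fiber link setup without the hardware autoneg block: either run
 * the software state machine above, or simply force a 1000FD link
 * when autoneg is disabled.  Returns nonzero if the link is
 * considered up.
 */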
3503 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3504 {
3505         int current_link_up = 0;
3506
3507         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3508                 goto out;
3509
3510         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3511                 u32 txflags, rxflags;
3512                 int i;
3513
3514                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3515                         u32 local_adv = 0, remote_adv = 0;
3516
3517                         if (txflags & ANEG_CFG_PS1)
3518                                 local_adv |= ADVERTISE_1000XPAUSE;
3519                         if (txflags & ANEG_CFG_PS2)
3520                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3521
3522                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3523                                 remote_adv |= LPA_1000XPAUSE;
3524                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3525                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3526
3527                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3528
3529                         current_link_up = 1;
3530                 }
3531                 for (i = 0; i < 30; i++) {
3532                         udelay(20);
3533                         tw32_f(MAC_STATUS,
3534                                (MAC_STATUS_SYNC_CHANGED |
3535                                 MAC_STATUS_CFG_CHANGED));
3536                         udelay(40);
3537                         if ((tr32(MAC_STATUS) &
3538                              (MAC_STATUS_SYNC_CHANGED |
3539                               MAC_STATUS_CFG_CHANGED)) == 0)
3540                                 break;
3541                 }
3542
3543                 mac_status = tr32(MAC_STATUS);
3544                 if (current_link_up == 0 &&
3545                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3546                     !(mac_status & MAC_STATUS_RCVD_CFG))
3547                         current_link_up = 1;
3548         } else {
3549                 tg3_setup_flow_control(tp, 0, 0);
3550
3551                 /* Forcing 1000FD link up. */
3552                 current_link_up = 1;
3553
3554                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3555                 udelay(40);
3556
3557                 tw32_f(MAC_MODE, tp->mac_mode);
3558                 udelay(40);
3559         }
3560
3561 out:
3562         return current_link_up;
3563 }
3564
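/* Top-level link setup for TBI/fiber ports.  Returns early when the
 * link already looks healthy, otherwise programs MAC_MODE for TBI,
 * runs hardware or software autoneg, and brings the carrier state
 * and link LED in line with the result.
 */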
3565 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3566 {
3567         u32 orig_pause_cfg;
3568         u16 orig_active_speed;
3569         u8 orig_active_duplex;
3570         u32 mac_status;
3571         int current_link_up;
3572         int i;
3573
3574         orig_pause_cfg = tp->link_config.active_flowctrl;
3575         orig_active_speed = tp->link_config.active_speed;
3576         orig_active_duplex = tp->link_config.active_duplex;
3577
3578         if (!tg3_flag(tp, HW_AUTONEG) &&
3579             netif_carrier_ok(tp->dev) &&
3580             tg3_flag(tp, INIT_COMPLETE)) {
3581                 mac_status = tr32(MAC_STATUS);
3582                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3583                                MAC_STATUS_SIGNAL_DET |
3584                                MAC_STATUS_CFG_CHANGED |
3585                                MAC_STATUS_RCVD_CFG);
3586                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3587                                    MAC_STATUS_SIGNAL_DET)) {
3588                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3589                                             MAC_STATUS_CFG_CHANGED));
3590                         return 0;
3591                 }
3592         }
3593
3594         tw32_f(MAC_TX_AUTO_NEG, 0);
3595
3596         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3597         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3598         tw32_f(MAC_MODE, tp->mac_mode);
3599         udelay(40);
3600
3601         if (tp->phy_id == TG3_PHY_ID_BCM8002)
3602                 tg3_init_bcm8002(tp);
3603
3604         /* Enable link change events even when polling the serdes. */
3605         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3606         udelay(40);
3607
3608         current_link_up = 0;
3609         mac_status = tr32(MAC_STATUS);
3610
3611         if (tg3_flag(tp, HW_AUTONEG))
3612                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3613         else
3614                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3615
3616         tp->napi[0].hw_status->status =
3617                 (SD_STATUS_UPDATED |
3618                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3619
3620         for (i = 0; i < 100; i++) {
3621                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3622                                     MAC_STATUS_CFG_CHANGED));
3623                 udelay(5);
3624                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3625                                          MAC_STATUS_CFG_CHANGED |
3626                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3627                         break;
3628         }
3629
3630         mac_status = tr32(MAC_STATUS);
3631         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3632                 current_link_up = 0;
3633                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3634                     tp->serdes_counter == 0) {
3635                         tw32_f(MAC_MODE, (tp->mac_mode |
3636                                           MAC_MODE_SEND_CONFIGS));
3637                         udelay(1);
3638                         tw32_f(MAC_MODE, tp->mac_mode);
3639                 }
3640         }
3641
3642         if (current_link_up == 1) {
3643                 tp->link_config.active_speed = SPEED_1000;
3644                 tp->link_config.active_duplex = DUPLEX_FULL;
3645                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3646                                     LED_CTRL_LNKLED_OVERRIDE |
3647                                     LED_CTRL_1000MBPS_ON));
3648         } else {
3649                 tp->link_config.active_speed = SPEED_INVALID;
3650                 tp->link_config.active_duplex = DUPLEX_INVALID;
3651                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3652                                     LED_CTRL_LNKLED_OVERRIDE |
3653                                     LED_CTRL_TRAFFIC_OVERRIDE));
3654         }
3655
3656         if (current_link_up != netif_carrier_ok(tp->dev)) {
3657                 if (current_link_up)
3658                         netif_carrier_on(tp->dev);
3659                 else
3660                         netif_carrier_off(tp->dev);
3661                 tg3_link_report(tp);
3662         } else {
3663                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3664                 if (orig_pause_cfg != now_pause_cfg ||
3665                     orig_active_speed != tp->link_config.active_speed ||
3666                     orig_active_duplex != tp->link_config.active_duplex)
3667                         tg3_link_report(tp);
3668         }
3669
3670         return 0;
3671 }
3672
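/* Link setup for serdes ports managed through an MII-style register
 * set (the 1000BASE-X ADVERTISE/LPA registers).  Handles autoneg
 * restart, the forced-mode fallback, and the 5714 quirk of deriving
 * link status from MAC_TX_STATUS rather than BMSR.
 */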
3673 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3674 {
3675         int current_link_up, err = 0;
3676         u32 bmsr, bmcr;
3677         u16 current_speed;
3678         u8 current_duplex;
3679         u32 local_adv, remote_adv;
3680
3681         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3682         tw32_f(MAC_MODE, tp->mac_mode);
3683         udelay(40);
3684
3685         tw32(MAC_EVENT, 0);
3686
3687         tw32_f(MAC_STATUS,
3688              (MAC_STATUS_SYNC_CHANGED |
3689               MAC_STATUS_CFG_CHANGED |
3690               MAC_STATUS_MI_COMPLETION |
3691               MAC_STATUS_LNKSTATE_CHANGED));
3692         udelay(40);
3693
3694         if (force_reset)
3695                 tg3_phy_reset(tp);
3696
3697         current_link_up = 0;
3698         current_speed = SPEED_INVALID;
3699         current_duplex = DUPLEX_INVALID;
3700
3701         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3702         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3703         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3704                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3705                         bmsr |= BMSR_LSTATUS;
3706                 else
3707                         bmsr &= ~BMSR_LSTATUS;
3708         }
3709
3710         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3711
3712         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3713             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
3714                 /* do nothing, just check for link up at the end */
3715         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3716                 u32 adv, new_adv;
3717
3718                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3719                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3720                                   ADVERTISE_1000XPAUSE |
3721                                   ADVERTISE_1000XPSE_ASYM |
3722                                   ADVERTISE_SLCT);
3723
3724                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3725
3726                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3727                         new_adv |= ADVERTISE_1000XHALF;
3728                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3729                         new_adv |= ADVERTISE_1000XFULL;
3730
3731                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3732                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3733                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3734                         tg3_writephy(tp, MII_BMCR, bmcr);
3735
3736                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3737                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3738                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3739
3740                         return err;
3741                 }
3742         } else {
3743                 u32 new_bmcr;
3744
3745                 bmcr &= ~BMCR_SPEED1000;
3746                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3747
3748                 if (tp->link_config.duplex == DUPLEX_FULL)
3749                         new_bmcr |= BMCR_FULLDPLX;
3750
3751                 if (new_bmcr != bmcr) {
3752                         /* BMCR_SPEED1000 is a reserved bit that needs
3753                          * to be set on write.
3754                          */
3755                         new_bmcr |= BMCR_SPEED1000;
3756
3757                         /* Force a linkdown */
3758                         if (netif_carrier_ok(tp->dev)) {
3759                                 u32 adv;
3760
3761                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3762                                 adv &= ~(ADVERTISE_1000XFULL |
3763                                          ADVERTISE_1000XHALF |
3764                                          ADVERTISE_SLCT);
3765                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3766                                 tg3_writephy(tp, MII_BMCR, bmcr |
3767                                                            BMCR_ANRESTART |
3768                                                            BMCR_ANENABLE);
3769                                 udelay(10);
3770                                 netif_carrier_off(tp->dev);
3771                         }
3772                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3773                         bmcr = new_bmcr;
3774                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3775                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3776                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3777                             ASIC_REV_5714) {
3778                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3779                                         bmsr |= BMSR_LSTATUS;
3780                                 else
3781                                         bmsr &= ~BMSR_LSTATUS;
3782                         }
3783                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3784                 }
3785         }
3786
3787         if (bmsr & BMSR_LSTATUS) {
3788                 current_speed = SPEED_1000;
3789                 current_link_up = 1;
3790                 if (bmcr & BMCR_FULLDPLX)
3791                         current_duplex = DUPLEX_FULL;
3792                 else
3793                         current_duplex = DUPLEX_HALF;
3794
3795                 local_adv = 0;
3796                 remote_adv = 0;
3797
3798                 if (bmcr & BMCR_ANENABLE) {
3799                         u32 common;
3800
3801                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3802                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3803                         common = local_adv & remote_adv;
3804                         if (common & (ADVERTISE_1000XHALF |
3805                                       ADVERTISE_1000XFULL)) {
3806                                 if (common & ADVERTISE_1000XFULL)
3807                                         current_duplex = DUPLEX_FULL;
3808                                 else
3809                                         current_duplex = DUPLEX_HALF;
3810                         } else if (!tg3_flag(tp, 5780_CLASS)) {
3811                                 /* Link is up via parallel detect */
3812                         } else {
3813                                 current_link_up = 0;
3814                         }
3815                 }
3816         }
3817
3818         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3819                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3820
3821         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3822         if (tp->link_config.active_duplex == DUPLEX_HALF)
3823                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3824
3825         tw32_f(MAC_MODE, tp->mac_mode);
3826         udelay(40);
3827
3828         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3829
3830         tp->link_config.active_speed = current_speed;
3831         tp->link_config.active_duplex = current_duplex;
3832
3833         if (current_link_up != netif_carrier_ok(tp->dev)) {
3834                 if (current_link_up)
3835                         netif_carrier_on(tp->dev);
3836                 else {
3837                         netif_carrier_off(tp->dev);
3838                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3839                 }
3840                 tg3_link_report(tp);
3841         }
3842         return err;
3843 }
3844
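/* Parallel-detection maintenance for MII serdes ports, apparently
 * meant to run periodically (serdes_counter is decremented once per
 * call to give autoneg time to finish).  With no carrier it forces
 * the link up when signal is detected but no config code words are
 * received; with carrier it re-enables autoneg as soon as config
 * code words reappear.
 */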
3845 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3846 {
3847         if (tp->serdes_counter) {
3848                 /* Give autoneg time to complete. */
3849                 tp->serdes_counter--;
3850                 return;
3851         }
3852
3853         if (!netif_carrier_ok(tp->dev) &&
3854             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3855                 u32 bmcr;
3856
3857                 tg3_readphy(tp, MII_BMCR, &bmcr);
3858                 if (bmcr & BMCR_ANENABLE) {
3859                         u32 phy1, phy2;
3860
3861                         /* Select shadow register 0x1f */
3862                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
3863                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
3864
3865                         /* Select expansion interrupt status register */
3866                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
3867                                          MII_TG3_DSP_EXP1_INT_STAT);
3868                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
3869                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
3870
3871                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3872                                 /* We have signal detect and are not receiving
3873                                  * config code words, so the link is up by
3874                                  * parallel detection.
3875                                  */
3876
3877                                 bmcr &= ~BMCR_ANENABLE;
3878                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3879                                 tg3_writephy(tp, MII_BMCR, bmcr);
3880                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
3881                         }
3882                 }
3883         } else if (netif_carrier_ok(tp->dev) &&
3884                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3885                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
3886                 u32 phy2;
3887
3888                 /* Select expansion interrupt status register */
3889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
3890                                  MII_TG3_DSP_EXP1_INT_STAT);
3891                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
3892                 if (phy2 & 0x20) {
3893                         u32 bmcr;
3894
3895                         /* Config code words received, turn on autoneg. */
3896                         tg3_readphy(tp, MII_BMCR, &bmcr);
3897                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3898
3899                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3900
3901                 }
3902         }
3903 }
3904
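/* Dispatch link setup to the copper, fiber, or MII-serdes handler,
 * then apply the link-dependent MAC tweaks common to all of them:
 * the 5784_AX clock prescaler, the TX IPG/slot-time lengths for
 * half-duplex gigabit, the statistics coalescing ticks, and the ASPM
 * L1 entry threshold workaround.
 */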
3905 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3906 {
3907         u32 val;
3908         int err;
3909
3910         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
3911                 err = tg3_setup_fiber_phy(tp, force_reset);
3912         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3913                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3914         else
3915                 err = tg3_setup_copper_phy(tp, force_reset);
3916
3917         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3918                 u32 scale;
3919
3920                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3921                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3922                         scale = 65;
3923                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3924                         scale = 6;
3925                 else
3926                         scale = 12;
3927
3928                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3929                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3930                 tw32(GRC_MISC_CFG, val);
3931         }
3932
3933         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3934               (6 << TX_LENGTHS_IPG_SHIFT);
3935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
3936                 val |= tr32(MAC_TX_LENGTHS) &
3937                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
3938                         TX_LENGTHS_CNT_DWN_VAL_MSK);
3939
3940         if (tp->link_config.active_speed == SPEED_1000 &&
3941             tp->link_config.active_duplex == DUPLEX_HALF)
3942                 tw32(MAC_TX_LENGTHS, val |
3943                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
3944         else
3945                 tw32(MAC_TX_LENGTHS, val |
3946                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
3947
3948         if (!tg3_flag(tp, 5705_PLUS)) {
3949                 if (netif_carrier_ok(tp->dev)) {
3950                         tw32(HOSTCC_STAT_COAL_TICKS,
3951                              tp->coal.stats_block_coalesce_usecs);
3952                 } else {
3953                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3954                 }
3955         }
3956
3957         if (tg3_flag(tp, ASPM_WORKAROUND)) {
3958                 val = tr32(PCIE_PWR_MGMT_THRESH);
3959                 if (!netif_carrier_ok(tp->dev))
3960                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3961                               tp->pwrmgmt_thresh;
3962                 else
3963                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3964                 tw32(PCIE_PWR_MGMT_THRESH, val);
3965         }
3966
3967         return err;
3968 }
3969
3970 static inline int tg3_irq_sync(struct tg3 *tp)
3971 {
3972         return tp->irq_sync;
3973 }
3974
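/* Copy 'len' bytes of registers starting at 'off' into the register
 * dump buffer.  Note that 'dst' is advanced by 'off' first, so each
 * block lands at its natural offset inside the TG3_REG_BLK_SIZE
 * buffer allocated by tg3_dump_state().
 */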
3975 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
3976 {
3977         int i;
3978
3979         dst = (u32 *)((u8 *)dst + off);
3980         for (i = 0; i < len; i += sizeof(u32))
3981                 *dst++ = tr32(off + i);
3982 }
3983
3984 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
3985 {
3986         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
3987         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
3988         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
3989         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
3990         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
3991         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
3992         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
3993         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
3994         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
3995         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
3996         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
3997         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
3998         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
3999         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4000         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4001         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4002         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4003         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4004         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4005
4006         if (tg3_flag(tp, SUPPORT_MSIX))
4007                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4008
4009         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4010         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4011         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4012         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4013         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4014         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4015         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4016         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4017
4018         if (!tg3_flag(tp, 5705_PLUS)) {
4019                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4020                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4021                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4022         }
4023
4024         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4025         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4026         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4027         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4028         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4029
4030         if (tg3_flag(tp, NVRAM))
4031                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4032 }
4033
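/* Debug helper, typically invoked from error paths: snapshots the
 * register block (skipping rows that read back as all zeroes) and
 * prints the hardware status block and NAPI ring indices for every
 * interrupt vector.
 */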
4034 static void tg3_dump_state(struct tg3 *tp)
4035 {
4036         int i;
4037         u32 *regs;
4038
4039         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4040         if (!regs) {
4041                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4042                 return;
4043         }
4044
4045         if (tg3_flag(tp, PCI_EXPRESS)) {
4046                 /* Read up to but not including private PCI registers */
4047                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4048                         regs[i / sizeof(u32)] = tr32(i);
4049         } else
4050                 tg3_dump_legacy_regs(tp, regs);
4051
4052         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4053                 if (!regs[i + 0] && !regs[i + 1] &&
4054                     !regs[i + 2] && !regs[i + 3])
4055                         continue;
4056
4057                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4058                            i * 4,
4059                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4060         }
4061
4062         kfree(regs);
4063
4064         for (i = 0; i < tp->irq_cnt; i++) {
4065                 struct tg3_napi *tnapi = &tp->napi[i];
4066
4067                 /* SW status block */
4068                 netdev_err(tp->dev,
4069                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4070                            i,
4071                            tnapi->hw_status->status,
4072                            tnapi->hw_status->status_tag,
4073                            tnapi->hw_status->rx_jumbo_consumer,
4074                            tnapi->hw_status->rx_consumer,
4075                            tnapi->hw_status->rx_mini_consumer,
4076                            tnapi->hw_status->idx[0].rx_producer,
4077                            tnapi->hw_status->idx[0].tx_consumer);
4078
4079                 netdev_err(tp->dev,
4080                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4081                            i,
4082                            tnapi->last_tag, tnapi->last_irq_tag,
4083                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4084                            tnapi->rx_rcb_ptr,
4085                            tnapi->prodring.rx_std_prod_idx,
4086                            tnapi->prodring.rx_std_cons_idx,
4087                            tnapi->prodring.rx_jmb_prod_idx,
4088                            tnapi->prodring.rx_jmb_cons_idx);
4089         }
4090 }
4091
4092 /* This is called whenever we suspect that the system chipset is re-
4093  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4094  * is bogus tx completions. We try to recover by setting the
4095  * TG3_FLAG_TX_RECOVERY_PENDING flag and letting the reset task in
4096  * the workqueue reset the chip later.
4097  */
4098 static void tg3_tx_recover(struct tg3 *tp)
4099 {
4100         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4101                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4102
4103         netdev_warn(tp->dev,
4104                     "The system may be re-ordering memory-mapped I/O "
4105                     "cycles to the network device, attempting to recover. "
4106                     "Please report the problem to the driver maintainer "
4107                     "and include system chipset information.\n");
4108
4109         spin_lock(&tp->lock);
4110         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4111         spin_unlock(&tp->lock);
4112 }
4113
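/* Free descriptor count in the tx ring.  The subtraction below is
 * modular ring arithmetic: assuming TG3_TX_RING_SIZE is 512, then
 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors
 * still in flight, leaving tx_pending - 7 slots usable.
 */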
4114 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4115 {
4116         /* Tell compiler to fetch tx indices from memory. */
4117         barrier();
4118         return tnapi->tx_pending -
4119                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4120 }
4121
4122 /* Tigon3 never reports partial packet sends.  So we do not
4123  * need special logic to handle SKBs that have not had all
4124  * of their frags sent yet, like SunGEM does.
4125  */
4126 static void tg3_tx(struct tg3_napi *tnapi)
4127 {
4128         struct tg3 *tp = tnapi->tp;
4129         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4130         u32 sw_idx = tnapi->tx_cons;
4131         struct netdev_queue *txq;
4132         int index = tnapi - tp->napi;
4133
4134         if (tg3_flag(tp, ENABLE_TSS))
4135                 index--;
4136
4137         txq = netdev_get_tx_queue(tp->dev, index);
4138
4139         while (sw_idx != hw_idx) {
4140                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4141                 struct sk_buff *skb = ri->skb;
4142                 int i, tx_bug = 0;
4143
4144                 if (unlikely(skb == NULL)) {
4145                         tg3_tx_recover(tp);
4146                         return;
4147                 }
4148
4149                 pci_unmap_single(tp->pdev,
4150                                  dma_unmap_addr(ri, mapping),
4151                                  skb_headlen(skb),
4152                                  PCI_DMA_TODEVICE);
4153
4154                 ri->skb = NULL;
4155
4156                 sw_idx = NEXT_TX(sw_idx);
4157
4158                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4159                         ri = &tnapi->tx_buffers[sw_idx];
4160                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4161                                 tx_bug = 1;
4162
4163                         pci_unmap_page(tp->pdev,
4164                                        dma_unmap_addr(ri, mapping),
4165                                        skb_shinfo(skb)->frags[i].size,
4166                                        PCI_DMA_TODEVICE);
4167                         sw_idx = NEXT_TX(sw_idx);
4168                 }
4169
4170                 dev_kfree_skb(skb);
4171
4172                 if (unlikely(tx_bug)) {
4173                         tg3_tx_recover(tp);
4174                         return;
4175                 }
4176         }
4177
4178         tnapi->tx_cons = sw_idx;
4179
4180         /* Need to make the tx_cons update visible to tg3_start_xmit()
4181          * before checking for netif_queue_stopped().  Without the
4182          * memory barrier, there is a small possibility that tg3_start_xmit()
4183          * will miss it and cause the queue to be stopped forever.
4184          */
4185         smp_mb();
4186
4187         if (unlikely(netif_tx_queue_stopped(txq) &&
4188                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4189                 __netif_tx_lock(txq, smp_processor_id());
4190                 if (netif_tx_queue_stopped(txq) &&
4191                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4192                         netif_tx_wake_queue(txq);
4193                 __netif_tx_unlock(txq);
4194         }
4195 }
4196
4197 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4198 {
4199         if (!ri->skb)
4200                 return;
4201