net: remove interrupt.h inclusion from netdevice.h
[sfrench/cifs-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 /* length of time before we decide the hardware is borked,
111  * and dev->tx_timeout() should be called to fix the problem
112  */
113
114 #define TG3_TX_TIMEOUT                  (5 * HZ)
115
116 /* hardware minimum and maximum for a single frame's data payload */
117 #define TG3_MIN_MTU                     60
118 #define TG3_MAX_MTU(tp) \
119         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
120
121 /* These numbers seem to be hard coded in the NIC firmware somehow.
122  * You can't change the ring sizes, but you can change where you place
123  * them in the NIC onboard memory.
124  */
125 #define TG3_RX_STD_RING_SIZE(tp) \
126         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
127          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
128 #define TG3_DEF_RX_RING_PENDING         200
129 #define TG3_RX_JMB_RING_SIZE(tp) \
130         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
132 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
133 #define TG3_RSS_INDIR_TBL_SIZE          128
134
135 /* Do not place this n-ring entries value into the tp struct itself,
136  * we really want to expose these constants to GCC so that modulo et
137  * al.  operations are done with shifts and masks instead of with
138  * hw multiply/modulo instructions.  Another solution would be to
139  * replace things like '% foo' with '& (foo - 1)'.
140  */
141
142 #define TG3_TX_RING_SIZE                512
143 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
144
145 #define TG3_RX_STD_RING_BYTES(tp) \
146         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
147 #define TG3_RX_JMB_RING_BYTES(tp) \
148         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
149 #define TG3_RX_RCB_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
151 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
152                                  TG3_TX_RING_SIZE)
153 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154
155 #define TG3_DMA_BYTE_ENAB               64
156
157 #define TG3_RX_STD_DMA_SZ               1536
158 #define TG3_RX_JMB_DMA_SZ               9046
159
160 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
161
162 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
163 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164
165 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
166         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167
168 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
169         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
170
171 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
172  * that are at least dword aligned when used in PCIX mode.  The driver
173  * works around this bug by double copying the packet.  This workaround
174  * is built into the normal double copy length check for efficiency.
175  *
176  * However, the double copy is only necessary on those architectures
177  * where unaligned memory accesses are inefficient.  For those architectures
178  * where unaligned memory accesses incur little penalty, we can reintegrate
179  * the 5701 in the normal rx path.  Doing so saves a device structure
180  * dereference by hardcoding the double copy threshold in place.
181  */
182 #define TG3_RX_COPY_THRESHOLD           256
183 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
184         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
185 #else
186         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
187 #endif
188
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
191
192 #define TG3_RAW_IP_ALIGN 2
193
194 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
195
196 #define FIRMWARE_TG3            "tigon/tg3.bin"
197 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
198 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
199
200 static char version[] __devinitdata =
201         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
202
203 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
204 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
205 MODULE_LICENSE("GPL");
206 MODULE_VERSION(DRV_MODULE_VERSION);
207 MODULE_FIRMWARE(FIRMWARE_TG3);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
209 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
210
211 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
212 module_param(tg3_debug, int, 0);
213 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
214
215 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
289         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
290         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
291         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
292         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
295         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
296         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
297         {}
298 };
299
300 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_stats_keys[] = {
305         { "rx_octets" },
306         { "rx_fragments" },
307         { "rx_ucast_packets" },
308         { "rx_mcast_packets" },
309         { "rx_bcast_packets" },
310         { "rx_fcs_errors" },
311         { "rx_align_errors" },
312         { "rx_xon_pause_rcvd" },
313         { "rx_xoff_pause_rcvd" },
314         { "rx_mac_ctrl_rcvd" },
315         { "rx_xoff_entered" },
316         { "rx_frame_too_long_errors" },
317         { "rx_jabbers" },
318         { "rx_undersize_packets" },
319         { "rx_in_length_errors" },
320         { "rx_out_length_errors" },
321         { "rx_64_or_less_octet_packets" },
322         { "rx_65_to_127_octet_packets" },
323         { "rx_128_to_255_octet_packets" },
324         { "rx_256_to_511_octet_packets" },
325         { "rx_512_to_1023_octet_packets" },
326         { "rx_1024_to_1522_octet_packets" },
327         { "rx_1523_to_2047_octet_packets" },
328         { "rx_2048_to_4095_octet_packets" },
329         { "rx_4096_to_8191_octet_packets" },
330         { "rx_8192_to_9022_octet_packets" },
331
332         { "tx_octets" },
333         { "tx_collisions" },
334
335         { "tx_xon_sent" },
336         { "tx_xoff_sent" },
337         { "tx_flow_control" },
338         { "tx_mac_errors" },
339         { "tx_single_collisions" },
340         { "tx_mult_collisions" },
341         { "tx_deferred" },
342         { "tx_excessive_collisions" },
343         { "tx_late_collisions" },
344         { "tx_collide_2times" },
345         { "tx_collide_3times" },
346         { "tx_collide_4times" },
347         { "tx_collide_5times" },
348         { "tx_collide_6times" },
349         { "tx_collide_7times" },
350         { "tx_collide_8times" },
351         { "tx_collide_9times" },
352         { "tx_collide_10times" },
353         { "tx_collide_11times" },
354         { "tx_collide_12times" },
355         { "tx_collide_13times" },
356         { "tx_collide_14times" },
357         { "tx_collide_15times" },
358         { "tx_ucast_packets" },
359         { "tx_mcast_packets" },
360         { "tx_bcast_packets" },
361         { "tx_carrier_sense_errors" },
362         { "tx_discards" },
363         { "tx_errors" },
364
365         { "dma_writeq_full" },
366         { "dma_write_prioq_full" },
367         { "rxbds_empty" },
368         { "rx_discards" },
369         { "rx_errors" },
370         { "rx_threshold_hit" },
371
372         { "dma_readq_full" },
373         { "dma_read_prioq_full" },
374         { "tx_comp_queue_full" },
375
376         { "ring_set_send_prod_index" },
377         { "ring_status_update" },
378         { "nic_irqs" },
379         { "nic_avoided_irqs" },
380         { "nic_tx_threshold_hit" },
381
382         { "mbuf_lwm_thresh_hit" },
383 };
384
385 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
386
387
388 static const struct {
389         const char string[ETH_GSTRING_LEN];
390 } ethtool_test_keys[] = {
391         { "nvram test     (online) " },
392         { "link test      (online) " },
393         { "register test  (offline)" },
394         { "memory test    (offline)" },
395         { "loopback test  (offline)" },
396         { "interrupt test (offline)" },
397 };
398
399 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
400
401
402 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
403 {
404         writel(val, tp->regs + off);
405 }
406
407 static u32 tg3_read32(struct tg3 *tp, u32 off)
408 {
409         return readl(tp->regs + off);
410 }
411
412 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
413 {
414         writel(val, tp->aperegs + off);
415 }
416
417 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
418 {
419         return readl(tp->aperegs + off);
420 }
421
422 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
423 {
424         unsigned long flags;
425
426         spin_lock_irqsave(&tp->indirect_lock, flags);
427         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
428         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
429         spin_unlock_irqrestore(&tp->indirect_lock, flags);
430 }
431
432 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
433 {
434         writel(val, tp->regs + off);
435         readl(tp->regs + off);
436 }
437
438 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
439 {
440         unsigned long flags;
441         u32 val;
442
443         spin_lock_irqsave(&tp->indirect_lock, flags);
444         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
445         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
446         spin_unlock_irqrestore(&tp->indirect_lock, flags);
447         return val;
448 }
449
450 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
451 {
452         unsigned long flags;
453
454         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
455                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
456                                        TG3_64BIT_REG_LOW, val);
457                 return;
458         }
459         if (off == TG3_RX_STD_PROD_IDX_REG) {
460                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
461                                        TG3_64BIT_REG_LOW, val);
462                 return;
463         }
464
465         spin_lock_irqsave(&tp->indirect_lock, flags);
466         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
467         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
468         spin_unlock_irqrestore(&tp->indirect_lock, flags);
469
470         /* In indirect mode when disabling interrupts, we also need
471          * to clear the interrupt bit in the GRC local ctrl register.
472          */
473         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
474             (val == 0x1)) {
475                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
476                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
477         }
478 }
479
480 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
481 {
482         unsigned long flags;
483         u32 val;
484
485         spin_lock_irqsave(&tp->indirect_lock, flags);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
487         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
488         spin_unlock_irqrestore(&tp->indirect_lock, flags);
489         return val;
490 }
491
492 /* usec_wait specifies the wait time in usec when writing to certain registers
493  * where it is unsafe to read back the register without some delay.
494  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
495  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
496  */
497 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
498 {
499         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
500                 /* Non-posted methods */
501                 tp->write32(tp, off, val);
502         else {
503                 /* Posted method */
504                 tg3_write32(tp, off, val);
505                 if (usec_wait)
506                         udelay(usec_wait);
507                 tp->read32(tp, off);
508         }
509         /* Wait again after the read for the posted method to guarantee that
510          * the wait time is met.
511          */
512         if (usec_wait)
513                 udelay(usec_wait);
514 }
515
516 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
517 {
518         tp->write32_mbox(tp, off, val);
519         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
520                 tp->read32_mbox(tp, off);
521 }
522
523 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
524 {
525         void __iomem *mbox = tp->regs + off;
526         writel(val, mbox);
527         if (tg3_flag(tp, TXD_MBOX_HWBUG))
528                 writel(val, mbox);
529         if (tg3_flag(tp, MBOX_WRITE_REORDER))
530                 readl(mbox);
531 }
532
533 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
534 {
535         return readl(tp->regs + off + GRCMBOX_BASE);
536 }
537
538 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
539 {
540         writel(val, tp->regs + off + GRCMBOX_BASE);
541 }
542
543 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
544 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
545 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
546 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
547 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
548
549 #define tw32(reg, val)                  tp->write32(tp, reg, val)
550 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
551 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
552 #define tr32(reg)                       tp->read32(tp, reg)
553
554 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
555 {
556         unsigned long flags;
557
558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
559             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
560                 return;
561
562         spin_lock_irqsave(&tp->indirect_lock, flags);
563         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
564                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
565                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
566
567                 /* Always leave this as zero. */
568                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
569         } else {
570                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
571                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
572
573                 /* Always leave this as zero. */
574                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
575         }
576         spin_unlock_irqrestore(&tp->indirect_lock, flags);
577 }
578
579 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
580 {
581         unsigned long flags;
582
583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
584             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
585                 *val = 0;
586                 return;
587         }
588
589         spin_lock_irqsave(&tp->indirect_lock, flags);
590         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
591                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
592                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
593
594                 /* Always leave this as zero. */
595                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
596         } else {
597                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
598                 *val = tr32(TG3PCI_MEM_WIN_DATA);
599
600                 /* Always leave this as zero. */
601                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
602         }
603         spin_unlock_irqrestore(&tp->indirect_lock, flags);
604 }
605
606 static void tg3_ape_lock_init(struct tg3 *tp)
607 {
608         int i;
609         u32 regbase;
610
611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
612                 regbase = TG3_APE_LOCK_GRANT;
613         else
614                 regbase = TG3_APE_PER_LOCK_GRANT;
615
616         /* Make sure the driver hasn't any stale locks. */
617         for (i = 0; i < 8; i++)
618                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
619 }
620
621 static int tg3_ape_lock(struct tg3 *tp, int locknum)
622 {
623         int i, off;
624         int ret = 0;
625         u32 status, req, gnt;
626
627         if (!tg3_flag(tp, ENABLE_APE))
628                 return 0;
629
630         switch (locknum) {
631         case TG3_APE_LOCK_GRC:
632         case TG3_APE_LOCK_MEM:
633                 break;
634         default:
635                 return -EINVAL;
636         }
637
638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
639                 req = TG3_APE_LOCK_REQ;
640                 gnt = TG3_APE_LOCK_GRANT;
641         } else {
642                 req = TG3_APE_PER_LOCK_REQ;
643                 gnt = TG3_APE_PER_LOCK_GRANT;
644         }
645
646         off = 4 * locknum;
647
648         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
649
650         /* Wait for up to 1 millisecond to acquire lock. */
651         for (i = 0; i < 100; i++) {
652                 status = tg3_ape_read32(tp, gnt + off);
653                 if (status == APE_LOCK_GRANT_DRIVER)
654                         break;
655                 udelay(10);
656         }
657
658         if (status != APE_LOCK_GRANT_DRIVER) {
659                 /* Revoke the lock request. */
660                 tg3_ape_write32(tp, gnt + off,
661                                 APE_LOCK_GRANT_DRIVER);
662
663                 ret = -EBUSY;
664         }
665
666         return ret;
667 }
668
669 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
670 {
671         u32 gnt;
672
673         if (!tg3_flag(tp, ENABLE_APE))
674                 return;
675
676         switch (locknum) {
677         case TG3_APE_LOCK_GRC:
678         case TG3_APE_LOCK_MEM:
679                 break;
680         default:
681                 return;
682         }
683
684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
685                 gnt = TG3_APE_LOCK_GRANT;
686         else
687                 gnt = TG3_APE_PER_LOCK_GRANT;
688
689         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
690 }
691
692 static void tg3_disable_ints(struct tg3 *tp)
693 {
694         int i;
695
696         tw32(TG3PCI_MISC_HOST_CTRL,
697              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
698         for (i = 0; i < tp->irq_max; i++)
699                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
700 }
701
702 static void tg3_enable_ints(struct tg3 *tp)
703 {
704         int i;
705
706         tp->irq_sync = 0;
707         wmb();
708
709         tw32(TG3PCI_MISC_HOST_CTRL,
710              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
711
712         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
713         for (i = 0; i < tp->irq_cnt; i++) {
714                 struct tg3_napi *tnapi = &tp->napi[i];
715
716                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
717                 if (tg3_flag(tp, 1SHOT_MSI))
718                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
719
720                 tp->coal_now |= tnapi->coal_now;
721         }
722
723         /* Force an initial interrupt */
724         if (!tg3_flag(tp, TAGGED_STATUS) &&
725             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
726                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
727         else
728                 tw32(HOSTCC_MODE, tp->coal_now);
729
730         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
731 }
732
733 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
734 {
735         struct tg3 *tp = tnapi->tp;
736         struct tg3_hw_status *sblk = tnapi->hw_status;
737         unsigned int work_exists = 0;
738
739         /* check for phy events */
740         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
741                 if (sblk->status & SD_STATUS_LINK_CHG)
742                         work_exists = 1;
743         }
744         /* check for RX/TX work to do */
745         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
746             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
747                 work_exists = 1;
748
749         return work_exists;
750 }
751
752 /* tg3_int_reenable
753  *  similar to tg3_enable_ints, but it accurately determines whether there
754  *  is new work pending and can return without flushing the PIO write
755  *  which reenables interrupts
756  */
757 static void tg3_int_reenable(struct tg3_napi *tnapi)
758 {
759         struct tg3 *tp = tnapi->tp;
760
761         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
762         mmiowb();
763
764         /* When doing tagged status, this work check is unnecessary.
765          * The last_tag we write above tells the chip which piece of
766          * work we've completed.
767          */
768         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
769                 tw32(HOSTCC_MODE, tp->coalesce_mode |
770                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
771 }
772
773 static void tg3_switch_clocks(struct tg3 *tp)
774 {
775         u32 clock_ctrl;
776         u32 orig_clock_ctrl;
777
778         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
779                 return;
780
781         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
782
783         orig_clock_ctrl = clock_ctrl;
784         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
785                        CLOCK_CTRL_CLKRUN_OENABLE |
786                        0x1f);
787         tp->pci_clock_ctrl = clock_ctrl;
788
789         if (tg3_flag(tp, 5705_PLUS)) {
790                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
791                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
792                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
793                 }
794         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
795                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
796                             clock_ctrl |
797                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
798                             40);
799                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
800                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
801                             40);
802         }
803         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
804 }
805
806 #define PHY_BUSY_LOOPS  5000
807
808 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
809 {
810         u32 frame_val;
811         unsigned int loops;
812         int ret;
813
814         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
815                 tw32_f(MAC_MI_MODE,
816                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
817                 udelay(80);
818         }
819
820         *val = 0x0;
821
822         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
823                       MI_COM_PHY_ADDR_MASK);
824         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
825                       MI_COM_REG_ADDR_MASK);
826         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
827
828         tw32_f(MAC_MI_COM, frame_val);
829
830         loops = PHY_BUSY_LOOPS;
831         while (loops != 0) {
832                 udelay(10);
833                 frame_val = tr32(MAC_MI_COM);
834
835                 if ((frame_val & MI_COM_BUSY) == 0) {
836                         udelay(5);
837                         frame_val = tr32(MAC_MI_COM);
838                         break;
839                 }
840                 loops -= 1;
841         }
842
843         ret = -EBUSY;
844         if (loops != 0) {
845                 *val = frame_val & MI_COM_DATA_MASK;
846                 ret = 0;
847         }
848
849         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
850                 tw32_f(MAC_MI_MODE, tp->mi_mode);
851                 udelay(80);
852         }
853
854         return ret;
855 }
856
857 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
858 {
859         u32 frame_val;
860         unsigned int loops;
861         int ret;
862
863         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
864             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
865                 return 0;
866
867         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
868                 tw32_f(MAC_MI_MODE,
869                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
870                 udelay(80);
871         }
872
873         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
874                       MI_COM_PHY_ADDR_MASK);
875         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
876                       MI_COM_REG_ADDR_MASK);
877         frame_val |= (val & MI_COM_DATA_MASK);
878         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
879
880         tw32_f(MAC_MI_COM, frame_val);
881
882         loops = PHY_BUSY_LOOPS;
883         while (loops != 0) {
884                 udelay(10);
885                 frame_val = tr32(MAC_MI_COM);
886                 if ((frame_val & MI_COM_BUSY) == 0) {
887                         udelay(5);
888                         frame_val = tr32(MAC_MI_COM);
889                         break;
890                 }
891                 loops -= 1;
892         }
893
894         ret = -EBUSY;
895         if (loops != 0)
896                 ret = 0;
897
898         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
899                 tw32_f(MAC_MI_MODE, tp->mi_mode);
900                 udelay(80);
901         }
902
903         return ret;
904 }
905
906 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
907 {
908         int err;
909
910         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
911         if (err)
912                 goto done;
913
914         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
915         if (err)
916                 goto done;
917
918         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
919                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
920         if (err)
921                 goto done;
922
923         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
924
925 done:
926         return err;
927 }
928
929 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
930 {
931         int err;
932
933         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
934         if (err)
935                 goto done;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
942                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
943         if (err)
944                 goto done;
945
946         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
947
948 done:
949         return err;
950 }
951
952 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
953 {
954         int err;
955
956         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
957         if (!err)
958                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
959
960         return err;
961 }
962
963 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
964 {
965         int err;
966
967         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
968         if (!err)
969                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
970
971         return err;
972 }
973
974 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
975 {
976         int err;
977
978         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
979                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
980                            MII_TG3_AUXCTL_SHDWSEL_MISC);
981         if (!err)
982                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
983
984         return err;
985 }
986
987 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
988 {
989         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
990                 set |= MII_TG3_AUXCTL_MISC_WREN;
991
992         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
993 }
994
995 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
996         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
997                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
998                              MII_TG3_AUXCTL_ACTL_TX_6DB)
999
1000 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1001         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1002                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1003
1004 static int tg3_bmcr_reset(struct tg3 *tp)
1005 {
1006         u32 phy_control;
1007         int limit, err;
1008
1009         /* OK, reset it, and poll the BMCR_RESET bit until it
1010          * clears or we time out.
1011          */
1012         phy_control = BMCR_RESET;
1013         err = tg3_writephy(tp, MII_BMCR, phy_control);
1014         if (err != 0)
1015                 return -EBUSY;
1016
1017         limit = 5000;
1018         while (limit--) {
1019                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1020                 if (err != 0)
1021                         return -EBUSY;
1022
1023                 if ((phy_control & BMCR_RESET) == 0) {
1024                         udelay(40);
1025                         break;
1026                 }
1027                 udelay(10);
1028         }
1029         if (limit < 0)
1030                 return -EBUSY;
1031
1032         return 0;
1033 }
1034
1035 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1036 {
1037         struct tg3 *tp = bp->priv;
1038         u32 val;
1039
1040         spin_lock_bh(&tp->lock);
1041
1042         if (tg3_readphy(tp, reg, &val))
1043                 val = -EIO;
1044
1045         spin_unlock_bh(&tp->lock);
1046
1047         return val;
1048 }
1049
1050 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1051 {
1052         struct tg3 *tp = bp->priv;
1053         u32 ret = 0;
1054
1055         spin_lock_bh(&tp->lock);
1056
1057         if (tg3_writephy(tp, reg, val))
1058                 ret = -EIO;
1059
1060         spin_unlock_bh(&tp->lock);
1061
1062         return ret;
1063 }
1064
1065 static int tg3_mdio_reset(struct mii_bus *bp)
1066 {
1067         return 0;
1068 }
1069
1070 static void tg3_mdio_config_5785(struct tg3 *tp)
1071 {
1072         u32 val;
1073         struct phy_device *phydev;
1074
1075         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1076         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1077         case PHY_ID_BCM50610:
1078         case PHY_ID_BCM50610M:
1079                 val = MAC_PHYCFG2_50610_LED_MODES;
1080                 break;
1081         case PHY_ID_BCMAC131:
1082                 val = MAC_PHYCFG2_AC131_LED_MODES;
1083                 break;
1084         case PHY_ID_RTL8211C:
1085                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1086                 break;
1087         case PHY_ID_RTL8201E:
1088                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1089                 break;
1090         default:
1091                 return;
1092         }
1093
1094         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1095                 tw32(MAC_PHYCFG2, val);
1096
1097                 val = tr32(MAC_PHYCFG1);
1098                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1099                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1100                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1101                 tw32(MAC_PHYCFG1, val);
1102
1103                 return;
1104         }
1105
1106         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1107                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1108                        MAC_PHYCFG2_FMODE_MASK_MASK |
1109                        MAC_PHYCFG2_GMODE_MASK_MASK |
1110                        MAC_PHYCFG2_ACT_MASK_MASK   |
1111                        MAC_PHYCFG2_QUAL_MASK_MASK |
1112                        MAC_PHYCFG2_INBAND_ENABLE;
1113
1114         tw32(MAC_PHYCFG2, val);
1115
1116         val = tr32(MAC_PHYCFG1);
1117         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1118                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1119         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1120                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1121                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1122                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1123                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1124         }
1125         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1126                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1127         tw32(MAC_PHYCFG1, val);
1128
1129         val = tr32(MAC_EXT_RGMII_MODE);
1130         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1131                  MAC_RGMII_MODE_RX_QUALITY |
1132                  MAC_RGMII_MODE_RX_ACTIVITY |
1133                  MAC_RGMII_MODE_RX_ENG_DET |
1134                  MAC_RGMII_MODE_TX_ENABLE |
1135                  MAC_RGMII_MODE_TX_LOWPWR |
1136                  MAC_RGMII_MODE_TX_RESET);
1137         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1138                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1139                         val |= MAC_RGMII_MODE_RX_INT_B |
1140                                MAC_RGMII_MODE_RX_QUALITY |
1141                                MAC_RGMII_MODE_RX_ACTIVITY |
1142                                MAC_RGMII_MODE_RX_ENG_DET;
1143                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1144                         val |= MAC_RGMII_MODE_TX_ENABLE |
1145                                MAC_RGMII_MODE_TX_LOWPWR |
1146                                MAC_RGMII_MODE_TX_RESET;
1147         }
1148         tw32(MAC_EXT_RGMII_MODE, val);
1149 }
1150
1151 static void tg3_mdio_start(struct tg3 *tp)
1152 {
1153         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1154         tw32_f(MAC_MI_MODE, tp->mi_mode);
1155         udelay(80);
1156
1157         if (tg3_flag(tp, MDIOBUS_INITED) &&
1158             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1159                 tg3_mdio_config_5785(tp);
1160 }
1161
1162 static int tg3_mdio_init(struct tg3 *tp)
1163 {
1164         int i;
1165         u32 reg;
1166         struct phy_device *phydev;
1167
1168         if (tg3_flag(tp, 5717_PLUS)) {
1169                 u32 is_serdes;
1170
1171                 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1172
1173                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1174                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1175                 else
1176                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1177                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1178                 if (is_serdes)
1179                         tp->phy_addr += 7;
1180         } else
1181                 tp->phy_addr = TG3_PHY_MII_ADDR;
1182
1183         tg3_mdio_start(tp);
1184
1185         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1186                 return 0;
1187
1188         tp->mdio_bus = mdiobus_alloc();
1189         if (tp->mdio_bus == NULL)
1190                 return -ENOMEM;
1191
1192         tp->mdio_bus->name     = "tg3 mdio bus";
1193         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1194                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1195         tp->mdio_bus->priv     = tp;
1196         tp->mdio_bus->parent   = &tp->pdev->dev;
1197         tp->mdio_bus->read     = &tg3_mdio_read;
1198         tp->mdio_bus->write    = &tg3_mdio_write;
1199         tp->mdio_bus->reset    = &tg3_mdio_reset;
1200         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1201         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1202
1203         for (i = 0; i < PHY_MAX_ADDR; i++)
1204                 tp->mdio_bus->irq[i] = PHY_POLL;
1205
1206         /* The bus registration will look for all the PHYs on the mdio bus.
1207          * Unfortunately, it does not ensure the PHY is powered up before
1208          * accessing the PHY ID registers.  A chip reset is the
1209          * quickest way to bring the device back to an operational state..
1210          */
1211         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1212                 tg3_bmcr_reset(tp);
1213
1214         i = mdiobus_register(tp->mdio_bus);
1215         if (i) {
1216                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1217                 mdiobus_free(tp->mdio_bus);
1218                 return i;
1219         }
1220
1221         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1222
1223         if (!phydev || !phydev->drv) {
1224                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1225                 mdiobus_unregister(tp->mdio_bus);
1226                 mdiobus_free(tp->mdio_bus);
1227                 return -ENODEV;
1228         }
1229
1230         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1231         case PHY_ID_BCM57780:
1232                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1233                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1234                 break;
1235         case PHY_ID_BCM50610:
1236         case PHY_ID_BCM50610M:
1237                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1238                                      PHY_BRCM_RX_REFCLK_UNUSED |
1239                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1240                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1241                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1242                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1243                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1244                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1245                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1246                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1247                 /* fallthru */
1248         case PHY_ID_RTL8211C:
1249                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1250                 break;
1251         case PHY_ID_RTL8201E:
1252         case PHY_ID_BCMAC131:
1253                 phydev->interface = PHY_INTERFACE_MODE_MII;
1254                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1255                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1256                 break;
1257         }
1258
1259         tg3_flag_set(tp, MDIOBUS_INITED);
1260
1261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1262                 tg3_mdio_config_5785(tp);
1263
1264         return 0;
1265 }
1266
1267 static void tg3_mdio_fini(struct tg3 *tp)
1268 {
1269         if (tg3_flag(tp, MDIOBUS_INITED)) {
1270                 tg3_flag_clear(tp, MDIOBUS_INITED);
1271                 mdiobus_unregister(tp->mdio_bus);
1272                 mdiobus_free(tp->mdio_bus);
1273         }
1274 }
1275
1276 /* tp->lock is held. */
1277 static inline void tg3_generate_fw_event(struct tg3 *tp)
1278 {
1279         u32 val;
1280
1281         val = tr32(GRC_RX_CPU_EVENT);
1282         val |= GRC_RX_CPU_DRIVER_EVENT;
1283         tw32_f(GRC_RX_CPU_EVENT, val);
1284
1285         tp->last_event_jiffies = jiffies;
1286 }
1287
1288 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1289
1290 /* tp->lock is held. */
1291 static void tg3_wait_for_event_ack(struct tg3 *tp)
1292 {
1293         int i;
1294         unsigned int delay_cnt;
1295         long time_remain;
1296
1297         /* If enough time has passed, no wait is necessary. */
1298         time_remain = (long)(tp->last_event_jiffies + 1 +
1299                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1300                       (long)jiffies;
1301         if (time_remain < 0)
1302                 return;
1303
1304         /* Check if we can shorten the wait time. */
1305         delay_cnt = jiffies_to_usecs(time_remain);
1306         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1307                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1308         delay_cnt = (delay_cnt >> 3) + 1;
1309
1310         for (i = 0; i < delay_cnt; i++) {
1311                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1312                         break;
1313                 udelay(8);
1314         }
1315 }
1316
1317 /* tp->lock is held. */
1318 static void tg3_ump_link_report(struct tg3 *tp)
1319 {
1320         u32 reg;
1321         u32 val;
1322
1323         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1324                 return;
1325
1326         tg3_wait_for_event_ack(tp);
1327
1328         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1329
1330         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1331
1332         val = 0;
1333         if (!tg3_readphy(tp, MII_BMCR, &reg))
1334                 val = reg << 16;
1335         if (!tg3_readphy(tp, MII_BMSR, &reg))
1336                 val |= (reg & 0xffff);
1337         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1338
1339         val = 0;
1340         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1341                 val = reg << 16;
1342         if (!tg3_readphy(tp, MII_LPA, &reg))
1343                 val |= (reg & 0xffff);
1344         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1345
1346         val = 0;
1347         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1348                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1349                         val = reg << 16;
1350                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1351                         val |= (reg & 0xffff);
1352         }
1353         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1354
1355         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1356                 val = reg << 16;
1357         else
1358                 val = 0;
1359         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1360
1361         tg3_generate_fw_event(tp);
1362 }
1363
1364 static void tg3_link_report(struct tg3 *tp)
1365 {
1366         if (!netif_carrier_ok(tp->dev)) {
1367                 netif_info(tp, link, tp->dev, "Link is down\n");
1368                 tg3_ump_link_report(tp);
1369         } else if (netif_msg_link(tp)) {
1370                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1371                             (tp->link_config.active_speed == SPEED_1000 ?
1372                              1000 :
1373                              (tp->link_config.active_speed == SPEED_100 ?
1374                               100 : 10)),
1375                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1376                              "full" : "half"));
1377
1378                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1379                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1380                             "on" : "off",
1381                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1382                             "on" : "off");
1383
1384                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1385                         netdev_info(tp->dev, "EEE is %s\n",
1386                                     tp->setlpicnt ? "enabled" : "disabled");
1387
1388                 tg3_ump_link_report(tp);
1389         }
1390 }
1391
1392 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1393 {
1394         u16 miireg;
1395
1396         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1397                 miireg = ADVERTISE_PAUSE_CAP;
1398         else if (flow_ctrl & FLOW_CTRL_TX)
1399                 miireg = ADVERTISE_PAUSE_ASYM;
1400         else if (flow_ctrl & FLOW_CTRL_RX)
1401                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1402         else
1403                 miireg = 0;
1404
1405         return miireg;
1406 }
1407
1408 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1409 {
1410         u16 miireg;
1411
1412         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1413                 miireg = ADVERTISE_1000XPAUSE;
1414         else if (flow_ctrl & FLOW_CTRL_TX)
1415                 miireg = ADVERTISE_1000XPSE_ASYM;
1416         else if (flow_ctrl & FLOW_CTRL_RX)
1417                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1418         else
1419                 miireg = 0;
1420
1421         return miireg;
1422 }
1423
1424 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1425 {
1426         u8 cap = 0;
1427
1428         if (lcladv & ADVERTISE_1000XPAUSE) {
1429                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1430                         if (rmtadv & LPA_1000XPAUSE)
1431                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1432                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1433                                 cap = FLOW_CTRL_RX;
1434                 } else {
1435                         if (rmtadv & LPA_1000XPAUSE)
1436                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1437                 }
1438         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1439                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1440                         cap = FLOW_CTRL_TX;
1441         }
1442
1443         return cap;
1444 }
1445
1446 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1447 {
1448         u8 autoneg;
1449         u8 flowctrl = 0;
1450         u32 old_rx_mode = tp->rx_mode;
1451         u32 old_tx_mode = tp->tx_mode;
1452
1453         if (tg3_flag(tp, USE_PHYLIB))
1454                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1455         else
1456                 autoneg = tp->link_config.autoneg;
1457
1458         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1459                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1460                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1461                 else
1462                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1463         } else
1464                 flowctrl = tp->link_config.flowctrl;
1465
1466         tp->link_config.active_flowctrl = flowctrl;
1467
1468         if (flowctrl & FLOW_CTRL_RX)
1469                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1470         else
1471                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1472
1473         if (old_rx_mode != tp->rx_mode)
1474                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1475
1476         if (flowctrl & FLOW_CTRL_TX)
1477                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1478         else
1479                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1480
1481         if (old_tx_mode != tp->tx_mode)
1482                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1483 }
1484
1485 static void tg3_adjust_link(struct net_device *dev)
1486 {
1487         u8 oldflowctrl, linkmesg = 0;
1488         u32 mac_mode, lcl_adv, rmt_adv;
1489         struct tg3 *tp = netdev_priv(dev);
1490         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1491
1492         spin_lock_bh(&tp->lock);
1493
1494         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1495                                     MAC_MODE_HALF_DUPLEX);
1496
1497         oldflowctrl = tp->link_config.active_flowctrl;
1498
1499         if (phydev->link) {
1500                 lcl_adv = 0;
1501                 rmt_adv = 0;
1502
1503                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1504                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1505                 else if (phydev->speed == SPEED_1000 ||
1506                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1507                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1508                 else
1509                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1510
1511                 if (phydev->duplex == DUPLEX_HALF)
1512                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1513                 else {
1514                         lcl_adv = tg3_advert_flowctrl_1000T(
1515                                   tp->link_config.flowctrl);
1516
1517                         if (phydev->pause)
1518                                 rmt_adv = LPA_PAUSE_CAP;
1519                         if (phydev->asym_pause)
1520                                 rmt_adv |= LPA_PAUSE_ASYM;
1521                 }
1522
1523                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1524         } else
1525                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1526
1527         if (mac_mode != tp->mac_mode) {
1528                 tp->mac_mode = mac_mode;
1529                 tw32_f(MAC_MODE, tp->mac_mode);
1530                 udelay(40);
1531         }
1532
1533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1534                 if (phydev->speed == SPEED_10)
1535                         tw32(MAC_MI_STAT,
1536                              MAC_MI_STAT_10MBPS_MODE |
1537                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1538                 else
1539                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540         }
1541
1542         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1543                 tw32(MAC_TX_LENGTHS,
1544                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1545                       (6 << TX_LENGTHS_IPG_SHIFT) |
1546                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1547         else
1548                 tw32(MAC_TX_LENGTHS,
1549                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1550                       (6 << TX_LENGTHS_IPG_SHIFT) |
1551                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1552
1553         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1554             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1555             phydev->speed != tp->link_config.active_speed ||
1556             phydev->duplex != tp->link_config.active_duplex ||
1557             oldflowctrl != tp->link_config.active_flowctrl)
1558                 linkmesg = 1;
1559
1560         tp->link_config.active_speed = phydev->speed;
1561         tp->link_config.active_duplex = phydev->duplex;
1562
1563         spin_unlock_bh(&tp->lock);
1564
1565         if (linkmesg)
1566                 tg3_link_report(tp);
1567 }
1568
1569 static int tg3_phy_init(struct tg3 *tp)
1570 {
1571         struct phy_device *phydev;
1572
1573         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1574                 return 0;
1575
1576         /* Bring the PHY back to a known state. */
1577         tg3_bmcr_reset(tp);
1578
1579         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1580
1581         /* Attach the MAC to the PHY. */
1582         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1583                              phydev->dev_flags, phydev->interface);
1584         if (IS_ERR(phydev)) {
1585                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1586                 return PTR_ERR(phydev);
1587         }
1588
1589         /* Mask with MAC supported features. */
1590         switch (phydev->interface) {
1591         case PHY_INTERFACE_MODE_GMII:
1592         case PHY_INTERFACE_MODE_RGMII:
1593                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1594                         phydev->supported &= (PHY_GBIT_FEATURES |
1595                                               SUPPORTED_Pause |
1596                                               SUPPORTED_Asym_Pause);
1597                         break;
1598                 }
1599                 /* fallthru */
1600         case PHY_INTERFACE_MODE_MII:
1601                 phydev->supported &= (PHY_BASIC_FEATURES |
1602                                       SUPPORTED_Pause |
1603                                       SUPPORTED_Asym_Pause);
1604                 break;
1605         default:
1606                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1607                 return -EINVAL;
1608         }
1609
1610         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1611
1612         phydev->advertising = phydev->supported;
1613
1614         return 0;
1615 }
1616
1617 static void tg3_phy_start(struct tg3 *tp)
1618 {
1619         struct phy_device *phydev;
1620
1621         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1622                 return;
1623
1624         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1625
1626         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1627                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1628                 phydev->speed = tp->link_config.orig_speed;
1629                 phydev->duplex = tp->link_config.orig_duplex;
1630                 phydev->autoneg = tp->link_config.orig_autoneg;
1631                 phydev->advertising = tp->link_config.orig_advertising;
1632         }
1633
1634         phy_start(phydev);
1635
1636         phy_start_aneg(phydev);
1637 }
1638
1639 static void tg3_phy_stop(struct tg3 *tp)
1640 {
1641         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1642                 return;
1643
1644         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1645 }
1646
1647 static void tg3_phy_fini(struct tg3 *tp)
1648 {
1649         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1650                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1651                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1652         }
1653 }
1654
1655 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1656 {
1657         u32 phytest;
1658
1659         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1660                 u32 phy;
1661
1662                 tg3_writephy(tp, MII_TG3_FET_TEST,
1663                              phytest | MII_TG3_FET_SHADOW_EN);
1664                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1665                         if (enable)
1666                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1667                         else
1668                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1670                 }
1671                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1672         }
1673 }
1674
1675 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1676 {
1677         u32 reg;
1678
1679         if (!tg3_flag(tp, 5705_PLUS) ||
1680             (tg3_flag(tp, 5717_PLUS) &&
1681              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1682                 return;
1683
1684         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1685                 tg3_phy_fet_toggle_apd(tp, enable);
1686                 return;
1687         }
1688
1689         reg = MII_TG3_MISC_SHDW_WREN |
1690               MII_TG3_MISC_SHDW_SCR5_SEL |
1691               MII_TG3_MISC_SHDW_SCR5_LPED |
1692               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1693               MII_TG3_MISC_SHDW_SCR5_SDTL |
1694               MII_TG3_MISC_SHDW_SCR5_C125OE;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1696                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1697
1698         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1699
1700
1701         reg = MII_TG3_MISC_SHDW_WREN |
1702               MII_TG3_MISC_SHDW_APD_SEL |
1703               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1704         if (enable)
1705                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1706
1707         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1708 }
1709
1710 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1711 {
1712         u32 phy;
1713
1714         if (!tg3_flag(tp, 5705_PLUS) ||
1715             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1716                 return;
1717
1718         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1719                 u32 ephy;
1720
1721                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1722                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1723
1724                         tg3_writephy(tp, MII_TG3_FET_TEST,
1725                                      ephy | MII_TG3_FET_SHADOW_EN);
1726                         if (!tg3_readphy(tp, reg, &phy)) {
1727                                 if (enable)
1728                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1729                                 else
1730                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 tg3_writephy(tp, reg, phy);
1732                         }
1733                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1734                 }
1735         } else {
1736                 int ret;
1737
1738                 ret = tg3_phy_auxctl_read(tp,
1739                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1740                 if (!ret) {
1741                         if (enable)
1742                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1743                         else
1744                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         tg3_phy_auxctl_write(tp,
1746                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1747                 }
1748         }
1749 }
1750
1751 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1752 {
1753         int ret;
1754         u32 val;
1755
1756         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1757                 return;
1758
1759         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1760         if (!ret)
1761                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1762                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1763 }
1764
1765 static void tg3_phy_apply_otp(struct tg3 *tp)
1766 {
1767         u32 otp, phy;
1768
1769         if (!tp->phy_otp)
1770                 return;
1771
1772         otp = tp->phy_otp;
1773
1774         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1775                 return;
1776
1777         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1778         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1779         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1780
1781         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1782               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1783         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1784
1785         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1786         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1787         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1788
1789         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1790         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1791
1792         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1793         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1794
1795         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1796               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1797         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1798
1799         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1800 }
1801
1802 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1803 {
1804         u32 val;
1805
1806         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1807                 return;
1808
1809         tp->setlpicnt = 0;
1810
1811         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1812             current_link_up == 1 &&
1813             tp->link_config.active_duplex == DUPLEX_FULL &&
1814             (tp->link_config.active_speed == SPEED_100 ||
1815              tp->link_config.active_speed == SPEED_1000)) {
1816                 u32 eeectl;
1817
1818                 if (tp->link_config.active_speed == SPEED_1000)
1819                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1820                 else
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1822
1823                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1824
1825                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1826                                   TG3_CL45_D7_EEERES_STAT, &val);
1827
1828                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1829                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1830                         tp->setlpicnt = 2;
1831         }
1832
1833         if (!tp->setlpicnt) {
1834                 val = tr32(TG3_CPMU_EEE_MODE);
1835                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1836         }
1837 }
1838
1839 static void tg3_phy_eee_enable(struct tg3 *tp)
1840 {
1841         u32 val;
1842
1843         if (tp->link_config.active_speed == SPEED_1000 &&
1844             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1847             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1848                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1849                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1850         }
1851
1852         val = tr32(TG3_CPMU_EEE_MODE);
1853         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1854 }
1855
1856 static int tg3_wait_macro_done(struct tg3 *tp)
1857 {
1858         int limit = 100;
1859
1860         while (limit--) {
1861                 u32 tmp32;
1862
1863                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1864                         if ((tmp32 & 0x1000) == 0)
1865                                 break;
1866                 }
1867         }
1868         if (limit < 0)
1869                 return -EBUSY;
1870
1871         return 0;
1872 }
1873
1874 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1875 {
1876         static const u32 test_pat[4][6] = {
1877         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1878         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1879         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1880         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1881         };
1882         int chan;
1883
1884         for (chan = 0; chan < 4; chan++) {
1885                 int i;
1886
1887                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1888                              (chan * 0x2000) | 0x0200);
1889                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1890
1891                 for (i = 0; i < 6; i++)
1892                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1893                                      test_pat[chan][i]);
1894
1895                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1896                 if (tg3_wait_macro_done(tp)) {
1897                         *resetp = 1;
1898                         return -EBUSY;
1899                 }
1900
1901                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902                              (chan * 0x2000) | 0x0200);
1903                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1904                 if (tg3_wait_macro_done(tp)) {
1905                         *resetp = 1;
1906                         return -EBUSY;
1907                 }
1908
1909                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1910                 if (tg3_wait_macro_done(tp)) {
1911                         *resetp = 1;
1912                         return -EBUSY;
1913                 }
1914
1915                 for (i = 0; i < 6; i += 2) {
1916                         u32 low, high;
1917
1918                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1919                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1920                             tg3_wait_macro_done(tp)) {
1921                                 *resetp = 1;
1922                                 return -EBUSY;
1923                         }
1924                         low &= 0x7fff;
1925                         high &= 0x000f;
1926                         if (low != test_pat[chan][i] ||
1927                             high != test_pat[chan][i+1]) {
1928                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1929                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1930                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1931
1932                                 return -EBUSY;
1933                         }
1934                 }
1935         }
1936
1937         return 0;
1938 }
1939
1940 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1941 {
1942         int chan;
1943
1944         for (chan = 0; chan < 4; chan++) {
1945                 int i;
1946
1947                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948                              (chan * 0x2000) | 0x0200);
1949                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950                 for (i = 0; i < 6; i++)
1951                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1952                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953                 if (tg3_wait_macro_done(tp))
1954                         return -EBUSY;
1955         }
1956
1957         return 0;
1958 }
1959
1960 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1961 {
1962         u32 reg32, phy9_orig;
1963         int retries, do_phy_reset, err;
1964
1965         retries = 10;
1966         do_phy_reset = 1;
1967         do {
1968                 if (do_phy_reset) {
1969                         err = tg3_bmcr_reset(tp);
1970                         if (err)
1971                                 return err;
1972                         do_phy_reset = 0;
1973                 }
1974
1975                 /* Disable transmitter and interrupt.  */
1976                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1977                         continue;
1978
1979                 reg32 |= 0x3000;
1980                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1981
1982                 /* Set full-duplex, 1000 mbps.  */
1983                 tg3_writephy(tp, MII_BMCR,
1984                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1985
1986                 /* Set to master mode.  */
1987                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1988                         continue;
1989
1990                 tg3_writephy(tp, MII_TG3_CTRL,
1991                              (MII_TG3_CTRL_AS_MASTER |
1992                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1993
1994                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1995                 if (err)
1996                         return err;
1997
1998                 /* Block the PHY control access.  */
1999                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2000
2001                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2002                 if (!err)
2003                         break;
2004         } while (--retries);
2005
2006         err = tg3_phy_reset_chanpat(tp);
2007         if (err)
2008                 return err;
2009
2010         tg3_phydsp_write(tp, 0x8005, 0x0000);
2011
2012         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2013         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2014
2015         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2016
2017         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2018
2019         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2020                 reg32 &= ~0x3000;
2021                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2022         } else if (!err)
2023                 err = -EBUSY;
2024
2025         return err;
2026 }
2027
2028 /* This will reset the tigon3 PHY if there is no valid
2029  * link unless the FORCE argument is non-zero.
2030  */
2031 static int tg3_phy_reset(struct tg3 *tp)
2032 {
2033         u32 val, cpmuctrl;
2034         int err;
2035
2036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2037                 val = tr32(GRC_MISC_CFG);
2038                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2039                 udelay(40);
2040         }
2041         err  = tg3_readphy(tp, MII_BMSR, &val);
2042         err |= tg3_readphy(tp, MII_BMSR, &val);
2043         if (err != 0)
2044                 return -EBUSY;
2045
2046         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2047                 netif_carrier_off(tp->dev);
2048                 tg3_link_report(tp);
2049         }
2050
2051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2054                 err = tg3_phy_reset_5703_4_5(tp);
2055                 if (err)
2056                         return err;
2057                 goto out;
2058         }
2059
2060         cpmuctrl = 0;
2061         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2062             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2063                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2064                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2065                         tw32(TG3_CPMU_CTRL,
2066                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067         }
2068
2069         err = tg3_bmcr_reset(tp);
2070         if (err)
2071                 return err;
2072
2073         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2074                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2075                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2076
2077                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2078         }
2079
2080         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2081             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2082                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2083                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2084                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2085                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2086                         udelay(40);
2087                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2088                 }
2089         }
2090
2091         if (tg3_flag(tp, 5717_PLUS) &&
2092             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2093                 return 0;
2094
2095         tg3_phy_apply_otp(tp);
2096
2097         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2098                 tg3_phy_toggle_apd(tp, true);
2099         else
2100                 tg3_phy_toggle_apd(tp, false);
2101
2102 out:
2103         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2104             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2105                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2106                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2107                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108         }
2109
2110         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2111                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113         }
2114
2115         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2116                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2118                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2119                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2120                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2121                 }
2122         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2123                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2124                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2125                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2126                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2127                                 tg3_writephy(tp, MII_TG3_TEST1,
2128                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2129                         } else
2130                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2131
2132                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2133                 }
2134         }
2135
2136         /* Set Extended packet length bit (bit 14) on all chips that */
2137         /* support jumbo frames */
2138         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2139                 /* Cannot do read-modify-write on 5401 */
2140                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2141         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2142                 /* Set bit 14 with read-modify-write to preserve other bits */
2143                 err = tg3_phy_auxctl_read(tp,
2144                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2145                 if (!err)
2146                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2147                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148         }
2149
2150         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2151          * jumbo frames transmission.
2152          */
2153         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2154                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2155                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2156                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157         }
2158
2159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2160                 /* adjust output voltage */
2161                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162         }
2163
2164         tg3_phy_toggle_automdix(tp, 1);
2165         tg3_phy_set_wirespeed(tp);
2166         return 0;
2167 }
2168
2169 static void tg3_frob_aux_power(struct tg3 *tp)
2170 {
2171         bool need_vaux = false;
2172
2173         /* The GPIOs do something completely different on 57765. */
2174         if (!tg3_flag(tp, IS_NIC) ||
2175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2176             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2177                 return;
2178
2179         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2181              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2183             tp->pdev_peer != tp->pdev) {
2184                 struct net_device *dev_peer;
2185
2186                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2187
2188                 /* remove_one() may have been run on the peer. */
2189                 if (dev_peer) {
2190                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2191
2192                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2193                                 return;
2194
2195                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2196                             tg3_flag(tp_peer, ENABLE_ASF))
2197                                 need_vaux = true;
2198                 }
2199         }
2200
2201         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2202                 need_vaux = true;
2203
2204         if (need_vaux) {
2205                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2206                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2207                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208                                     (GRC_LCLCTRL_GPIO_OE0 |
2209                                      GRC_LCLCTRL_GPIO_OE1 |
2210                                      GRC_LCLCTRL_GPIO_OE2 |
2211                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2212                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2213                                     100);
2214                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2215                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2216                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2217                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2218                                              GRC_LCLCTRL_GPIO_OE1 |
2219                                              GRC_LCLCTRL_GPIO_OE2 |
2220                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2221                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2222                                              tp->grc_local_ctrl;
2223                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2224
2225                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2226                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2227
2228                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2229                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2230                 } else {
2231                         u32 no_gpio2;
2232                         u32 grc_local_ctrl = 0;
2233
2234                         /* Workaround to prevent overdrawing Amps. */
2235                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2236                             ASIC_REV_5714) {
2237                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2238                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239                                             grc_local_ctrl, 100);
2240                         }
2241
2242                         /* On 5753 and variants, GPIO2 cannot be used. */
2243                         no_gpio2 = tp->nic_sram_data_cfg &
2244                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2245
2246                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2247                                          GRC_LCLCTRL_GPIO_OE1 |
2248                                          GRC_LCLCTRL_GPIO_OE2 |
2249                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2250                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2251                         if (no_gpio2) {
2252                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2253                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2254                         }
2255                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2256                                                     grc_local_ctrl, 100);
2257
2258                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2259
2260                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2261                                                     grc_local_ctrl, 100);
2262
2263                         if (!no_gpio2) {
2264                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2265                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266                                             grc_local_ctrl, 100);
2267                         }
2268                 }
2269         } else {
2270                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2271                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2272                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273                                     (GRC_LCLCTRL_GPIO_OE1 |
2274                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275
2276                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277                                     GRC_LCLCTRL_GPIO_OE1, 100);
2278
2279                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2280                                     (GRC_LCLCTRL_GPIO_OE1 |
2281                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2282                 }
2283         }
2284 }
2285
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2287 {
2288         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2289                 return 1;
2290         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291                 if (speed != SPEED_10)
2292                         return 1;
2293         } else if (speed == SPEED_10)
2294                 return 1;
2295
2296         return 0;
2297 }
2298
2299 static int tg3_setup_phy(struct tg3 *, int);
2300
2301 #define RESET_KIND_SHUTDOWN     0
2302 #define RESET_KIND_INIT         1
2303 #define RESET_KIND_SUSPEND      2
2304
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2307
2308 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2309 {
2310         u32 val;
2311
2312         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2313                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2314                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2315                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2316
2317                         sg_dig_ctrl |=
2318                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2319                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2320                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2321                 }
2322                 return;
2323         }
2324
2325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2326                 tg3_bmcr_reset(tp);
2327                 val = tr32(GRC_MISC_CFG);
2328                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2329                 udelay(40);
2330                 return;
2331         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2332                 u32 phytest;
2333                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2334                         u32 phy;
2335
2336                         tg3_writephy(tp, MII_ADVERTISE, 0);
2337                         tg3_writephy(tp, MII_BMCR,
2338                                      BMCR_ANENABLE | BMCR_ANRESTART);
2339
2340                         tg3_writephy(tp, MII_TG3_FET_TEST,
2341                                      phytest | MII_TG3_FET_SHADOW_EN);
2342                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2343                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2344                                 tg3_writephy(tp,
2345                                              MII_TG3_FET_SHDW_AUXMODE4,
2346                                              phy);
2347                         }
2348                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2349                 }
2350                 return;
2351         } else if (do_low_power) {
2352                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2353                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2354
2355                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2356                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2357                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2358                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2359         }
2360
2361         /* The PHY should not be powered down on some chips because
2362          * of bugs.
2363          */
2364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2366             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2367              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2368                 return;
2369
2370         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2371             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2372                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2373                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2374                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2375                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2376         }
2377
2378         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2379 }
2380
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2383 {
2384         if (tg3_flag(tp, NVRAM)) {
2385                 int i;
2386
2387                 if (tp->nvram_lock_cnt == 0) {
2388                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389                         for (i = 0; i < 8000; i++) {
2390                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2391                                         break;
2392                                 udelay(20);
2393                         }
2394                         if (i == 8000) {
2395                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2396                                 return -ENODEV;
2397                         }
2398                 }
2399                 tp->nvram_lock_cnt++;
2400         }
2401         return 0;
2402 }
2403
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2406 {
2407         if (tg3_flag(tp, NVRAM)) {
2408                 if (tp->nvram_lock_cnt > 0)
2409                         tp->nvram_lock_cnt--;
2410                 if (tp->nvram_lock_cnt == 0)
2411                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2412         }
2413 }
2414
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2417 {
2418         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419                 u32 nvaccess = tr32(NVRAM_ACCESS);
2420
2421                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2422         }
2423 }
2424
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2427 {
2428         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429                 u32 nvaccess = tr32(NVRAM_ACCESS);
2430
2431                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2432         }
2433 }
2434
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436                                         u32 offset, u32 *val)
2437 {
2438         u32 tmp;
2439         int i;
2440
2441         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2442                 return -EINVAL;
2443
2444         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445                                         EEPROM_ADDR_DEVID_MASK |
2446                                         EEPROM_ADDR_READ);
2447         tw32(GRC_EEPROM_ADDR,
2448              tmp |
2449              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451               EEPROM_ADDR_ADDR_MASK) |
2452              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2453
2454         for (i = 0; i < 1000; i++) {
2455                 tmp = tr32(GRC_EEPROM_ADDR);
2456
2457                 if (tmp & EEPROM_ADDR_COMPLETE)
2458                         break;
2459                 msleep(1);
2460         }
2461         if (!(tmp & EEPROM_ADDR_COMPLETE))
2462                 return -EBUSY;
2463
2464         tmp = tr32(GRC_EEPROM_DATA);
2465
2466         /*
2467          * The data will always be opposite the native endian
2468          * format.  Perform a blind byteswap to compensate.
2469          */
2470         *val = swab32(tmp);
2471
2472         return 0;
2473 }
2474
2475 #define NVRAM_CMD_TIMEOUT 10000
2476
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2478 {
2479         int i;
2480
2481         tw32(NVRAM_CMD, nvram_cmd);
2482         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2483                 udelay(10);
2484                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2485                         udelay(10);
2486                         break;
2487                 }
2488         }
2489
2490         if (i == NVRAM_CMD_TIMEOUT)
2491                 return -EBUSY;
2492
2493         return 0;
2494 }
2495
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2497 {
2498         if (tg3_flag(tp, NVRAM) &&
2499             tg3_flag(tp, NVRAM_BUFFERED) &&
2500             tg3_flag(tp, FLASH) &&
2501             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502             (tp->nvram_jedecnum == JEDEC_ATMEL))
2503
2504                 addr = ((addr / tp->nvram_pagesize) <<
2505                         ATMEL_AT45DB0X1B_PAGE_POS) +
2506                        (addr % tp->nvram_pagesize);
2507
2508         return addr;
2509 }
2510
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2512 {
2513         if (tg3_flag(tp, NVRAM) &&
2514             tg3_flag(tp, NVRAM_BUFFERED) &&
2515             tg3_flag(tp, FLASH) &&
2516             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517             (tp->nvram_jedecnum == JEDEC_ATMEL))
2518
2519                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520                         tp->nvram_pagesize) +
2521                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2522
2523         return addr;
2524 }
2525
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527  * the byteswapping settings for all other register accesses.
2528  * tg3 devices are BE devices, so on a BE machine, the data
2529  * returned will be exactly as it is seen in NVRAM.  On a LE
2530  * machine, the 32-bit value will be byteswapped.
2531  */
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2533 {
2534         int ret;
2535
2536         if (!tg3_flag(tp, NVRAM))
2537                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2538
2539         offset = tg3_nvram_phys_addr(tp, offset);
2540
2541         if (offset > NVRAM_ADDR_MSK)
2542                 return -EINVAL;
2543
2544         ret = tg3_nvram_lock(tp);
2545         if (ret)
2546                 return ret;
2547
2548         tg3_enable_nvram_access(tp);
2549
2550         tw32(NVRAM_ADDR, offset);
2551         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553
2554         if (ret == 0)
2555                 *val = tr32(NVRAM_RDDATA);
2556
2557         tg3_disable_nvram_access(tp);
2558
2559         tg3_nvram_unlock(tp);
2560
2561         return ret;
2562 }
2563
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 {
2567         u32 v;
2568         int res = tg3_nvram_read(tp, offset, &v);
2569         if (!res)
2570                 *val = cpu_to_be32(v);
2571         return res;
2572 }
2573
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2576 {
2577         u32 addr_high, addr_low;
2578         int i;
2579
2580         addr_high = ((tp->dev->dev_addr[0] << 8) |
2581                      tp->dev->dev_addr[1]);
2582         addr_low = ((tp->dev->dev_addr[2] << 24) |
2583                     (tp->dev->dev_addr[3] << 16) |
2584                     (tp->dev->dev_addr[4] <<  8) |
2585                     (tp->dev->dev_addr[5] <<  0));
2586         for (i = 0; i < 4; i++) {
2587                 if (i == 1 && skip_mac_1)
2588                         continue;
2589                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2591         }
2592
2593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595                 for (i = 0; i < 12; i++) {
2596                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2598                 }
2599         }
2600
2601         addr_high = (tp->dev->dev_addr[0] +
2602                      tp->dev->dev_addr[1] +
2603                      tp->dev->dev_addr[2] +
2604                      tp->dev->dev_addr[3] +
2605                      tp->dev->dev_addr[4] +
2606                      tp->dev->dev_addr[5]) &
2607                 TX_BACKOFF_SEED_MASK;
2608         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2609 }
2610
2611 static void tg3_enable_register_access(struct tg3 *tp)
2612 {
2613         /*
2614          * Make sure register accesses (indirect or otherwise) will function
2615          * correctly.
2616          */
2617         pci_write_config_dword(tp->pdev,
2618                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2619 }
2620
2621 static int tg3_power_up(struct tg3 *tp)
2622 {
2623         tg3_enable_register_access(tp);
2624
2625         pci_set_power_state(tp->pdev, PCI_D0);
2626
2627         /* Switch out of Vaux if it is a NIC */
2628         if (tg3_flag(tp, IS_NIC))
2629                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2630
2631         return 0;
2632 }
2633
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2635 {
2636         u32 misc_host_ctrl;
2637         bool device_should_wake, do_low_power;
2638
2639         tg3_enable_register_access(tp);
2640
2641         /* Restore the CLKREQ setting. */
2642         if (tg3_flag(tp, CLKREQ_BUG)) {
2643                 u16 lnkctl;
2644
2645                 pci_read_config_word(tp->pdev,
2646                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2647                                      &lnkctl);
2648                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649                 pci_write_config_word(tp->pdev,
2650                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2651                                       lnkctl);
2652         }
2653
2654         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655         tw32(TG3PCI_MISC_HOST_CTRL,
2656              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2657
2658         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659                              tg3_flag(tp, WOL_ENABLE);
2660
2661         if (tg3_flag(tp, USE_PHYLIB)) {
2662                 do_low_power = false;
2663                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665                         struct phy_device *phydev;
2666                         u32 phyid, advertising;
2667
2668                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2669
2670                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2671
2672                         tp->link_config.orig_speed = phydev->speed;
2673                         tp->link_config.orig_duplex = phydev->duplex;
2674                         tp->link_config.orig_autoneg = phydev->autoneg;
2675                         tp->link_config.orig_advertising = phydev->advertising;
2676
2677                         advertising = ADVERTISED_TP |
2678                                       ADVERTISED_Pause |
2679                                       ADVERTISED_Autoneg |
2680                                       ADVERTISED_10baseT_Half;
2681
2682                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2684                                         advertising |=
2685                                                 ADVERTISED_100baseT_Half |
2686                                                 ADVERTISED_100baseT_Full |
2687                                                 ADVERTISED_10baseT_Full;
2688                                 else
2689                                         advertising |= ADVERTISED_10baseT_Full;
2690                         }
2691
2692                         phydev->advertising = advertising;
2693
2694                         phy_start_aneg(phydev);
2695
2696                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697                         if (phyid != PHY_ID_BCMAC131) {
2698                                 phyid &= PHY_BCM_OUI_MASK;
2699                                 if (phyid == PHY_BCM_OUI_1 ||
2700                                     phyid == PHY_BCM_OUI_2 ||
2701                                     phyid == PHY_BCM_OUI_3)
2702                                         do_low_power = true;
2703                         }
2704                 }
2705         } else {
2706                 do_low_power = true;
2707
2708                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710                         tp->link_config.orig_speed = tp->link_config.speed;
2711                         tp->link_config.orig_duplex = tp->link_config.duplex;
2712                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713                 }
2714
2715                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716                         tp->link_config.speed = SPEED_10;
2717                         tp->link_config.duplex = DUPLEX_HALF;
2718                         tp->link_config.autoneg = AUTONEG_ENABLE;
2719                         tg3_setup_phy(tp, 0);
2720                 }
2721         }
2722
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2724                 u32 val;
2725
2726                 val = tr32(GRC_VCPU_EXT_CTRL);
2727                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2729                 int i;
2730                 u32 val;
2731
2732                 for (i = 0; i < 200; i++) {
2733                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2735                                 break;
2736                         msleep(1);
2737                 }
2738         }
2739         if (tg3_flag(tp, WOL_CAP))
2740                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741                                                      WOL_DRV_STATE_SHUTDOWN |
2742                                                      WOL_DRV_WOL |
2743                                                      WOL_SET_MAGIC_PKT);
2744
2745         if (device_should_wake) {
2746                 u32 mac_mode;
2747
2748                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2749                         if (do_low_power &&
2750                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751                                 tg3_phy_auxctl_write(tp,
2752                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2754                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2756                                 udelay(40);
2757                         }
2758
2759                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2761                         else
2762                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2763
2764                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2766                             ASIC_REV_5700) {
2767                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768                                              SPEED_100 : SPEED_10;
2769                                 if (tg3_5700_link_polarity(tp, speed))
2770                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2771                                 else
2772                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773                         }
2774                 } else {
2775                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2776                 }
2777
2778                 if (!tg3_flag(tp, 5750_PLUS))
2779                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2780
2781                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2785
2786                 if (tg3_flag(tp, ENABLE_APE))
2787                         mac_mode |= MAC_MODE_APE_TX_EN |
2788                                     MAC_MODE_APE_RX_EN |
2789                                     MAC_MODE_TDE_ENABLE;
2790
2791                 tw32_f(MAC_MODE, mac_mode);
2792                 udelay(100);
2793
2794                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2795                 udelay(10);
2796         }
2797
2798         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2801                 u32 base_val;
2802
2803                 base_val = tp->pci_clock_ctrl;
2804                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805                              CLOCK_CTRL_TXCLK_DISABLE);
2806
2807                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809         } else if (tg3_flag(tp, 5780_CLASS) ||
2810                    tg3_flag(tp, CPMU_PRESENT) ||
2811                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2812                 /* do nothing */
2813         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814                 u32 newbits1, newbits2;
2815
2816                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819                                     CLOCK_CTRL_TXCLK_DISABLE |
2820                                     CLOCK_CTRL_ALTCLK);
2821                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822                 } else if (tg3_flag(tp, 5705_PLUS)) {
2823                         newbits1 = CLOCK_CTRL_625_CORE;
2824                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2825                 } else {
2826                         newbits1 = CLOCK_CTRL_ALTCLK;
2827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2828                 }
2829
2830                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2831                             40);
2832
2833                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2834                             40);
2835
2836                 if (!tg3_flag(tp, 5705_PLUS)) {
2837                         u32 newbits3;
2838
2839                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842                                             CLOCK_CTRL_TXCLK_DISABLE |
2843                                             CLOCK_CTRL_44MHZ_CORE);
2844                         } else {
2845                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846                         }
2847
2848                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849                                     tp->pci_clock_ctrl | newbits3, 40);
2850                 }
2851         }
2852
2853         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2854                 tg3_power_down_phy(tp, do_low_power);
2855
2856         tg3_frob_aux_power(tp);
2857
2858         /* Workaround for unstable PLL clock */
2859         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861                 u32 val = tr32(0x7d00);
2862
2863                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2864                 tw32(0x7d00, val);
2865                 if (!tg3_flag(tp, ENABLE_ASF)) {
2866                         int err;
2867
2868                         err = tg3_nvram_lock(tp);
2869                         tg3_halt_cpu(tp, RX_CPU_BASE);
2870                         if (!err)
2871                                 tg3_nvram_unlock(tp);
2872                 }
2873         }
2874
2875         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2876
2877         return 0;
2878 }
2879
2880 static void tg3_power_down(struct tg3 *tp)
2881 {
2882         tg3_power_down_prepare(tp);
2883
2884         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885         pci_set_power_state(tp->pdev, PCI_D3hot);
2886 }
2887
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 {
2890         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891         case MII_TG3_AUX_STAT_10HALF:
2892                 *speed = SPEED_10;
2893                 *duplex = DUPLEX_HALF;
2894                 break;
2895
2896         case MII_TG3_AUX_STAT_10FULL:
2897                 *speed = SPEED_10;
2898                 *duplex = DUPLEX_FULL;
2899                 break;
2900
2901         case MII_TG3_AUX_STAT_100HALF:
2902                 *speed = SPEED_100;
2903                 *duplex = DUPLEX_HALF;
2904                 break;
2905
2906         case MII_TG3_AUX_STAT_100FULL:
2907                 *speed = SPEED_100;
2908                 *duplex = DUPLEX_FULL;
2909                 break;
2910
2911         case MII_TG3_AUX_STAT_1000HALF:
2912                 *speed = SPEED_1000;
2913                 *duplex = DUPLEX_HALF;
2914                 break;
2915
2916         case MII_TG3_AUX_STAT_1000FULL:
2917                 *speed = SPEED_1000;
2918                 *duplex = DUPLEX_FULL;
2919                 break;
2920
2921         default:
2922                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2924                                  SPEED_10;
2925                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2926                                   DUPLEX_HALF;
2927                         break;
2928                 }
2929                 *speed = SPEED_INVALID;
2930                 *duplex = DUPLEX_INVALID;
2931                 break;
2932         }
2933 }
2934
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2936 {
2937         int err = 0;
2938         u32 val, new_adv;
2939
2940         new_adv = ADVERTISE_CSMA;
2941         if (advertise & ADVERTISED_10baseT_Half)
2942                 new_adv |= ADVERTISE_10HALF;
2943         if (advertise & ADVERTISED_10baseT_Full)
2944                 new_adv |= ADVERTISE_10FULL;
2945         if (advertise & ADVERTISED_100baseT_Half)
2946                 new_adv |= ADVERTISE_100HALF;
2947         if (advertise & ADVERTISED_100baseT_Full)
2948                 new_adv |= ADVERTISE_100FULL;
2949
2950         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2951
2952         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2953         if (err)
2954                 goto done;
2955
2956         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2957                 goto done;
2958
2959         new_adv = 0;
2960         if (advertise & ADVERTISED_1000baseT_Half)
2961                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962         if (advertise & ADVERTISED_1000baseT_Full)
2963                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2964
2965         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2969
2970         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2971         if (err)
2972                 goto done;
2973
2974         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2975                 goto done;
2976
2977         tw32(TG3_CPMU_EEE_MODE,
2978              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2979
2980         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2981         if (!err) {
2982                 u32 err2;
2983
2984                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2985                 case ASIC_REV_5717:
2986                 case ASIC_REV_57765:
2987                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2990                         /* Fall through */
2991                 case ASIC_REV_5719:
2992                         val = MII_TG3_DSP_TAP26_ALNOKO |
2993                               MII_TG3_DSP_TAP26_RMRXSTO |
2994                               MII_TG3_DSP_TAP26_OPCSINPT;
2995                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2996                 }
2997
2998                 val = 0;
2999                 /* Advertise 100-BaseTX EEE ability */
3000                 if (advertise & ADVERTISED_100baseT_Full)
3001                         val |= MDIO_AN_EEE_ADV_100TX;
3002                 /* Advertise 1000-BaseT EEE ability */
3003                 if (advertise & ADVERTISED_1000baseT_Full)
3004                         val |= MDIO_AN_EEE_ADV_1000T;
3005                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3006
3007                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3008                 if (!err)
3009                         err = err2;
3010         }
3011
3012 done:
3013         return err;
3014 }
3015
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3017 {
3018         u32 new_adv;
3019         int i;
3020
3021         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022                 new_adv = ADVERTISED_10baseT_Half |
3023                           ADVERTISED_10baseT_Full;
3024                 if (tg3_flag(tp, WOL_SPEED_100MB))
3025                         new_adv |= ADVERTISED_100baseT_Half |
3026                                    ADVERTISED_100baseT_Full;
3027
3028                 tg3_phy_autoneg_cfg(tp, new_adv,
3029                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3030         } else if (tp->link_config.speed == SPEED_INVALID) {
3031                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032                         tp->link_config.advertising &=
3033                                 ~(ADVERTISED_1000baseT_Half |
3034                                   ADVERTISED_1000baseT_Full);
3035
3036                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037                                     tp->link_config.flowctrl);
3038         } else {
3039                 /* Asking for a specific link mode. */
3040                 if (tp->link_config.speed == SPEED_1000) {
3041                         if (tp->link_config.duplex == DUPLEX_FULL)
3042                                 new_adv = ADVERTISED_1000baseT_Full;
3043                         else
3044                                 new_adv = ADVERTISED_1000baseT_Half;
3045                 } else if (tp->link_config.speed == SPEED_100) {
3046                         if (tp->link_config.duplex == DUPLEX_FULL)
3047                                 new_adv = ADVERTISED_100baseT_Full;
3048                         else
3049                                 new_adv = ADVERTISED_100baseT_Half;
3050                 } else {
3051                         if (tp->link_config.duplex == DUPLEX_FULL)
3052                                 new_adv = ADVERTISED_10baseT_Full;
3053                         else
3054                                 new_adv = ADVERTISED_10baseT_Half;
3055                 }
3056
3057                 tg3_phy_autoneg_cfg(tp, new_adv,
3058                                     tp->link_config.flowctrl);
3059         }
3060
3061         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062             tp->link_config.speed != SPEED_INVALID) {
3063                 u32 bmcr, orig_bmcr;
3064
3065                 tp->link_config.active_speed = tp->link_config.speed;
3066                 tp->link_config.active_duplex = tp->link_config.duplex;
3067
3068                 bmcr = 0;
3069                 switch (tp->link_config.speed) {
3070                 default:
3071                 case SPEED_10:
3072                         break;
3073
3074                 case SPEED_100:
3075                         bmcr |= BMCR_SPEED100;
3076                         break;
3077
3078                 case SPEED_1000:
3079                         bmcr |= TG3_BMCR_SPEED1000;
3080                         break;
3081                 }
3082
3083                 if (tp->link_config.duplex == DUPLEX_FULL)
3084                         bmcr |= BMCR_FULLDPLX;
3085
3086                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087                     (bmcr != orig_bmcr)) {
3088                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089                         for (i = 0; i < 1500; i++) {
3090                                 u32 tmp;
3091
3092                                 udelay(10);
3093                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094                                     tg3_readphy(tp, MII_BMSR, &tmp))
3095                                         continue;
3096                                 if (!(tmp & BMSR_LSTATUS)) {
3097                                         udelay(40);
3098                                         break;
3099                                 }
3100                         }
3101                         tg3_writephy(tp, MII_BMCR, bmcr);
3102                         udelay(40);
3103                 }
3104         } else {
3105                 tg3_writephy(tp, MII_BMCR,
3106                              BMCR_ANENABLE | BMCR_ANRESTART);
3107         }
3108 }
3109
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 {
3112         int err;
3113
3114         /* Turn off tap power management. */
3115         /* Set Extended packet length bit */
3116         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117
3118         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123
3124         udelay(40);
3125
3126         return err;
3127 }
3128
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 {
3131         u32 adv_reg, all_mask = 0;
3132
3133         if (mask & ADVERTISED_10baseT_Half)
3134                 all_mask |= ADVERTISE_10HALF;
3135         if (mask & ADVERTISED_10baseT_Full)
3136                 all_mask |= ADVERTISE_10FULL;
3137         if (mask & ADVERTISED_100baseT_Half)
3138                 all_mask |= ADVERTISE_100HALF;
3139         if (mask & ADVERTISED_100baseT_Full)
3140                 all_mask |= ADVERTISE_100FULL;
3141
3142         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3143                 return 0;
3144
3145         if ((adv_reg & all_mask) != all_mask)
3146                 return 0;
3147         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3148                 u32 tg3_ctrl;
3149
3150                 all_mask = 0;
3151                 if (mask & ADVERTISED_1000baseT_Half)
3152                         all_mask |= ADVERTISE_1000HALF;
3153                 if (mask & ADVERTISED_1000baseT_Full)
3154                         all_mask |= ADVERTISE_1000FULL;
3155
3156                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157                         return 0;
3158
3159                 if ((tg3_ctrl & all_mask) != all_mask)
3160                         return 0;
3161         }
3162         return 1;
3163 }
3164
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 {
3167         u32 curadv, reqadv;
3168
3169         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170                 return 1;
3171
3172         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174
3175         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176                 if (curadv != reqadv)
3177                         return 0;
3178
3179                 if (tg3_flag(tp, PAUSE_AUTONEG))
3180                         tg3_readphy(tp, MII_LPA, rmtadv);
3181         } else {
3182                 /* Reprogram the advertisement register, even if it
3183                  * does not affect the current link.  If the link
3184                  * gets renegotiated in the future, we can save an
3185                  * additional renegotiation cycle by advertising
3186                  * it correctly in the first place.
3187                  */
3188                 if (curadv != reqadv) {
3189                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190                                      ADVERTISE_PAUSE_ASYM);
3191                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192                 }
3193         }
3194
3195         return 1;
3196 }
3197
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 {
3200         int current_link_up;
3201         u32 bmsr, val;
3202         u32 lcl_adv, rmt_adv;
3203         u16 current_speed;
3204         u8 current_duplex;
3205         int i, err;
3206
3207         tw32(MAC_EVENT, 0);
3208
3209         tw32_f(MAC_STATUS,
3210              (MAC_STATUS_SYNC_CHANGED |
3211               MAC_STATUS_CFG_CHANGED |
3212               MAC_STATUS_MI_COMPLETION |
3213               MAC_STATUS_LNKSTATE_CHANGED));
3214         udelay(40);
3215
3216         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3217                 tw32_f(MAC_MI_MODE,
3218                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3219                 udelay(80);
3220         }
3221
3222         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223
3224         /* Some third-party PHYs need to be reset on link going
3225          * down.
3226          */
3227         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230             netif_carrier_ok(tp->dev)) {
3231                 tg3_readphy(tp, MII_BMSR, &bmsr);
3232                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233                     !(bmsr & BMSR_LSTATUS))
3234                         force_reset = 1;
3235         }
3236         if (force_reset)
3237                 tg3_phy_reset(tp);
3238
3239         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240                 tg3_readphy(tp, MII_BMSR, &bmsr);
3241                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242                     !tg3_flag(tp, INIT_COMPLETE))
3243                         bmsr = 0;
3244
3245                 if (!(bmsr & BMSR_LSTATUS)) {
3246                         err = tg3_init_5401phy_dsp(tp);
3247                         if (err)
3248                                 return err;
3249
3250                         tg3_readphy(tp, MII_BMSR, &bmsr);
3251                         for (i = 0; i < 1000; i++) {
3252                                 udelay(10);
3253                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254                                     (bmsr & BMSR_LSTATUS)) {
3255                                         udelay(40);
3256                                         break;
3257                                 }
3258                         }
3259
3260                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261                             TG3_PHY_REV_BCM5401_B0 &&
3262                             !(bmsr & BMSR_LSTATUS) &&
3263                             tp->link_config.active_speed == SPEED_1000) {
3264                                 err = tg3_phy_reset(tp);
3265                                 if (!err)
3266                                         err = tg3_init_5401phy_dsp(tp);
3267                                 if (err)
3268                                         return err;
3269                         }
3270                 }
3271         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273                 /* 5701 {A0,B0} CRC bug workaround */
3274                 tg3_writephy(tp, 0x15, 0x0a75);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278         }
3279
3280         /* Clear pending interrupts... */
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283
3284         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288
3289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3294                 else
3295                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296         }
3297
3298         current_link_up = 0;
3299         current_speed = SPEED_INVALID;
3300         current_duplex = DUPLEX_INVALID;
3301
3302         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303                 err = tg3_phy_auxctl_read(tp,
3304                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3305                                           &val);
3306                 if (!err && !(val & (1 << 10))) {
3307                         tg3_phy_auxctl_write(tp,
3308                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309                                              val | (1 << 10));
3310                         goto relink;
3311                 }
3312         }
3313
3314         bmsr = 0;
3315         for (i = 0; i < 100; i++) {
3316                 tg3_readphy(tp, MII_BMSR, &bmsr);
3317                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318                     (bmsr & BMSR_LSTATUS))
3319                         break;
3320                 udelay(40);
3321         }
3322
3323         if (bmsr & BMSR_LSTATUS) {
3324                 u32 aux_stat, bmcr;
3325
3326                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327                 for (i = 0; i < 2000; i++) {
3328                         udelay(10);
3329                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3330                             aux_stat)
3331                                 break;
3332                 }
3333
3334                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3335                                              &current_speed,
3336                                              &current_duplex);
3337
3338                 bmcr = 0;
3339                 for (i = 0; i < 200; i++) {
3340                         tg3_readphy(tp, MII_BMCR, &bmcr);
3341                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3342                                 continue;
3343                         if (bmcr && bmcr != 0x7fff)
3344                                 break;
3345                         udelay(10);
3346                 }
3347
3348                 lcl_adv = 0;
3349                 rmt_adv = 0;
3350
3351                 tp->link_config.active_speed = current_speed;
3352                 tp->link_config.active_duplex = current_duplex;
3353
3354                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355                         if ((bmcr & BMCR_ANENABLE) &&
3356                             tg3_copper_is_advertising_all(tp,
3357                                                 tp->link_config.advertising)) {
3358                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3359                                                                   &rmt_adv))
3360                                         current_link_up = 1;
3361                         }
3362                 } else {
3363                         if (!(bmcr & BMCR_ANENABLE) &&
3364                             tp->link_config.speed == current_speed &&
3365                             tp->link_config.duplex == current_duplex &&
3366                             tp->link_config.flowctrl ==
3367                             tp->link_config.active_flowctrl) {
3368                                 current_link_up = 1;
3369                         }
3370                 }
3371
3372                 if (current_link_up == 1 &&
3373                     tp->link_config.active_duplex == DUPLEX_FULL)
3374                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375         }
3376
3377 relink:
3378         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379                 tg3_phy_copper_begin(tp);
3380
3381                 tg3_readphy(tp, MII_BMSR, &bmsr);
3382                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384                         current_link_up = 1;
3385         }
3386
3387         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388         if (current_link_up == 1) {
3389                 if (tp->link_config.active_speed == SPEED_100 ||
3390                     tp->link_config.active_speed == SPEED_10)
3391                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3392                 else
3393                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396         else
3397                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398
3399         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400         if (tp->link_config.active_duplex == DUPLEX_HALF)
3401                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402
3403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404                 if (current_link_up == 1 &&
3405                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3407                 else
3408                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409         }
3410
3411         /* ??? Without this setting Netgear GA302T PHY does not
3412          * ??? send/receive packets...
3413          */
3414         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3418                 udelay(80);
3419         }
3420
3421         tw32_f(MAC_MODE, tp->mac_mode);
3422         udelay(40);
3423
3424         tg3_phy_eee_adjust(tp, current_link_up);
3425
3426         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427                 /* Polled via timer. */
3428                 tw32_f(MAC_EVENT, 0);
3429         } else {
3430                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431         }
3432         udelay(40);
3433
3434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435             current_link_up == 1 &&
3436             tp->link_config.active_speed == SPEED_1000 &&
3437             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438                 udelay(120);
3439                 tw32_f(MAC_STATUS,
3440                      (MAC_STATUS_SYNC_CHANGED |
3441                       MAC_STATUS_CFG_CHANGED));
3442                 udelay(40);
3443                 tg3_write_mem(tp,
3444                               NIC_SRAM_FIRMWARE_MBOX,
3445                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446         }
3447
3448         /* Prevent send BD corruption. */
3449         if (tg3_flag(tp, CLKREQ_BUG)) {
3450                 u16 oldlnkctl, newlnkctl;
3451
3452                 pci_read_config_word(tp->pdev,
3453                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3454                                      &oldlnkctl);
3455                 if (tp->link_config.active_speed == SPEED_100 ||
3456                     tp->link_config.active_speed == SPEED_10)
3457                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3458                 else
3459                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460                 if (newlnkctl != oldlnkctl)
3461                         pci_write_config_word(tp->pdev,
3462                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3463                                               newlnkctl);
3464         }
3465
3466         if (current_link_up != netif_carrier_ok(tp->dev)) {
3467                 if (current_link_up)
3468                         netif_carrier_on(tp->dev);
3469                 else
3470                         netif_carrier_off(tp->dev);
3471                 tg3_link_report(tp);
3472         }
3473
3474         return 0;
3475 }
3476
3477 struct tg3_fiber_aneginfo {
3478         int state;
3479 #define ANEG_STATE_UNKNOWN              0
3480 #define ANEG_STATE_AN_ENABLE            1
3481 #define ANEG_STATE_RESTART_INIT         2
3482 #define ANEG_STATE_RESTART              3
3483 #define ANEG_STATE_DISABLE_LINK_OK      4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3485 #define ANEG_STATE_ABILITY_DETECT       6
3486 #define ANEG_STATE_ACK_DETECT_INIT      7
3487 #define ANEG_STATE_ACK_DETECT           8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3489 #define ANEG_STATE_COMPLETE_ACK         10
3490 #define ANEG_STATE_IDLE_DETECT_INIT     11
3491 #define ANEG_STATE_IDLE_DETECT          12
3492 #define ANEG_STATE_LINK_OK              13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3495
3496         u32 flags;
3497 #define MR_AN_ENABLE            0x00000001
3498 #define MR_RESTART_AN           0x00000002
3499 #define MR_AN_COMPLETE          0x00000004
3500 #define MR_PAGE_RX              0x00000008
3501 #define MR_NP_LOADED            0x00000010
3502 #define MR_TOGGLE_TX            0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3510 #define MR_TOGGLE_RX            0x00002000
3511 #define MR_NP_RX                0x00004000
3512
3513 #define MR_LINK_OK              0x80000000
3514
3515         unsigned long link_time, cur_time;
3516
3517         u32 ability_match_cfg;
3518         int ability_match_count;
3519
3520         char ability_match, idle_match, ack_match;
3521
3522         u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP             0x00000080
3524 #define ANEG_CFG_ACK            0x00000040
3525 #define ANEG_CFG_RF2            0x00000020
3526 #define ANEG_CFG_RF1            0x00000010
3527 #define ANEG_CFG_PS2            0x00000001
3528 #define ANEG_CFG_PS1            0x00008000
3529 #define ANEG_CFG_HD             0x00004000
3530 #define ANEG_CFG_FD             0x00002000
3531 #define ANEG_CFG_INVAL          0x00001f06
3532
3533 };
3534 #define ANEG_OK         0
3535 #define ANEG_DONE       1
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED     -1
3538
3539 #define ANEG_STATE_SETTLE_TIME  10000
3540
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542                                    struct tg3_fiber_aneginfo *ap)
3543 {
3544         u16 flowctrl;
3545         unsigned long delta;
3546         u32 rx_cfg_reg;
3547         int ret;
3548
3549         if (ap->state == ANEG_STATE_UNKNOWN) {
3550                 ap->rxconfig = 0;
3551                 ap->link_time = 0;
3552                 ap->cur_time = 0;
3553                 ap->ability_match_cfg = 0;
3554                 ap->ability_match_count = 0;
3555                 ap->ability_match = 0;
3556                 ap->idle_match = 0;
3557                 ap->ack_match = 0;
3558         }
3559         ap->cur_time++;
3560
3561         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563
3564                 if (rx_cfg_reg != ap->ability_match_cfg) {
3565                         ap->ability_match_cfg = rx_cfg_reg;
3566                         ap->ability_match = 0;
3567                         ap->ability_match_count = 0;
3568                 } else {
3569                         if (++ap->ability_match_count > 1) {
3570                                 ap->ability_match = 1;
3571                                 ap->ability_match_cfg = rx_cfg_reg;
3572                         }
3573                 }
3574                 if (rx_cfg_reg & ANEG_CFG_ACK)
3575                         ap->ack_match = 1;
3576                 else
3577                         ap->ack_match = 0;
3578
3579                 ap->idle_match = 0;
3580         } else {
3581                 ap->idle_match = 1;
3582                 ap->ability_match_cfg = 0;
3583                 ap->ability_match_count = 0;
3584                 ap->ability_match = 0;
3585                 ap->ack_match = 0;
3586
3587                 rx_cfg_reg = 0;
3588         }
3589
3590         ap->rxconfig = rx_cfg_reg;
3591         ret = ANEG_OK;
3592
3593         switch (ap->state) {
3594         case ANEG_STATE_UNKNOWN:
3595                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596                         ap->state = ANEG_STATE_AN_ENABLE;
3597
3598                 /* fallthru */
3599         case ANEG_STATE_AN_ENABLE:
3600                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601                 if (ap->flags & MR_AN_ENABLE) {
3602                         ap->link_time = 0;
3603                         ap->cur_time = 0;
3604                         ap->ability_match_cfg = 0;
3605                         ap->ability_match_count = 0;
3606                         ap->ability_match = 0;
3607                         ap->idle_match = 0;
3608                         ap->ack_match = 0;
3609
3610                         ap->state = ANEG_STATE_RESTART_INIT;
3611                 } else {
3612                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613                 }
3614                 break;
3615
3616         case ANEG_STATE_RESTART_INIT:
3617                 ap->link_time = ap->cur_time;
3618                 ap->flags &= ~(MR_NP_LOADED);
3619                 ap->txconfig = 0;
3620                 tw32(MAC_TX_AUTO_NEG, 0);
3621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622                 tw32_f(MAC_MODE, tp->mac_mode);
3623                 udelay(40);
3624
3625                 ret = ANEG_TIMER_ENAB;
3626                 ap->state = ANEG_STATE_RESTART;
3627
3628                 /* fallthru */
3629         case ANEG_STATE_RESTART:
3630                 delta = ap->cur_time - ap->link_time;
3631                 if (delta > ANEG_STATE_SETTLE_TIME)
3632                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3633                 else
3634                         ret = ANEG_TIMER_ENAB;
3635                 break;
3636
3637         case ANEG_STATE_DISABLE_LINK_OK:
3638                 ret = ANEG_DONE;
3639                 break;
3640
3641         case ANEG_STATE_ABILITY_DETECT_INIT:
3642                 ap->flags &= ~(MR_TOGGLE_TX);
3643                 ap->txconfig = ANEG_CFG_FD;
3644                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645                 if (flowctrl & ADVERTISE_1000XPAUSE)
3646                         ap->txconfig |= ANEG_CFG_PS1;
3647                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648                         ap->txconfig |= ANEG_CFG_PS2;
3649                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651                 tw32_f(MAC_MODE, tp->mac_mode);
3652                 udelay(40);
3653
3654                 ap->state = ANEG_STATE_ABILITY_DETECT;
3655                 break;
3656
3657         case ANEG_STATE_ABILITY_DETECT:
3658                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660                 break;
3661
3662         case ANEG_STATE_ACK_DETECT_INIT:
3663                 ap->txconfig |= ANEG_CFG_ACK;
3664                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666                 tw32_f(MAC_MODE, tp->mac_mode);
3667                 udelay(40);
3668
3669                 ap->state = ANEG_STATE_ACK_DETECT;
3670
3671                 /* fallthru */
3672         case ANEG_STATE_ACK_DETECT:
3673                 if (ap->ack_match != 0) {
3674                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3677                         } else {
3678                                 ap->state = ANEG_STATE_AN_ENABLE;
3679                         }
3680                 } else if (ap->ability_match != 0 &&
3681                            ap->rxconfig == 0) {
3682                         ap->state = ANEG_STATE_AN_ENABLE;
3683                 }
3684                 break;
3685
3686         case ANEG_STATE_COMPLETE_ACK_INIT:
3687                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3688                         ret = ANEG_FAILED;
3689                         break;
3690                 }
3691                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692                                MR_LP_ADV_HALF_DUPLEX |
3693                                MR_LP_ADV_SYM_PAUSE |
3694                                MR_LP_ADV_ASYM_PAUSE |
3695                                MR_LP_ADV_REMOTE_FAULT1 |
3696                                MR_LP_ADV_REMOTE_FAULT2 |
3697                                MR_LP_ADV_NEXT_PAGE |
3698                                MR_TOGGLE_RX |
3699                                MR_NP_RX);
3700                 if (ap->rxconfig & ANEG_CFG_FD)
3701                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702                 if (ap->rxconfig & ANEG_CFG_HD)
3703                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704                 if (ap->rxconfig & ANEG_CFG_PS1)
3705                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706                 if (ap->rxconfig & ANEG_CFG_PS2)
3707                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708                 if (ap->rxconfig & ANEG_CFG_RF1)
3709                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710                 if (ap->rxconfig & ANEG_CFG_RF2)
3711                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712                 if (ap->rxconfig & ANEG_CFG_NP)
3713                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714
3715                 ap->link_time = ap->cur_time;
3716
3717                 ap->flags ^= (MR_TOGGLE_TX);
3718                 if (ap->rxconfig & 0x0008)
3719                         ap->flags |= MR_TOGGLE_RX;
3720                 if (ap->rxconfig & ANEG_CFG_NP)
3721                         ap->flags |= MR_NP_RX;
3722                 ap->flags |= MR_PAGE_RX;
3723
3724                 ap->state = ANEG_STATE_COMPLETE_ACK;
3725                 ret = ANEG_TIMER_ENAB;
3726                 break;
3727
3728         case ANEG_STATE_COMPLETE_ACK:
3729                 if (ap->ability_match != 0 &&
3730                     ap->rxconfig == 0) {
3731                         ap->state = ANEG_STATE_AN_ENABLE;
3732                         break;
3733                 }
3734                 delta = ap->cur_time - ap->link_time;
3735                 if (delta > ANEG_STATE_SETTLE_TIME) {
3736                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3738                         } else {
3739                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740                                     !(ap->flags & MR_NP_RX)) {
3741                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742                                 } else {
3743                                         ret = ANEG_FAILED;
3744                                 }
3745                         }
3746                 }
3747                 break;
3748
3749         case ANEG_STATE_IDLE_DETECT_INIT:
3750                 ap->link_time = ap->cur_time;
3751                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752                 tw32_f(MAC_MODE, tp->mac_mode);
3753                 udelay(40);
3754
3755                 ap->state = ANEG_STATE_IDLE_DETECT;
3756                 ret = ANEG_TIMER_ENAB;
3757                 break;
3758
3759         case ANEG_STATE_IDLE_DETECT:
3760                 if (ap->ability_match != 0 &&
3761                     ap->rxconfig == 0) {
3762                         ap->state = ANEG_STATE_AN_ENABLE;
3763                         break;
3764                 }
3765                 delta = ap->cur_time - ap->link_time;
3766                 if (delta > ANEG_STATE_SETTLE_TIME) {
3767                         /* XXX another gem from the Broadcom driver :( */
3768                         ap->state = ANEG_STATE_LINK_OK;
3769                 }
3770                 break;
3771
3772         case ANEG_STATE_LINK_OK:
3773                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3774                 ret = ANEG_DONE;
3775                 break;
3776
3777         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778                 /* ??? unimplemented */
3779                 break;
3780
3781         case ANEG_STATE_NEXT_PAGE_WAIT:
3782                 /* ??? unimplemented */
3783                 break;
3784
3785         default:
3786                 ret = ANEG_FAILED;
3787                 break;
3788         }
3789
3790         return ret;
3791 }
3792
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 {
3795         int res = 0;
3796         struct tg3_fiber_aneginfo aninfo;
3797         int status = ANEG_FAILED;
3798         unsigned int tick;
3799         u32 tmp;
3800
3801         tw32_f(MAC_TX_AUTO_NEG, 0);
3802
3803         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805         udelay(40);
3806
3807         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808         udelay(40);
3809
3810         memset(&aninfo, 0, sizeof(aninfo));
3811         aninfo.flags |= MR_AN_ENABLE;
3812         aninfo.state = ANEG_STATE_UNKNOWN;
3813         aninfo.cur_time = 0;
3814         tick = 0;
3815         while (++tick < 195000) {
3816                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817                 if (status == ANEG_DONE || status == ANEG_FAILED)
3818                         break;
3819
3820                 udelay(1);
3821         }
3822
3823         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824         tw32_f(MAC_MODE, tp->mac_mode);
3825         udelay(40);
3826
3827         *txflags = aninfo.txconfig;
3828         *rxflags = aninfo.flags;
3829
3830         if (status == ANEG_DONE &&
3831             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832                              MR_LP_ADV_FULL_DUPLEX)))
3833                 res = 1;
3834
3835         return res;
3836 }
3837
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3839 {
3840         u32 mac_status = tr32(MAC_STATUS);
3841         int i;
3842
3843         /* Reset when initting first time or we have a link. */
3844         if (tg3_flag(tp, INIT_COMPLETE) &&
3845             !(mac_status & MAC_STATUS_PCS_SYNCED))
3846                 return;
3847
3848         /* Set PLL lock range. */
3849         tg3_writephy(tp, 0x16, 0x8007);
3850
3851         /* SW reset */
3852         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853
3854         /* Wait for reset to complete. */
3855         /* XXX schedule_timeout() ... */
3856         for (i = 0; i < 500; i++)
3857                 udelay(10);
3858
3859         /* Config mode; select PMA/Ch 1 regs. */
3860         tg3_writephy(tp, 0x10, 0x8411);
3861
3862         /* Enable auto-lock and comdet, select txclk for tx. */
3863         tg3_writephy(tp, 0x11, 0x0a10);
3864
3865         tg3_writephy(tp, 0x18, 0x00a0);
3866         tg3_writephy(tp, 0x16, 0x41ff);
3867
3868         /* Assert and deassert POR. */
3869         tg3_writephy(tp, 0x13, 0x0400);
3870         udelay(40);
3871         tg3_writephy(tp, 0x13, 0x0000);
3872
3873         tg3_writephy(tp, 0x11, 0x0a50);
3874         udelay(40);
3875         tg3_writephy(tp, 0x11, 0x0a10);
3876
3877         /* Wait for signal to stabilize */
3878         /* XXX schedule_timeout() ... */
3879         for (i = 0; i < 15000; i++)
3880                 udelay(10);
3881
3882         /* Deselect the channel register so we can read the PHYID
3883          * later.
3884          */
3885         tg3_writephy(tp, 0x10, 0x8011);
3886 }
3887
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 {
3890         u16 flowctrl;
3891         u32 sg_dig_ctrl, sg_dig_status;
3892         u32 serdes_cfg, expected_sg_dig_ctrl;
3893         int workaround, port_a;
3894         int current_link_up;
3895
3896         serdes_cfg = 0;
3897         expected_sg_dig_ctrl = 0;
3898         workaround = 0;
3899         port_a = 1;
3900         current_link_up = 0;
3901
3902         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3904                 workaround = 1;
3905                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906                         port_a = 0;
3907
3908                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909                 /* preserve bits 20-23 for voltage regulator */
3910                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911         }
3912
3913         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914
3915         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3917                         if (workaround) {
3918                                 u32 val = serdes_cfg;
3919
3920                                 if (port_a)
3921                                         val |= 0xc010000;
3922                                 else
3923                                         val |= 0x4010000;
3924                                 tw32_f(MAC_SERDES_CFG, val);
3925                         }
3926
3927                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928                 }
3929                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930                         tg3_setup_flow_control(tp, 0, 0);
3931                         current_link_up = 1;
3932                 }
3933                 goto out;
3934         }
3935
3936         /* Want auto-negotiation.  */
3937         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938
3939         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940         if (flowctrl & ADVERTISE_1000XPAUSE)
3941                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944
3945         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947                     tp->serdes_counter &&
3948                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949                                     MAC_STATUS_RCVD_CFG)) ==
3950                      MAC_STATUS_PCS_SYNCED)) {
3951                         tp->serdes_counter--;
3952                         current_link_up = 1;
3953                         goto out;
3954                 }
3955 restart_autoneg:
3956                 if (workaround)
3957                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3959                 udelay(5);
3960                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961
3962                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965                                  MAC_STATUS_SIGNAL_DET)) {
3966                 sg_dig_status = tr32(SG_DIG_STATUS);
3967                 mac_status = tr32(MAC_STATUS);
3968
3969                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971                         u32 local_adv = 0, remote_adv = 0;
3972
3973                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974                                 local_adv |= ADVERTISE_1000XPAUSE;
3975                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977
3978                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979                                 remote_adv |= LPA_1000XPAUSE;
3980                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3982
3983                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3984                         current_link_up = 1;
3985                         tp->serdes_counter = 0;
3986                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988                         if (tp->serdes_counter)
3989                                 tp->serdes_counter--;
3990                         else {
3991                                 if (workaround) {
3992                                         u32 val = serdes_cfg;
3993
3994                                         if (port_a)
3995                                                 val |= 0xc010000;
3996                                         else
3997                                                 val |= 0x4010000;
3998
3999                                         tw32_f(MAC_SERDES_CFG, val);
4000                                 }
4001
4002                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003                                 udelay(40);
4004
4005                                 /* Link parallel detection - link is up */
4006                                 /* only if we have PCS_SYNC and not */
4007                                 /* receiving config code words */
4008                                 mac_status = tr32(MAC_STATUS);
4009                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011                                         tg3_setup_flow_control(tp, 0, 0);
4012                                         current_link_up = 1;
4013                                         tp->phy_flags |=
4014                                                 TG3_PHYFLG_PARALLEL_DETECT;
4015                                         tp->serdes_counter =
4016                                                 SERDES_PARALLEL_DET_TIMEOUT;
4017                                 } else
4018                                         goto restart_autoneg;
4019                         }
4020                 }
4021         } else {
4022                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024         }
4025
4026 out:
4027         return current_link_up;
4028 }
4029
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4031 {
4032         int current_link_up = 0;
4033
4034         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4035                 goto out;
4036
4037         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038                 u32 txflags, rxflags;
4039                 int i;
4040
4041                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042                         u32 local_adv = 0, remote_adv = 0;
4043
4044                         if (txflags & ANEG_CFG_PS1)
4045                                 local_adv |= ADVERTISE_1000XPAUSE;
4046                         if (txflags & ANEG_CFG_PS2)
4047                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4048
4049                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050                                 remote_adv |= LPA_1000XPAUSE;
4051                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4053
4054                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4055
4056                         current_link_up = 1;
4057                 }
4058                 for (i = 0; i < 30; i++) {
4059                         udelay(20);
4060                         tw32_f(MAC_STATUS,
4061                                (MAC_STATUS_SYNC_CHANGED |
4062                                 MAC_STATUS_CFG_CHANGED));
4063                         udelay(40);
4064                         if ((tr32(MAC_STATUS) &
4065                              (MAC_STATUS_SYNC_CHANGED |
4066                               MAC_STATUS_CFG_CHANGED)) == 0)
4067                                 break;
4068                 }
4069
4070                 mac_status = tr32(MAC_STATUS);
4071                 if (current_link_up == 0 &&
4072                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073                     !(mac_status & MAC_STATUS_RCVD_CFG))
4074                         current_link_up = 1;
4075         } else {
4076                 tg3_setup_flow_control(tp, 0, 0);
4077
4078                 /* Forcing 1000FD link up. */
4079                 current_link_up = 1;
4080
4081                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4082                 udelay(40);
4083
4084                 tw32_f(MAC_MODE, tp->mac_mode);
4085                 udelay(40);
4086         }
4087
4088 out:
4089         return current_link_up;
4090 }
4091
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 {
4094         u32 orig_pause_cfg;
4095         u16 orig_active_speed;
4096         u8 orig_active_duplex;
4097         u32 mac_status;
4098         int current_link_up;
4099         int i;
4100
4101         orig_pause_cfg = tp->link_config.active_flowctrl;
4102         orig_active_speed = tp->link_config.active_speed;
4103         orig_active_duplex = tp->link_config.active_duplex;
4104
4105         if (!tg3_flag(tp, HW_AUTONEG) &&
4106             netif_carrier_ok(tp->dev) &&
4107             tg3_flag(tp, INIT_COMPLETE)) {
4108                 mac_status = tr32(MAC_STATUS);
4109                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110                                MAC_STATUS_SIGNAL_DET |
4111                                MAC_STATUS_CFG_CHANGED |
4112                                MAC_STATUS_RCVD_CFG);
4113                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114                                    MAC_STATUS_SIGNAL_DET)) {
4115                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116                                             MAC_STATUS_CFG_CHANGED));
4117                         return 0;
4118                 }
4119         }
4120
4121         tw32_f(MAC_TX_AUTO_NEG, 0);
4122
4123         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125         tw32_f(MAC_MODE, tp->mac_mode);
4126         udelay(40);
4127
4128         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129                 tg3_init_bcm8002(tp);
4130
4131         /* Enable link change event even when serdes polling.  */
4132         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133         udelay(40);
4134
4135         current_link_up = 0;
4136         mac_status = tr32(MAC_STATUS);
4137
4138         if (tg3_flag(tp, HW_AUTONEG))
4139                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4140         else
4141                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4142
4143         tp->napi[0].hw_status->status =
4144                 (SD_STATUS_UPDATED |
4145                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4146
4147         for (i = 0; i < 100; i++) {
4148                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149                                     MAC_STATUS_CFG_CHANGED));
4150                 udelay(5);
4151                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152                                          MAC_STATUS_CFG_CHANGED |
4153                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4154                         break;
4155         }
4156
4157         mac_status = tr32(MAC_STATUS);
4158         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159                 current_link_up = 0;
4160                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161                     tp->serdes_counter == 0) {
4162                         tw32_f(MAC_MODE, (tp->mac_mode |
4163                                           MAC_MODE_SEND_CONFIGS));
4164                         udelay(1);
4165                         tw32_f(MAC_MODE, tp->mac_mode);
4166                 }
4167         }
4168
4169         if (current_link_up == 1) {
4170                 tp->link_config.active_speed = SPEED_1000;
4171                 tp->link_config.active_duplex = DUPLEX_FULL;
4172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173                                     LED_CTRL_LNKLED_OVERRIDE |
4174                                     LED_CTRL_1000MBPS_ON));
4175         } else {
4176                 tp->link_config.active_speed = SPEED_INVALID;
4177                 tp->link_config.active_duplex = DUPLEX_INVALID;
4178                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179                                     LED_CTRL_LNKLED_OVERRIDE |
4180                                     LED_CTRL_TRAFFIC_OVERRIDE));
4181         }
4182
4183         if (current_link_up != netif_carrier_ok(tp->dev)) {
4184                 if (current_link_up)
4185                         netif_carrier_on(tp->dev);
4186                 else
4187                         netif_carrier_off(tp->dev);
4188                 tg3_link_report(tp);
4189         } else {
4190                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191                 if (orig_pause_cfg != now_pause_cfg ||
4192                     orig_active_speed != tp->link_config.active_speed ||
4193                     orig_active_duplex != tp->link_config.active_duplex)
4194                         tg3_link_report(tp);
4195         }
4196
4197         return 0;
4198 }
4199
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4201 {
4202         int current_link_up, err = 0;
4203         u32 bmsr, bmcr;
4204         u16 current_speed;
4205         u8 current_duplex;
4206         u32 local_adv, remote_adv;
4207
4208         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209         tw32_f(MAC_MODE, tp->mac_mode);
4210         udelay(40);
4211
4212         tw32(MAC_EVENT, 0);
4213
4214         tw32_f(MAC_STATUS,
4215              (MAC_STATUS_SYNC_CHANGED |
4216               MAC_STATUS_CFG_CHANGED |
4217               MAC_STATUS_MI_COMPLETION |
4218               MAC_STATUS_LNKSTATE_CHANGED));
4219         udelay(40);
4220
4221         if (force_reset)
4222                 tg3_phy_reset(tp);
4223
4224         current_link_up = 0;
4225         current_speed = SPEED_INVALID;
4226         current_duplex = DUPLEX_INVALID;
4227
4228         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232                         bmsr |= BMSR_LSTATUS;
4233                 else
4234                         bmsr &= ~BMSR_LSTATUS;
4235         }
4236
4237         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4238
4239         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241                 /* do nothing, just check for link up at the end */
4242         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4243                 u32 adv, new_adv;
4244
4245                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247                                   ADVERTISE_1000XPAUSE |
4248                                   ADVERTISE_1000XPSE_ASYM |
4249                                   ADVERTISE_SLCT);
4250
4251                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4252
4253                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254                         new_adv |= ADVERTISE_1000XHALF;
4255                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256                         new_adv |= ADVERTISE_1000XFULL;
4257
4258                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261                         tg3_writephy(tp, MII_BMCR, bmcr);
4262
4263                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266
4267                         return err;
4268                 }
4269         } else {
4270                 u32 new_bmcr;
4271
4272                 bmcr &= ~BMCR_SPEED1000;
4273                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4274
4275                 if (tp->link_config.duplex == DUPLEX_FULL)
4276                         new_bmcr |= BMCR_FULLDPLX;
4277
4278                 if (new_bmcr != bmcr) {
4279                         /* BMCR_SPEED1000 is a reserved bit that needs
4280                          * to be set on write.
4281                          */
4282                         new_bmcr |= BMCR_SPEED1000;
4283
4284                         /* Force a linkdown */
4285                         if (netif_carrier_ok(tp->dev)) {
4286                                 u32 adv;
4287
4288                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289                                 adv &= ~(ADVERTISE_1000XFULL |
4290                                          ADVERTISE_1000XHALF |
4291                                          ADVERTISE_SLCT);
4292                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4293                                 tg3_writephy(tp, MII_BMCR, bmcr |
4294                                                            BMCR_ANRESTART |
4295                                                            BMCR_ANENABLE);
4296                                 udelay(10);
4297                                 netif_carrier_off(tp->dev);
4298                         }
4299                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4300                         bmcr = new_bmcr;
4301                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4304                             ASIC_REV_5714) {
4305                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306                                         bmsr |= BMSR_LSTATUS;
4307                                 else
4308                                         bmsr &= ~BMSR_LSTATUS;
4309                         }
4310                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4311                 }
4312         }
4313
4314         if (bmsr & BMSR_LSTATUS) {
4315                 current_speed = SPEED_1000;
4316                 current_link_up = 1;
4317                 if (bmcr & BMCR_FULLDPLX)
4318                         current_duplex = DUPLEX_FULL;
4319                 else
4320                         current_duplex = DUPLEX_HALF;
4321
4322                 local_adv = 0;
4323                 remote_adv = 0;
4324
4325                 if (bmcr & BMCR_ANENABLE) {
4326                         u32 common;
4327
4328                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330                         common = local_adv & remote_adv;
4331                         if (common & (ADVERTISE_1000XHALF |
4332                                       ADVERTISE_1000XFULL)) {
4333                                 if (common & ADVERTISE_1000XFULL)
4334                                         current_duplex = DUPLEX_FULL;
4335                                 else
4336                                         current_duplex = DUPLEX_HALF;
4337                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4338                                 /* Link is up via parallel detect */
4339                         } else {
4340                                 current_link_up = 0;
4341                         }
4342                 }
4343         }
4344
4345         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4347
4348         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349         if (tp->link_config.active_duplex == DUPLEX_HALF)
4350                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4351
4352         tw32_f(MAC_MODE, tp->mac_mode);
4353         udelay(40);
4354
4355         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4356
4357         tp->link_config.active_speed = current_speed;
4358         tp->link_config.active_duplex = current_duplex;
4359
4360         if (current_link_up != netif_carrier_ok(tp->dev)) {
4361                 if (current_link_up)
4362                         netif_carrier_on(tp->dev);
4363                 else {
4364                         netif_carrier_off(tp->dev);
4365                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4366                 }
4367                 tg3_link_report(tp);
4368         }
4369         return err;
4370 }
4371
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4373 {
4374         if (tp->serdes_counter) {
4375                 /* Give autoneg time to complete. */
4376                 tp->serdes_counter--;
4377                 return;
4378         }
4379
4380         if (!netif_carrier_ok(tp->dev) &&
4381             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4382                 u32 bmcr;
4383
4384                 tg3_readphy(tp, MII_BMCR, &bmcr);
4385                 if (bmcr & BMCR_ANENABLE) {
4386                         u32 phy1, phy2;
4387
4388                         /* Select shadow register 0x1f */
4389                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4391
4392                         /* Select expansion interrupt status register */
4393                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394                                          MII_TG3_DSP_EXP1_INT_STAT);
4395                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4397
4398                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399                                 /* We have signal detect and not receiving
4400                                  * config code words, link is up by parallel
4401                                  * detection.
4402                                  */
4403
4404                                 bmcr &= ~BMCR_ANENABLE;
4405                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406                                 tg3_writephy(tp, MII_BMCR, bmcr);
4407                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408                         }
4409                 }
4410         } else if (netif_carrier_ok(tp->dev) &&
4411                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4413                 u32 phy2;
4414
4415                 /* Select expansion interrupt status register */
4416                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417                                  MII_TG3_DSP_EXP1_INT_STAT);
4418                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4419                 if (phy2 & 0x20) {
4420                         u32 bmcr;
4421
4422                         /* Config code words received, turn on autoneg. */
4423                         tg3_readphy(tp, MII_BMCR, &bmcr);
4424                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4425
4426                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427
4428                 }
4429         }
4430 }
4431
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4433 {
4434         u32 val;
4435         int err;
4436
4437         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438                 err = tg3_setup_fiber_phy(tp, force_reset);
4439         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4441         else
4442                 err = tg3_setup_copper_phy(tp, force_reset);
4443
4444         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4445                 u32 scale;
4446
4447                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4449                         scale = 65;
4450                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4451                         scale = 6;
4452                 else
4453                         scale = 12;
4454
4455                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457                 tw32(GRC_MISC_CFG, val);
4458         }
4459
4460         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461               (6 << TX_LENGTHS_IPG_SHIFT);
4462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463                 val |= tr32(MAC_TX_LENGTHS) &
4464                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4466
4467         if (tp->link_config.active_speed == SPEED_1000 &&
4468             tp->link_config.active_duplex == DUPLEX_HALF)
4469                 tw32(MAC_TX_LENGTHS, val |
4470                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4471         else
4472                 tw32(MAC_TX_LENGTHS, val |
4473                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4474
4475         if (!tg3_flag(tp, 5705_PLUS)) {
4476                 if (netif_carrier_ok(tp->dev)) {
4477                         tw32(HOSTCC_STAT_COAL_TICKS,
4478                              tp->coal.stats_block_coalesce_usecs);
4479                 } else {
4480                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4481                 }
4482         }
4483
4484         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485                 val = tr32(PCIE_PWR_MGMT_THRESH);
4486                 if (!netif_carrier_ok(tp->dev))
4487                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4488                               tp->pwrmgmt_thresh;
4489                 else
4490                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491                 tw32(PCIE_PWR_MGMT_THRESH, val);
4492         }
4493
4494         return err;
4495 }
4496
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4498 {
4499         return tp->irq_sync;
4500 }
4501
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 {
4504         int i;
4505
4506         dst = (u32 *)((u8 *)dst + off);
4507         for (i = 0; i < len; i += sizeof(u32))
4508                 *dst++ = tr32(off + i);
4509 }
4510
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4512 {
4513         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4532
4533         if (tg3_flag(tp, SUPPORT_MSIX))
4534                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4535
4536         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4544
4545         if (!tg3_flag(tp, 5705_PLUS)) {
4546                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549         }
4550
4551         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4556
4557         if (tg3_flag(tp, NVRAM))
4558                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 }
4560
4561 static void tg3_dump_state(struct tg3 *tp)
4562 {
4563         int i;
4564         u32 *regs;
4565
4566         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4567         if (!regs) {
4568                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4569                 return;
4570         }
4571
4572         if (tg3_flag(tp, PCI_EXPRESS)) {
4573                 /* Read up to but not including private PCI registers */
4574                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575                         regs[i / sizeof(u32)] = tr32(i);
4576         } else
4577                 tg3_dump_legacy_regs(tp, regs);
4578
4579         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580                 if (!regs[i + 0] && !regs[i + 1] &&
4581                     !regs[i + 2] && !regs[i + 3])
4582                         continue;
4583
4584                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4585                            i * 4,
4586                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4587         }
4588
4589         kfree(regs);
4590
4591         for (i = 0; i < tp->irq_cnt; i++) {
4592                 struct tg3_napi *tnapi = &tp->napi[i];
4593
4594                 /* SW status block */
4595                 netdev_err(tp->dev,
4596                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4597                            i,
4598                            tnapi->hw_status->status,
4599                            tnapi->hw_status->status_tag,
4600                            tnapi->hw_status->rx_jumbo_consumer,
4601                            tnapi->hw_status->rx_consumer,
4602                            tnapi->hw_status->rx_mini_consumer,
4603                            tnapi->hw_status->idx[0].rx_producer,
4604                            tnapi->hw_status->idx[0].tx_consumer);
4605
4606                 netdev_err(tp->dev,
4607                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4608                            i,
4609                            tnapi->last_tag, tnapi->last_irq_tag,
4610                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4611                            tnapi->rx_rcb_ptr,
4612                            tnapi->prodring.rx_std_prod_idx,
4613                            tnapi->prodring.rx_std_cons_idx,
4614                            tnapi->prodring.rx_jmb_prod_idx,
4615                            tnapi->prodring.rx_jmb_cons_idx);
4616         }
4617 }
4618
4619 /* This is called whenever we suspect that the system chipset is re-
4620  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621  * is bogus tx completions. We try to recover by setting the
4622  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623  * in the workqueue.
4624  */
4625 static void tg3_tx_recover(struct tg3 *tp)
4626 {
4627         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4629
4630         netdev_warn(tp->dev,
4631                     "The system may be re-ordering memory-mapped I/O "
4632                     "cycles to the network device, attempting to recover. "
4633                     "Please report the problem to the driver maintainer "
4634                     "and include system chipset information.\n");
4635
4636         spin_lock(&tp->lock);
4637         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638         spin_unlock(&tp->lock);
4639 }
4640
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 {
4643         /* Tell compiler to fetch tx indices from memory. */
4644         barrier();
4645         return tnapi->tx_pending -
4646                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 }
4648
4649 /* Tigon3 never reports partial packet sends.  So we do not
4650  * need special logic to handle SKBs that have not had all
4651  * of their frags sent yet, like SunGEM does.
4652  */
4653 static void tg3_tx(struct tg3_napi *tnapi)
4654 {
4655         struct tg3 *tp = tnapi->tp;
4656         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657         u32 sw_idx = tnapi->tx_cons;
4658         struct netdev_queue *txq;
4659         int index = tnapi - tp->napi;
4660
4661         if (tg3_flag(tp, ENABLE_TSS))
4662                 index--;
4663
4664         txq = netdev_get_tx_queue(tp->dev, index);
4665
4666         while (sw_idx != hw_idx) {
4667                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668                 struct sk_buff *skb = ri->skb;
4669                 int i, tx_bug = 0;
4670
4671                 if (unlikely(skb == NULL)) {
4672                         tg3_tx_recover(tp);
4673                         return;
4674                 }
4675
4676                 pci_unmap_single(tp->pdev,
4677                                  dma_unmap_addr(ri, mapping),
4678                                  skb_headlen(skb),
4679                                  PCI_DMA_TODEVICE);
4680
4681                 ri->skb = NULL;
4682
4683                 sw_idx = NEXT_TX(sw_idx);
4684
4685                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686                         ri = &tnapi->tx_buffers[sw_idx];
4687                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4688                                 tx_bug = 1;
4689
4690                         pci_unmap_page(tp->pdev,
4691                                        dma_unmap_addr(ri, mapping),
4692                                        skb_shinfo(skb)->frags[i].size,
4693                                        PCI_DMA_TODEVICE);
4694                         sw_idx = NEXT_TX(sw_idx);
4695                 }
4696
4697                 dev_kfree_skb(skb);
4698
4699                 if (unlikely(tx_bug)) {
4700                         tg3_tx_recover(tp);
4701                         return;
4702                 }
4703         }
4704
4705         tnapi->tx_cons = sw_idx;
4706
4707         /* Need to make the tx_cons update visible to tg3_start_xmit()
4708          * before checking for netif_queue_stopped().  Without the
4709          * memory barrier, there is a small possibility that tg3_start_xmit()
4710          * will miss it and cause the queue to be stopped forever.
4711          */
4712         smp_mb();
4713
4714         if (unlikely(netif_tx_queue_stopped(txq) &&
4715                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716                 __netif_tx_lock(txq, smp_processor_id());
4717                 if (netif_tx_queue_stopped(txq) &&
4718                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719                         netif_tx_wake_queue(txq);
4720                 __netif_tx_unlock(txq);
4721         }
4722 }
4723
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 {
4726         if (!ri->skb)
4727                 return;
4728
4729         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730                          map_sz, PCI_DMA_FROMDEVICE);
4731         dev_kfree_skb_any(ri->skb);
4732         ri->skb = NULL;
4733 }
4734
4735 /* Returns size of skb allocated or < 0 on error.
4736  *
4737  * We only need to fill in the address because the other members
4738  * of the RX descriptor are invariant, see tg3_init_rings.
4739  *
4740  * Note the purposeful assymetry of cpu vs. chip accesses.  For
4741  * posting buffers we only dirty the first cache line of the RX
4742  * descriptor (containing the address).  Whereas for the RX status
4743  * buffers the cpu only reads the last cacheline of the RX descriptor
4744  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4745  */
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747                             u32 opaque_key, u32 dest_idx_unmasked)
4748 {
4749         struct tg3_rx_buffer_desc *desc;
4750         struct ring_info *map;
4751         struct sk_buff *skb;
4752         dma_addr_t mapping;
4753         int skb_size, dest_idx;
4754
4755         switch (opaque_key) {
4756         case RXD_OPAQUE_RING_STD:
4757                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758                 desc = &tpr->rx_std[dest_idx];
4759                 map = &tpr->rx_std_buffers[dest_idx];
4760                 skb_size = tp->rx_pkt_map_sz;
4761                 break;
4762
4763         case RXD_OPAQUE_RING_JUMBO:
4764                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765                 desc = &tpr->rx_jmb[dest_idx].std;
4766                 map = &tpr->rx_jmb_buffers[dest_idx];
4767                 skb_size = TG3_RX_JMB_MAP_SZ;
4768                 break;
4769
4770         default:
4771                 return -EINVAL;
4772         }
4773
4774         /* Do not overwrite any of the map or rp information
4775          * until we are sure we can commit to a new buffer.
4776          *
4777          * Callers depend upon this behavior and assume that
4778          * we leave everything unchanged if we fail.
4779          */
4780         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4781         if (skb == NULL)
4782                 return -ENOMEM;
4783
4784         skb_reserve(skb, tp->rx_offset);
4785
4786         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787                                  PCI_DMA_FROMDEVICE);
4788         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4789                 dev_kfree_skb(skb);
4790                 return -EIO;
4791         }
4792
4793         map->skb = skb;
4794         dma_unmap_addr_set(map, mapping, mapping);
4795
4796         desc->addr_hi = ((u64)mapping >> 32);
4797         desc->addr_lo = ((u64)mapping & 0xffffffff);
4798
4799         return skb_size;
4800 }
4801
4802 /* We only need to move over in the address because the other
4803  * members of the RX descriptor are invariant.  See notes above
4804  * tg3_alloc_rx_skb for full details.
4805  */
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807                            struct tg3_rx_prodring_set *dpr,
4808                            u32 opaque_key, int src_idx,
4809                            u32 dest_idx_unmasked)
4810 {
4811         struct tg3 *tp = tnapi->tp;
4812         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813         struct ring_info *src_map, *dest_map;
4814         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4815         int dest_idx;
4816
4817         switch (opaque_key) {
4818         case RXD_OPAQUE_RING_STD:
4819                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820                 dest_desc = &dpr->rx_std[dest_idx];
4821                 dest_map = &dpr->rx_std_buffers[dest_idx];
4822                 src_desc = &spr->rx_std[src_idx];
4823                 src_map = &spr->rx_std_buffers[src_idx];
4824                 break;
4825
4826         case RXD_OPAQUE_RING_JUMBO:
4827                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830                 src_desc = &spr->rx_jmb[src_idx].std;
4831                 src_map = &spr->rx_jmb_buffers[src_idx];
4832                 break;
4833
4834         default:
4835                 return;
4836         }
4837
4838         dest_map->skb = src_map->skb;
4839         dma_unmap_addr_set(dest_map, mapping,
4840                            dma_unmap_addr(src_map, mapping));
4841         dest_desc->addr_hi = src_desc->addr_hi;
4842         dest_desc->addr_lo = src_desc->addr_lo;
4843
4844         /* Ensure that the update to the skb happens after the physical
4845          * addresses have been transferred to the new BD location.
4846          */
4847         smp_wmb();
4848
4849         src_map->skb = NULL;
4850 }
4851
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853  * buffers to the chip, and one special ring the chip uses to report
4854  * status back to the host.
4855  *
4856  * The special ring reports the status of received packets to the
4857  * host.  The chip does not write into the original descriptor the
4858  * RX buffer was obtained from.  The chip simply takes the original
4859  * descriptor as provided by the host, updates the status and length
4860  * field, then writes this into the next status ring entry.
4861  *
4862  * Each ring the host uses to post buffers to the chip is described
4863  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4864  * it is first placed into the on-chip ram.  When the packet's length
4865  * is known, it walks down the TG3_BDINFO entries to select the ring.
4866  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867  * which is within the range of the new packet's length is chosen.
4868  *
4869  * The "separate ring for rx status" scheme may sound queer, but it makes
4870  * sense from a cache coherency perspective.  If only the host writes
4871  * to the buffer post rings, and only the chip writes to the rx status
4872  * rings, then cache lines never move beyond shared-modified state.
4873  * If both the host and chip were to write into the same ring, cache line
4874  * eviction could occur since both entities want it in an exclusive state.
4875  */
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4877 {
4878         struct tg3 *tp = tnapi->tp;
4879         u32 work_mask, rx_std_posted = 0;
4880         u32 std_prod_idx, jmb_prod_idx;
4881         u32 sw_idx = tnapi->rx_rcb_ptr;
4882         u16 hw_idx;
4883         int received;
4884         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4885
4886         hw_idx = *(tnapi->rx_rcb_prod_idx);
4887         /*
4888          * We need to order the read of hw_idx and the read of
4889          * the opaque cookie.
4890          */
4891         rmb();
4892         work_mask = 0;
4893         received = 0;
4894         std_prod_idx = tpr->rx_std_prod_idx;
4895         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896         while (sw_idx != hw_idx && budget > 0) {
4897                 struct ring_info *ri;
4898                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4899                 unsigned int len;
4900                 struct sk_buff *skb;
4901                 dma_addr_t dma_addr;
4902                 u32 opaque_key, desc_idx, *post_ptr;
4903
4904                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908                         dma_addr = dma_unmap_addr(ri, mapping);
4909                         skb = ri->skb;
4910                         post_ptr = &std_prod_idx;
4911                         rx_std_posted++;
4912                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914                         dma_addr = dma_unmap_addr(ri, mapping);
4915                         skb = ri->skb;
4916                         post_ptr = &jmb_prod_idx;
4917                 } else
4918                         goto next_pkt_nopost;
4919
4920                 work_mask |= opaque_key;
4921
4922                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4924                 drop_it:
4925                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4926                                        desc_idx, *post_ptr);
4927                 drop_it_no_recycle:
4928                         /* Other statistics kept track of by card. */
4929                         tp->rx_dropped++;
4930                         goto next_pkt;
4931                 }
4932
4933                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4934                       ETH_FCS_LEN;
4935
4936                 if (len > TG3_RX_COPY_THRESH(tp)) {
4937                         int skb_size;
4938
4939                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4940                                                     *post_ptr);
4941                         if (skb_size < 0)
4942                                 goto drop_it;
4943
4944                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945                                          PCI_DMA_FROMDEVICE);
4946
4947                         /* Ensure that the update to the skb happens
4948                          * after the usage of the old DMA mapping.
4949                          */
4950                         smp_wmb();
4951
4952                         ri->skb = NULL;
4953
4954                         skb_put(skb, len);
4955                 } else {
4956                         struct sk_buff *copy_skb;
4957
4958                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4959                                        desc_idx, *post_ptr);
4960
4961                         copy_skb = netdev_alloc_skb(tp->dev, len +
4962                                                     TG3_RAW_IP_ALIGN);
4963                         if (copy_skb == NULL)
4964                                 goto drop_it_no_recycle;
4965
4966                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967                         skb_put(copy_skb, len);
4968                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4970                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4971
4972                         /* We'll reuse the original ring buffer. */
4973                         skb = copy_skb;
4974                 }
4975
4976                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4981                 else
4982                         skb_checksum_none_assert(skb);
4983
4984                 skb->protocol = eth_type_trans(skb, tp->dev);
4985
4986                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987                     skb->protocol != htons(ETH_P_8021Q)) {
4988                         dev_kfree_skb(skb);
4989                         goto drop_it_no_recycle;
4990                 }
4991
4992                 if (desc->type_flags & RXD_FLAG_VLAN &&
4993                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994                         __vlan_hwaccel_put_tag(skb,
4995                                                desc->err_vlan & RXD_VLAN_MASK);
4996
4997                 napi_gro_receive(&tnapi->napi, skb);
4998
4999                 received++;
5000                 budget--;
5001
5002 next_pkt:
5003                 (*post_ptr)++;
5004
5005                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006                         tpr->rx_std_prod_idx = std_prod_idx &
5007                                                tp->rx_std_ring_mask;
5008                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009                                      tpr->rx_std_prod_idx);
5010                         work_mask &= ~RXD_OPAQUE_RING_STD;
5011                         rx_std_posted = 0;
5012                 }
5013 next_pkt_nopost:
5014                 sw_idx++;
5015                 sw_idx &= tp->rx_ret_ring_mask;
5016
5017                 /* Refresh hw_idx to see if there is new work */
5018                 if (sw_idx == hw_idx) {
5019                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5020                         rmb();
5021                 }
5022         }
5023
5024         /* ACK the status ring. */
5025         tnapi->rx_rcb_ptr = sw_idx;
5026         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5027
5028         /* Refill RX ring(s). */
5029         if (!tg3_flag(tp, ENABLE_RSS)) {
5030                 if (work_mask & RXD_OPAQUE_RING_STD) {
5031                         tpr->rx_std_prod_idx = std_prod_idx &
5032                                                tp->rx_std_ring_mask;
5033                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034                                      tpr->rx_std_prod_idx);
5035                 }
5036                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038                                                tp->rx_jmb_ring_mask;
5039                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040                                      tpr->rx_jmb_prod_idx);
5041                 }
5042                 mmiowb();
5043         } else if (work_mask) {
5044                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045                  * updated before the producer indices can be updated.
5046                  */
5047                 smp_wmb();
5048
5049                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5051
5052                 if (tnapi != &tp->napi[1])
5053                         napi_schedule(&tp->napi[1].napi);
5054         }
5055
5056         return received;
5057 }
5058
5059 static void tg3_poll_link(struct tg3 *tp)
5060 {
5061         /* handle link change and other phy events */
5062         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5064
5065                 if (sblk->status & SD_STATUS_LINK_CHG) {
5066                         sblk->status = SD_STATUS_UPDATED |
5067                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5068                         spin_lock(&tp->lock);
5069                         if (tg3_flag(tp, USE_PHYLIB)) {
5070                                 tw32_f(MAC_STATUS,
5071                                      (MAC_STATUS_SYNC_CHANGED |
5072                                       MAC_STATUS_CFG_CHANGED |
5073                                       MAC_STATUS_MI_COMPLETION |
5074                                       MAC_STATUS_LNKSTATE_CHANGED));
5075                                 udelay(40);
5076                         } else
5077                                 tg3_setup_phy(tp, 0);
5078                         spin_unlock(&tp->lock);
5079                 }
5080         }
5081 }
5082
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084                                 struct tg3_rx_prodring_set *dpr,
5085                                 struct tg3_rx_prodring_set *spr)
5086 {
5087         u32 si, di, cpycnt, src_prod_idx;
5088         int i, err = 0;
5089
5090         while (1) {
5091                 src_prod_idx = spr->rx_std_prod_idx;
5092
5093                 /* Make sure updates to the rx_std_buffers[] entries and the
5094                  * standard producer index are seen in the correct order.
5095                  */
5096                 smp_rmb();
5097
5098                 if (spr->rx_std_cons_idx == src_prod_idx)
5099                         break;
5100
5101                 if (spr->rx_std_cons_idx < src_prod_idx)
5102                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5103                 else
5104                         cpycnt = tp->rx_std_ring_mask + 1 -
5105                                  spr->rx_std_cons_idx;
5106
5107                 cpycnt = min(cpycnt,
5108                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5109
5110                 si = spr->rx_std_cons_idx;
5111                 di = dpr->rx_std_prod_idx;
5112
5113                 for (i = di; i < di + cpycnt; i++) {
5114                         if (dpr->rx_std_buffers[i].skb) {
5115                                 cpycnt = i - di;
5116                                 err = -ENOSPC;
5117                                 break;
5118                         }
5119                 }
5120
5121                 if (!cpycnt)
5122                         break;
5123
5124                 /* Ensure that updates to the rx_std_buffers ring and the
5125                  * shadowed hardware producer ring from tg3_recycle_skb() are
5126                  * ordered correctly WRT the skb check above.
5127                  */
5128                 smp_rmb();
5129
5130                 memcpy(&dpr->rx_std_buffers[di],
5131                        &spr->rx_std_buffers[si],
5132                        cpycnt * sizeof(struct ring_info));
5133
5134                 for (i = 0; i < cpycnt; i++, di++, si++) {
5135                         struct tg3_rx_buffer_desc *sbd, *dbd;
5136                         sbd = &spr->rx_std[si];
5137                         dbd = &dpr->rx_std[di];
5138                         dbd->addr_hi = sbd->addr_hi;
5139                         dbd->addr_lo = sbd->addr_lo;
5140                 }
5141
5142                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143                                        tp->rx_std_ring_mask;
5144                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145                                        tp->rx_std_ring_mask;
5146         }
5147
5148         while (1) {
5149                 src_prod_idx = spr->rx_jmb_prod_idx;
5150
5151                 /* Make sure updates to the rx_jmb_buffers[] entries and
5152                  * the jumbo producer index are seen in the correct order.
5153                  */
5154                 smp_rmb();
5155
5156                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5157                         break;
5158
5159                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5161                 else
5162                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5163                                  spr->rx_jmb_cons_idx;
5164
5165                 cpycnt = min(cpycnt,
5166                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5167
5168                 si = spr->rx_jmb_cons_idx;
5169                 di = dpr->rx_jmb_prod_idx;
5170
5171                 for (i = di; i < di + cpycnt; i++) {
5172                         if (dpr->rx_jmb_buffers[i].skb) {
5173                                 cpycnt = i - di;
5174                                 err = -ENOSPC;
5175                                 break;
5176                         }
5177                 }
5178
5179                 if (!cpycnt)
5180                         break;
5181
5182                 /* Ensure that updates to the rx_jmb_buffers ring and the
5183                  * shadowed hardware producer ring from tg3_recycle_skb() are
5184                  * ordered correctly WRT the skb check above.
5185                  */
5186                 smp_rmb();
5187
5188                 memcpy(&dpr->rx_jmb_buffers[di],
5189                        &spr->rx_jmb_buffers[si],
5190                        cpycnt * sizeof(struct ring_info));
5191
5192                 for (i = 0; i < cpycnt; i++, di++, si++) {
5193                         struct tg3_rx_buffer_desc *sbd, *dbd;
5194                         sbd = &spr->rx_jmb[si].std;
5195                         dbd = &dpr->rx_jmb[di].std;
5196                         dbd->addr_hi = sbd->addr_hi;
5197                         dbd->addr_lo = sbd->addr_lo;
5198                 }
5199
5200                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201                                        tp->rx_jmb_ring_mask;
5202                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203                                        tp->rx_jmb_ring_mask;
5204         }
5205
5206         return err;
5207 }
5208
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5210 {
5211         struct tg3 *tp = tnapi->tp;
5212
5213         /* run TX completion thread */
5214         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5215                 tg3_tx(tnapi);
5216                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5217                         return work_done;
5218         }
5219
5220         /* run RX thread, within the bounds set by NAPI.
5221          * All RX "locking" is done by ensuring outside
5222          * code synchronizes with tg3->napi.poll()
5223          */
5224         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225                 work_done += tg3_rx(tnapi, budget - work_done);
5226
5227         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5229                 int i, err = 0;
5230                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5232
5233                 for (i = 1; i < tp->irq_cnt; i++)
5234                         err |= tg3_rx_prodring_xfer(tp, dpr,
5235                                                     &tp->napi[i].prodring);
5236
5237                 wmb();
5238
5239                 if (std_prod_idx != dpr->rx_std_prod_idx)
5240                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241                                      dpr->rx_std_prod_idx);
5242
5243                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245                                      dpr->rx_jmb_prod_idx);
5246
5247                 mmiowb();
5248
5249                 if (err)
5250                         tw32_f(HOSTCC_MODE, tp->coal_now);
5251         }
5252
5253         return work_done;
5254 }
5255
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5257 {
5258         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259         struct tg3 *tp = tnapi->tp;
5260         int work_done = 0;
5261         struct tg3_hw_status *sblk = tnapi->hw_status;
5262
5263         while (1) {
5264                 work_done = tg3_poll_work(tnapi, work_done, budget);
5265
5266                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5267                         goto tx_recovery;
5268
5269                 if (unlikely(work_done >= budget))
5270                         break;
5271
5272                 /* tp->last_tag is used in tg3_int_reenable() below
5273                  * to tell the hw how much work has been processed,
5274                  * so we must read it before checking for more work.
5275                  */
5276                 tnapi->last_tag = sblk->status_tag;
5277                 tnapi->last_irq_tag = tnapi->last_tag;
5278                 rmb();
5279
5280                 /* check for RX/TX work to do */
5281                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283                         napi_complete(napi);
5284                         /* Reenable interrupts. */
5285                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5286                         mmiowb();
5287                         break;
5288                 }
5289         }
5290
5291         return work_done;
5292
5293 tx_recovery:
5294         /* work_done is guaranteed to be less than budget. */
5295         napi_complete(napi);
5296         schedule_work(&tp->reset_task);
5297         return work_done;
5298 }
5299
5300 static void tg3_process_error(struct tg3 *tp)
5301 {
5302         u32 val;
5303         bool real_error = false;
5304
5305         if (tg3_flag(tp, ERROR_PROCESSED))
5306                 return;
5307
5308         /* Check Flow Attention register */
5309         val = tr32(HOSTCC_FLOW_ATTN);
5310         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5312                 real_error = true;
5313         }
5314
5315         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5317                 real_error = true;
5318         }
5319
5320         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5322                 real_error = true;
5323         }
5324
5325         if (!real_error)
5326                 return;
5327
5328         tg3_dump_state(tp);
5329
5330         tg3_flag_set(tp, ERROR_PROCESSED);
5331         schedule_work(&tp->reset_task);
5332 }
5333
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5335 {
5336         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337         struct tg3 *tp = tnapi->tp;
5338         int work_done = 0;
5339         struct tg3_hw_status *sblk = tnapi->hw_status;
5340
5341         while (1) {
5342                 if (sblk->status & SD_STATUS_ERROR)
5343                         tg3_process_error(tp);
5344
5345                 tg3_poll_link(tp);
5346
5347                 work_done = tg3_poll_work(tnapi, work_done, budget);
5348
5349                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5350                         goto tx_recovery;
5351
5352                 if (unlikely(work_done >= budget))
5353                         break;
5354
5355                 if (tg3_flag(tp, TAGGED_STATUS)) {
5356                         /* tp->last_tag is used in tg3_int_reenable() below
5357                          * to tell the hw how much work has been processed,
5358                          * so we must read it before checking for more work.
5359                          */
5360                         tnapi->last_tag = sblk->status_tag;
5361                         tnapi->last_irq_tag = tnapi->last_tag;
5362                         rmb();
5363                 } else
5364                         sblk->status &= ~SD_STATUS_UPDATED;
5365
5366                 if (likely(!tg3_has_work(tnapi))) {
5367                         napi_complete(napi);
5368                         tg3_int_reenable(tnapi);
5369                         break;
5370                 }
5371         }
5372
5373         return work_done;
5374
5375 tx_recovery:
5376         /* work_done is guaranteed to be less than budget. */
5377         napi_complete(napi);
5378         schedule_work(&tp->reset_task);
5379         return work_done;
5380 }
5381
5382 static void tg3_napi_disable(struct tg3 *tp)
5383 {
5384         int i;
5385
5386         for (i = tp->irq_cnt - 1; i >= 0; i--)
5387                 napi_disable(&tp->napi[i].napi);
5388 }
5389
5390 static void tg3_napi_enable(struct tg3 *tp)
5391 {
5392         int i;
5393
5394         for (i = 0; i < tp->irq_cnt; i++)
5395                 napi_enable(&tp->napi[i].napi);
5396 }
5397
5398 static void tg3_napi_init(struct tg3 *tp)
5399 {
5400         int i;
5401
5402         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403         for (i = 1; i < tp->irq_cnt; i++)
5404                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 }
5406
5407 static void tg3_napi_fini(struct tg3 *tp)
5408 {
5409         int i;
5410
5411         for (i = 0; i < tp->irq_cnt; i++)
5412                 netif_napi_del(&tp->napi[i].napi);
5413 }
5414
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5416 {
5417         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418         tg3_napi_disable(tp);
5419         netif_tx_disable(tp->dev);
5420 }
5421
5422 static inline void tg3_netif_start(struct tg3 *tp)
5423 {
5424         /* NOTE: unconditional netif_tx_wake_all_queues is only
5425          * appropriate so long as all callers are assured to
5426          * have free tx slots (such as after tg3_init_hw)
5427          */
5428         netif_tx_wake_all_queues(tp->dev);
5429
5430         tg3_napi_enable(tp);
5431         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432         tg3_enable_ints(tp);
5433 }
5434
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5436 {
5437         int i;
5438
5439         BUG_ON(tp->irq_sync);
5440
5441         tp->irq_sync = 1;
5442         smp_mb();
5443
5444         for (i = 0; i < tp->irq_cnt; i++)
5445                 synchronize_irq(tp->napi[i].irq_vec);
5446 }
5447
5448 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5449  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5450  * with as well.  Most of the time, this is not necessary except when
5451  * shutting down the device.
5452  */
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5454 {
5455         spin_lock_bh(&tp->lock);
5456         if (irq_sync)
5457                 tg3_irq_quiesce(tp);
5458 }
5459
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5461 {
5462         spin_unlock_bh(&tp->lock);
5463 }
5464
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466  * after sending MSI so driver doesn't have to do it.
5467  */
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5469 {
5470         struct tg3_napi *tnapi = dev_id;
5471         struct tg3 *tp = tnapi->tp;
5472
5473         prefetch(tnapi->hw_status);
5474         if (tnapi->rx_rcb)
5475                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5476
5477         if (likely(!tg3_irq_sync(tp)))
5478                 napi_schedule(&tnapi->napi);
5479
5480         return IRQ_HANDLED;
5481 }
5482
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484  * flush status block and interrupt mailbox. PCI ordering rules
5485  * guarantee that MSI will arrive after the status block.
5486  */
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 {
5489         struct tg3_napi *tnapi = dev_id;
5490         struct tg3 *tp = tnapi->tp;
5491
5492         prefetch(tnapi->hw_status);
5493         if (tnapi->rx_rcb)
5494                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495         /*
5496          * Writing any value to intr-mbox-0 clears PCI INTA# and
5497          * chip-internal interrupt pending events.
5498          * Writing non-zero to intr-mbox-0 additional tells the
5499          * NIC to stop sending us irqs, engaging "in-intr-handler"
5500          * event coalescing.
5501          */
5502         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503         if (likely(!tg3_irq_sync(tp)))
5504                 napi_schedule(&tnapi->napi);
5505
5506         return IRQ_RETVAL(1);
5507 }
5508
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5510 {
5511         struct tg3_napi *tnapi = dev_id;
5512         struct tg3 *tp = tnapi->tp;
5513         struct tg3_hw_status *sblk = tnapi->hw_status;
5514         unsigned int handled = 1;
5515
5516         /* In INTx mode, it is possible for the interrupt to arrive at
5517          * the CPU before the status block posted prior to the interrupt.
5518          * Reading the PCI State register will confirm whether the
5519          * interrupt is ours and will flush the status block.
5520          */
5521         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522                 if (tg3_flag(tp, CHIP_RESETTING) ||
5523                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5524                         handled = 0;
5525                         goto out;
5526                 }
5527         }
5528
5529         /*
5530          * Writing any value to intr-mbox-0 clears PCI INTA# and
5531          * chip-internal interrupt pending events.
5532          * Writing non-zero to intr-mbox-0 additional tells the
5533          * NIC to stop sending us irqs, engaging "in-intr-handler"
5534          * event coalescing.
5535          *
5536          * Flush the mailbox to de-assert the IRQ immediately to prevent
5537          * spurious interrupts.  The flush impacts performance but
5538          * excessive spurious interrupts can be worse in some cases.
5539          */
5540         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541         if (tg3_irq_sync(tp))
5542                 goto out;
5543         sblk->status &= ~SD_STATUS_UPDATED;
5544         if (likely(tg3_has_work(tnapi))) {
5545                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546                 napi_schedule(&tnapi->napi);
5547         } else {
5548                 /* No work, shared interrupt perhaps?  re-enable
5549                  * interrupts, and flush that PCI write
5550                  */
5551                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5552                                0x00000000);
5553         }
5554 out:
5555         return IRQ_RETVAL(handled);
5556 }
5557
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5559 {
5560         struct tg3_napi *tnapi = dev_id;
5561         struct tg3 *tp = tnapi->tp;
5562         struct tg3_hw_status *sblk = tnapi->hw_status;
5563         unsigned int handled = 1;
5564
5565         /* In INTx mode, it is possible for the interrupt to arrive at
5566          * the CPU before the status block posted prior to the interrupt.
5567          * Reading the PCI State register will confirm whether the
5568          * interrupt is ours and will flush the status block.
5569          */
5570         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571                 if (tg3_flag(tp, CHIP_RESETTING) ||
5572                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5573                         handled = 0;
5574                         goto out;
5575                 }
5576         }
5577
5578         /*
5579          * writing any value to intr-mbox-0 clears PCI INTA# and
5580          * chip-internal interrupt pending events.
5581          * writing non-zero to intr-mbox-0 additional tells the
5582          * NIC to stop sending us irqs, engaging "in-intr-handler"
5583          * event coalescing.
5584          *
5585          * Flush the mailbox to de-assert the IRQ immediately to prevent
5586          * spurious interrupts.  The flush impacts performance but
5587          * excessive spurious interrupts can be worse in some cases.
5588          */
5589         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5590
5591         /*
5592          * In a shared interrupt configuration, sometimes other devices'
5593          * interrupts will scream.  We record the current status tag here
5594          * so that the above check can report that the screaming interrupts
5595          * are unhandled.  Eventually they will be silenced.
5596          */
5597         tnapi->last_irq_tag = sblk->status_tag;
5598
5599         if (tg3_irq_sync(tp))
5600                 goto out;
5601
5602         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5603
5604         napi_schedule(&tnapi->napi);
5605
5606 out:
5607         return IRQ_RETVAL(handled);
5608 }
5609
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 {
5613         struct tg3_napi *tnapi = dev_id;
5614         struct tg3 *tp = tnapi->tp;
5615         struct tg3_hw_status *sblk = tnapi->hw_status;
5616
5617         if ((sblk->status & SD_STATUS_UPDATED) ||
5618             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619                 tg3_disable_ints(tp);
5620                 return IRQ_RETVAL(1);
5621         }
5622         return IRQ_RETVAL(0);
5623 }
5624
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5627
5628 /* Restart hardware after configuration changes, self-test, etc.
5629  * Invoked with tp->lock held.
5630  */
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632         __releases(tp->lock)
5633         __acquires(tp->lock)
5634 {
5635         int err;
5636
5637         err = tg3_init_hw(tp, reset_phy);
5638         if (err) {
5639                 netdev_err(tp->dev,
5640                            "Failed to re-initialize device, aborting\n");
5641                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642                 tg3_full_unlock(tp);
5643                 del_timer_sync(&tp->timer);
5644                 tp->irq_sync = 0;
5645                 tg3_napi_enable(tp);
5646                 dev_close(tp->dev);
5647                 tg3_full_lock(tp, 0);
5648         }
5649         return err;
5650 }
5651
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5654 {
5655         int i;
5656         struct tg3 *tp = netdev_priv(dev);
5657
5658         for (i = 0; i < tp->irq_cnt; i++)
5659                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5660 }
5661 #endif
5662
5663 static void tg3_reset_task(struct work_struct *work)
5664 {
5665         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5666         int err;
5667         unsigned int restart_timer;
5668
5669         tg3_full_lock(tp, 0);
5670
5671         if (!netif_running(tp->dev)) {
5672                 tg3_full_unlock(tp);
5673                 return;
5674         }
5675
5676         tg3_full_unlock(tp);
5677
5678         tg3_phy_stop(tp);
5679
5680         tg3_netif_stop(tp);
5681
5682         tg3_full_lock(tp, 1);
5683
5684         restart_timer = tg3_flag(tp, RESTART_TIMER);
5685         tg3_flag_clear(tp, RESTART_TIMER);
5686
5687         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692         }
5693
5694         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695         err = tg3_init_hw(tp, 1);
5696         if (err)
5697                 goto out;
5698
5699         tg3_netif_start(tp);
5700
5701         if (restart_timer)
5702                 mod_timer(&tp->timer, jiffies + 1);
5703
5704 out:
5705         tg3_full_unlock(tp);
5706
5707         if (!err)
5708                 tg3_phy_start(tp);
5709 }
5710
5711 static void tg3_tx_timeout(struct net_device *dev)
5712 {
5713         struct tg3 *tp = netdev_priv(dev);
5714
5715         if (netif_msg_tx_err(tp)) {
5716                 netdev_err(dev, "transmit timed out, resetting\n");
5717                 tg3_dump_state(tp);
5718         }
5719
5720         schedule_work(&tp->reset_task);
5721 }
5722
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 {
5726         u32 base = (u32) mapping & 0xffffffff;
5727
5728         return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 }
5730
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5733                                           int len)
5734 {
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736         if (tg3_flag(tp, 40BIT_DMA_BUG))
5737                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5738         return 0;
5739 #else
5740         return 0;
5741 #endif
5742 }
5743
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745                         dma_addr_t mapping, int len, u32 flags,
5746                         u32 mss_and_is_end)
5747 {
5748         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749         int is_end = (mss_and_is_end & 0x1);
5750         u32 mss = (mss_and_is_end >> 1);
5751         u32 vlan_tag = 0;
5752
5753         if (is_end)
5754                 flags |= TXD_FLAG_END;
5755         if (flags & TXD_FLAG_VLAN) {
5756                 vlan_tag = flags >> 16;
5757                 flags &= 0xffff;
5758         }
5759         vlan_tag |= (mss << TXD_MSS_SHIFT);
5760
5761         txd->addr_hi = ((u64) mapping >> 32);
5762         txd->addr_lo = ((u64) mapping & 0xffffffff);
5763         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5765 }
5766
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768                                 struct sk_buff *skb, int last)
5769 {
5770         int i;
5771         u32 entry = tnapi->tx_prod;
5772         struct ring_info *txb = &tnapi->tx_buffers[entry];
5773
5774         pci_unmap_single(tnapi->tp->pdev,
5775                          dma_unmap_addr(txb, mapping),
5776                          skb_headlen(skb),
5777                          PCI_DMA_TODEVICE);
5778         for (i = 0; i < last; i++) {
5779                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780
5781                 entry = NEXT_TX(entry);
5782                 txb = &tnapi->tx_buffers[entry];
5783
5784                 pci_unmap_page(tnapi->tp->pdev,
5785                                dma_unmap_addr(txb, mapping),
5786                                frag->size, PCI_DMA_TODEVICE);
5787         }
5788 }
5789
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792                                        struct sk_buff *skb,
5793                                        u32 base_flags, u32 mss)
5794 {
5795         struct tg3 *tp = tnapi->tp;
5796         struct sk_buff *new_skb;
5797         dma_addr_t new_addr = 0;
5798         u32 entry = tnapi->tx_prod;
5799         int ret = 0;
5800
5801         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802                 new_skb = skb_copy(skb, GFP_ATOMIC);
5803         else {
5804                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805
5806                 new_skb = skb_copy_expand(skb,
5807                                           skb_headroom(skb) + more_headroom,
5808                                           skb_tailroom(skb), GFP_ATOMIC);
5809         }
5810
5811         if (!new_skb) {
5812                 ret = -1;
5813         } else {
5814                 /* New SKB is guaranteed to be linear. */
5815                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5816                                           PCI_DMA_TODEVICE);
5817                 /* Make sure the mapping succeeded */
5818                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5819                         ret = -1;
5820                         dev_kfree_skb(new_skb);
5821
5822                 /* Make sure new skb does not cross any 4G boundaries.
5823                  * Drop the packet if it does.
5824                  */
5825                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5828                                          PCI_DMA_TODEVICE);
5829                         ret = -1;
5830                         dev_kfree_skb(new_skb);
5831                 } else {
5832                         tnapi->tx_buffers[entry].skb = new_skb;
5833                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5834                                            mapping, new_addr);
5835
5836                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5837                                     base_flags, 1 | (mss << 1));
5838                 }
5839         }
5840
5841         dev_kfree_skb(skb);
5842
5843         return ret;
5844 }
5845
5846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5847
5848 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5849  * TSO header is greater than 80 bytes.
5850  */
5851 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5852 {
5853         struct sk_buff *segs, *nskb;
5854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855
5856         /* Estimate the number of fragments in the worst case */
5857         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5858                 netif_stop_queue(tp->dev);
5859
5860                 /* netif_tx_stop_queue() must be done before checking
5861                  * checking tx index in tg3_tx_avail() below, because in
5862                  * tg3_tx(), we update tx index before checking for
5863                  * netif_tx_queue_stopped().
5864                  */
5865                 smp_mb();
5866                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5867                         return NETDEV_TX_BUSY;
5868
5869                 netif_wake_queue(tp->dev);
5870         }
5871
5872         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5873         if (IS_ERR(segs))
5874                 goto tg3_tso_bug_end;
5875
5876         do {
5877                 nskb = segs;
5878                 segs = segs->next;
5879                 nskb->next = NULL;
5880                 tg3_start_xmit(nskb, tp->dev);
5881         } while (segs);
5882
5883 tg3_tso_bug_end:
5884         dev_kfree_skb(skb);
5885
5886         return NETDEV_TX_OK;
5887 }
5888
5889 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5890  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5891  */
5892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5893 {
5894         struct tg3 *tp = netdev_priv(dev);
5895         u32 len, entry, base_flags, mss;
5896         int i = -1, would_hit_hwbug;
5897         dma_addr_t mapping;
5898         struct tg3_napi *tnapi;
5899         struct netdev_queue *txq;
5900         unsigned int last;
5901
5902         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5903         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5904         if (tg3_flag(tp, ENABLE_TSS))
5905                 tnapi++;
5906
5907         /* We are running in BH disabled context with netif_tx_lock
5908          * and TX reclaim runs via tp->napi.poll inside of a software
5909          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5910          * no IRQ context deadlocks to worry about either.  Rejoice!
5911          */
5912         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5913                 if (!netif_tx_queue_stopped(txq)) {
5914                         netif_tx_stop_queue(txq);
5915
5916                         /* This is a hard error, log it. */
5917                         netdev_err(dev,
5918                                    "BUG! Tx Ring full when queue awake!\n");
5919                 }
5920                 return NETDEV_TX_BUSY;
5921         }
5922
5923         entry = tnapi->tx_prod;
5924         base_flags = 0;
5925         if (skb->ip_summed == CHECKSUM_PARTIAL)
5926                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5927
5928         mss = skb_shinfo(skb)->gso_size;
5929         if (mss) {
5930                 struct iphdr *iph;
5931                 u32 tcp_opt_len, hdr_len;
5932
5933                 if (skb_header_cloned(skb) &&
5934                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5935                         dev_kfree_skb(skb);
5936                         goto out_unlock;
5937                 }
5938
5939                 iph = ip_hdr(skb);
5940                 tcp_opt_len = tcp_optlen(skb);
5941
5942                 if (skb_is_gso_v6(skb)) {
5943                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5944                 } else {
5945                         u32 ip_tcp_len;
5946
5947                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5948                         hdr_len = ip_tcp_len + tcp_opt_len;
5949
5950                         iph->check = 0;
5951                         iph->tot_len = htons(mss + hdr_len);
5952                 }
5953
5954                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5955                     tg3_flag(tp, TSO_BUG))
5956                         return tg3_tso_bug(tp, skb);
5957
5958                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5959                                TXD_FLAG_CPU_POST_DMA);
5960
5961                 if (tg3_flag(tp, HW_TSO_1) ||
5962                     tg3_flag(tp, HW_TSO_2) ||
5963                     tg3_flag(tp, HW_TSO_3)) {
5964                         tcp_hdr(skb)->check = 0;
5965                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5966                 } else
5967                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5968                                                                  iph->daddr, 0,
5969                                                                  IPPROTO_TCP,
5970                                                                  0);
5971
5972                 if (tg3_flag(tp, HW_TSO_3)) {
5973                         mss |= (hdr_len & 0xc) << 12;
5974                         if (hdr_len & 0x10)
5975                                 base_flags |= 0x00000010;
5976                         base_flags |= (hdr_len & 0x3e0) << 5;
5977                 } else if (tg3_flag(tp, HW_TSO_2))
5978                         mss |= hdr_len << 9;
5979                 else if (tg3_flag(tp, HW_TSO_1) ||
5980                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5981                         if (tcp_opt_len || iph->ihl > 5) {
5982                                 int tsflags;
5983
5984                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5985                                 mss |= (tsflags << 11);
5986                         }
5987                 } else {
5988                         if (tcp_opt_len || iph->ihl > 5) {
5989                                 int tsflags;
5990
5991                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5992                                 base_flags |= tsflags << 12;
5993                         }
5994                 }
5995         }
5996
5997         if (vlan_tx_tag_present(skb))
5998                 base_flags |= (TXD_FLAG_VLAN |
5999                                (vlan_tx_tag_get(skb) << 16));
6000
6001         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6002             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6003                 base_flags |= TXD_FLAG_JMB_PKT;
6004
6005         len = skb_headlen(skb);
6006
6007         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6008         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6009                 dev_kfree_skb(skb);
6010                 goto out_unlock;
6011         }
6012
6013         tnapi->tx_buffers[entry].skb = skb;
6014         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6015
6016         would_hit_hwbug = 0;
6017
6018         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6019                 would_hit_hwbug = 1;
6020
6021         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6022             tg3_4g_overflow_test(mapping, len))
6023                 would_hit_hwbug = 1;
6024
6025         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6026             tg3_40bit_overflow_test(tp, mapping, len))
6027                 would_hit_hwbug = 1;
6028
6029         if (tg3_flag(tp, 5701_DMA_BUG))
6030                 would_hit_hwbug = 1;
6031
6032         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6033                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6034
6035         entry = NEXT_TX(entry);
6036
6037         /* Now loop through additional data fragments, and queue them. */
6038         if (skb_shinfo(skb)->nr_frags > 0) {
6039                 last = skb_shinfo(skb)->nr_frags - 1;
6040                 for (i = 0; i <= last; i++) {
6041                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6042
6043                         len = frag->size;
6044                         mapping = pci_map_page(tp->pdev,
6045                                                frag->page,
6046                                                frag->page_offset,
6047                                                len, PCI_DMA_TODEVICE);
6048
6049                         tnapi->tx_buffers[entry].skb = NULL;
6050                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6051                                            mapping);
6052                         if (pci_dma_mapping_error(tp->pdev, mapping))
6053                                 goto dma_error;
6054
6055                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6056                             len <= 8)
6057                                 would_hit_hwbug = 1;
6058
6059                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6060                             tg3_4g_overflow_test(mapping, len))
6061                                 would_hit_hwbug = 1;
6062
6063                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6064                             tg3_40bit_overflow_test(tp, mapping, len))
6065                                 would_hit_hwbug = 1;
6066
6067                         if (tg3_flag(tp, HW_TSO_1) ||
6068                             tg3_flag(tp, HW_TSO_2) ||
6069                             tg3_flag(tp, HW_TSO_3))
6070                                 tg3_set_txd(tnapi, entry, mapping, len,
6071                                             base_flags, (i == last)|(mss << 1));
6072                         else
6073                                 tg3_set_txd(tnapi, entry, mapping, len,
6074                                             base_flags, (i == last));
6075
6076                         entry = NEXT_TX(entry);
6077                 }
6078         }
6079
6080         if (would_hit_hwbug) {
6081                 tg3_skb_error_unmap(tnapi, skb, i);
6082
6083                 /* If the workaround fails due to memory/mapping
6084                  * failure, silently drop this packet.
6085                  */
6086                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6087                         goto out_unlock;
6088
6089                 entry = NEXT_TX(tnapi->tx_prod);
6090         }
6091
6092         /* Packets are ready, update Tx producer idx local and on card. */
6093         tw32_tx_mbox(tnapi->prodmbox, entry);
6094
6095         tnapi->tx_prod = entry;
6096         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6097                 netif_tx_stop_queue(txq);
6098
6099                 /* netif_tx_stop_queue() must be done before checking
6100                  * checking tx index in tg3_tx_avail() below, because in
6101                  * tg3_tx(), we update tx index before checking for
6102                  * netif_tx_queue_stopped().
6103                  */
6104                 smp_mb();
6105                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6106                         netif_tx_wake_queue(txq);
6107         }
6108
6109 out_unlock:
6110         mmiowb();
6111
6112         return NETDEV_TX_OK;
6113
6114 dma_error:
6115         tg3_skb_error_unmap(tnapi, skb, i);
6116         dev_kfree_skb(skb);
6117         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6118         return NETDEV_TX_OK;
6119 }
6120
6121 static void tg3_set_loopback(struct net_device *dev, u32 features)
6122 {
6123         struct tg3 *tp = netdev_priv(dev);
6124
6125         if (features & NETIF_F_LOOPBACK) {
6126                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6127                         return;
6128
6129                 /*
6130                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6131                  * loopback mode if Half-Duplex mode was negotiated earlier.
6132                  */
6133                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6134
6135                 /* Enable internal MAC loopback mode */
6136                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6137                 spin_lock_bh(&tp->lock);
6138                 tw32(MAC_MODE, tp->mac_mode);
6139                 netif_carrier_on(tp->dev);
6140                 spin_unlock_bh(&tp->lock);
6141                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6142         } else {
6143                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6144                         return;
6145
6146                 /* Disable internal MAC loopback mode */
6147                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6148                 spin_lock_bh(&tp->lock);
6149                 tw32(MAC_MODE, tp->mac_mode);
6150                 /* Force link status check */
6151                 tg3_setup_phy(tp, 1);
6152                 spin_unlock_bh(&tp->lock);
6153                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6154         }
6155 }
6156
6157 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6158 {
6159         struct tg3 *tp = netdev_priv(dev);
6160
6161         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6162                 features &= ~NETIF_F_ALL_TSO;
6163
6164         return features;
6165 }
6166
6167 static int tg3_set_features(struct net_device *dev, u32 features)
6168 {
6169         u32 changed = dev->features ^ features;
6170
6171         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6172                 tg3_set_loopback(dev, features);
6173
6174         return 0;
6175 }
6176
6177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6178                                int new_mtu)
6179 {
6180         dev->mtu = new_mtu;
6181
6182         if (new_mtu > ETH_DATA_LEN) {
6183                 if (tg3_flag(tp, 5780_CLASS)) {
6184                         netdev_update_features(dev);
6185                         tg3_flag_clear(tp, TSO_CAPABLE);
6186                 } else {
6187                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6188                 }
6189         } else {
6190                 if (tg3_flag(tp, 5780_CLASS)) {
6191                         tg3_flag_set(tp, TSO_CAPABLE);
6192                         netdev_update_features(dev);
6193                 }
6194                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6195         }
6196 }
6197
6198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6199 {
6200         struct tg3 *tp = netdev_priv(dev);
6201         int err;
6202
6203         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6204                 return -EINVAL;
6205
6206         if (!netif_running(dev)) {
6207                 /* We'll just catch it later when the
6208                  * device is up'd.
6209                  */
6210                 tg3_set_mtu(dev, tp, new_mtu);
6211                 return 0;
6212         }
6213
6214         tg3_phy_stop(tp);
6215
6216         tg3_netif_stop(tp);
6217
6218         tg3_full_lock(tp, 1);
6219
6220         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6221
6222         tg3_set_mtu(dev, tp, new_mtu);
6223
6224         err = tg3_restart_hw(tp, 0);
6225
6226         if (!err)
6227                 tg3_netif_start(tp);
6228
6229         tg3_full_unlock(tp);
6230
6231         if (!err)
6232                 tg3_phy_start(tp);
6233
6234         return err;
6235 }
6236
6237 static void tg3_rx_prodring_free(struct tg3 *tp,
6238                                  struct tg3_rx_prodring_set *tpr)
6239 {
6240         int i;
6241
6242         if (tpr != &tp->napi[0].prodring) {
6243                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6244                      i = (i + 1) & tp->rx_std_ring_mask)
6245                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6246                                         tp->rx_pkt_map_sz);
6247
6248                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6249                         for (i = tpr->rx_jmb_cons_idx;
6250                              i != tpr->rx_jmb_prod_idx;
6251                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6252                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6253                                                 TG3_RX_JMB_MAP_SZ);
6254                         }
6255                 }
6256
6257                 return;
6258         }
6259
6260         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6261                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6262                                 tp->rx_pkt_map_sz);
6263
6264         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6265                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6266                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6267                                         TG3_RX_JMB_MAP_SZ);
6268         }
6269 }
6270
6271 /* Initialize rx rings for packet processing.
6272  *
6273  * The chip has been shut down and the driver detached from
6274  * the networking, so no interrupts or new tx packets will
6275  * end up in the driver.  tp->{tx,}lock are held and thus
6276  * we may not sleep.
6277  */
6278 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6279                                  struct tg3_rx_prodring_set *tpr)
6280 {
6281         u32 i, rx_pkt_dma_sz;
6282
6283         tpr->rx_std_cons_idx = 0;
6284         tpr->rx_std_prod_idx = 0;
6285         tpr->rx_jmb_cons_idx = 0;
6286         tpr->rx_jmb_prod_idx = 0;
6287
6288         if (tpr != &tp->napi[0].prodring) {
6289                 memset(&tpr->rx_std_buffers[0], 0,
6290                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6291                 if (tpr->rx_jmb_buffers)
6292                         memset(&tpr->rx_jmb_buffers[0], 0,
6293                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6294                 goto done;
6295         }
6296
6297         /* Zero out all descriptors. */
6298         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6299
6300         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6301         if (tg3_flag(tp, 5780_CLASS) &&
6302             tp->dev->mtu > ETH_DATA_LEN)
6303                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6304         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6305
6306         /* Initialize invariants of the rings, we only set this
6307          * stuff once.  This works because the card does not
6308          * write into the rx buffer posting rings.
6309          */
6310         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6311                 struct tg3_rx_buffer_desc *rxd;
6312
6313                 rxd = &tpr->rx_std[i];
6314                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6315                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6316                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6317                                (i << RXD_OPAQUE_INDEX_SHIFT));
6318         }
6319
6320         /* Now allocate fresh SKBs for each rx ring. */
6321         for (i = 0; i < tp->rx_pending; i++) {
6322                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6323                         netdev_warn(tp->dev,
6324                                     "Using a smaller RX standard ring. Only "
6325                                     "%d out of %d buffers were allocated "
6326                                     "successfully\n", i, tp->rx_pending);
6327                         if (i == 0)
6328                                 goto initfail;
6329                         tp->rx_pending = i;
6330                         break;
6331                 }
6332         }
6333
6334         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6335                 goto done;
6336
6337         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6338
6339         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6340                 goto done;
6341
6342         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6343                 struct tg3_rx_buffer_desc *rxd;
6344
6345                 rxd = &tpr->rx_jmb[i].std;
6346                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6347                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6348                                   RXD_FLAG_JUMBO;
6349                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6350                        (i << RXD_OPAQUE_INDEX_SHIFT));
6351         }
6352
6353         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6354                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6355                         netdev_warn(tp->dev,
6356                                     "Using a smaller RX jumbo ring. Only %d "
6357                                     "out of %d buffers were allocated "
6358                                     "successfully\n", i, tp->rx_jumbo_pending);
6359                         if (i == 0)
6360                                 goto initfail;
6361                         tp->rx_jumbo_pending = i;
6362                         break;
6363                 }
6364         }
6365
6366 done:
6367         return 0;
6368
6369 initfail:
6370         tg3_rx_prodring_free(tp, tpr);
6371         return -ENOMEM;
6372 }
6373
6374 static void tg3_rx_prodring_fini(struct tg3 *tp,
6375                                  struct tg3_rx_prodring_set *tpr)
6376 {
6377         kfree(tpr->rx_std_buffers);
6378         tpr->rx_std_buffers = NULL;
6379         kfree(tpr->rx_jmb_buffers);
6380         tpr->rx_jmb_buffers = NULL;
6381         if (tpr->rx_std) {
6382                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6383                                   tpr->rx_std, tpr->rx_std_mapping);
6384                 tpr->rx_std = NULL;
6385         }
6386         if (tpr->rx_jmb) {
6387                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6388                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6389                 tpr->rx_jmb = NULL;
6390         }
6391 }
6392
6393 static int tg3_rx_prodring_init(struct tg3 *tp,
6394                                 struct tg3_rx_prodring_set *tpr)
6395 {
6396         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6397                                       GFP_KERNEL);
6398         if (!tpr->rx_std_buffers)
6399                 return -ENOMEM;
6400
6401         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6402                                          TG3_RX_STD_RING_BYTES(tp),
6403                                          &tpr->rx_std_mapping,
6404                                          GFP_KERNEL);
6405         if (!tpr->rx_std)
6406                 goto err_out;
6407
6408         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6409                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6410                                               GFP_KERNEL);
6411                 if (!tpr->rx_jmb_buffers)
6412                         goto err_out;
6413
6414                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6415                                                  TG3_RX_JMB_RING_BYTES(tp),
6416                                                  &tpr->rx_jmb_mapping,
6417                                                  GFP_KERNEL);
6418                 if (!tpr->rx_jmb)
6419                         goto err_out;
6420         }
6421
6422         return 0;
6423
6424 err_out:
6425         tg3_rx_prodring_fini(tp, tpr);
6426         return -ENOMEM;
6427 }
6428
6429 /* Free up pending packets in all rx/tx rings.
6430  *
6431  * The chip has been shut down and the driver detached from
6432  * the networking, so no interrupts or new tx packets will
6433  * end up in the driver.  tp->{tx,}lock is not held and we are not
6434  * in an interrupt context and thus may sleep.
6435  */
6436 static void tg3_free_rings(struct tg3 *tp)
6437 {
6438         int i, j;
6439
6440         for (j = 0; j < tp->irq_cnt; j++) {
6441                 struct tg3_napi *tnapi = &tp->napi[j];
6442
6443                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6444
6445                 if (!tnapi->tx_buffers)
6446                         continue;
6447
6448                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6449                         struct ring_info *txp;
6450                         struct sk_buff *skb;
6451                         unsigned int k;
6452
6453                         txp = &tnapi->tx_buffers[i];
6454                         skb = txp->skb;
6455
6456                         if (skb == NULL) {
6457                                 i++;
6458                                 continue;
6459                         }
6460
6461                         pci_unmap_single(tp->pdev,
6462                                          dma_unmap_addr(txp, mapping),
6463                                          skb_headlen(skb),
6464                                          PCI_DMA_TODEVICE);
6465                         txp->skb = NULL;
6466
6467                         i++;
6468
6469                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6470                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6471                                 pci_unmap_page(tp->pdev,
6472                                                dma_unmap_addr(txp, mapping),
6473                                                skb_shinfo(skb)->frags[k].size,
6474                                                PCI_DMA_TODEVICE);
6475                                 i++;
6476                         }
6477
6478                         dev_kfree_skb_any(skb);
6479                 }
6480         }
6481 }
6482
6483 /* Initialize tx/rx rings for packet processing.
6484  *
6485  * The chip has been shut down and the driver detached from
6486  * the networking, so no interrupts or new tx packets will
6487  * end up in the driver.  tp->{tx,}lock are held and thus
6488  * we may not sleep.
6489  */
6490 static int tg3_init_rings(struct tg3 *tp)
6491 {
6492         int i;
6493
6494         /* Free up all the SKBs. */
6495         tg3_free_rings(tp);
6496
6497         for (i = 0; i < tp->irq_cnt; i++) {
6498                 struct tg3_napi *tnapi = &tp->napi[i];
6499
6500                 tnapi->last_tag = 0;
6501                 tnapi->last_irq_tag = 0;
6502                 tnapi->hw_status->status = 0;
6503                 tnapi->hw_status->status_tag = 0;
6504                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6505
6506                 tnapi->tx_prod = 0;
6507                 tnapi->tx_cons = 0;
6508                 if (tnapi->tx_ring)
6509                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6510
6511                 tnapi->rx_rcb_ptr = 0;
6512                 if (tnapi->rx_rcb)
6513                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6514
6515                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6516                         tg3_free_rings(tp);
6517                         return -ENOMEM;
6518                 }
6519         }
6520
6521         return 0;
6522 }
6523
6524 /*
6525  * Must not be invoked with interrupt sources disabled and
6526  * the hardware shutdown down.
6527  */
6528 static void tg3_free_consistent(struct tg3 *tp)
6529 {
6530         int i;
6531
6532         for (i = 0; i < tp->irq_cnt; i++) {
6533                 struct tg3_napi *tnapi = &tp->napi[i];
6534
6535                 if (tnapi->tx_ring) {
6536                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6537                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6538                         tnapi->tx_ring = NULL;
6539                 }
6540
6541                 kfree(tnapi->tx_buffers);
6542                 tnapi->tx_buffers = NULL;
6543
6544                 if (tnapi->rx_rcb) {
6545                         dma_free_coherent(&tp->pdev->dev,
6546                                           TG3_RX_RCB_RING_BYTES(tp),
6547                                           tnapi->rx_rcb,
6548                                           tnapi->rx_rcb_mapping);
6549                         tnapi->rx_rcb = NULL;
6550                 }
6551
6552                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6553
6554                 if (tnapi->hw_status) {
6555                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6556                                           tnapi->hw_status,
6557                                           tnapi->status_mapping);
6558                         tnapi->hw_status = NULL;
6559                 }
6560         }
6561
6562         if (tp->hw_stats) {
6563                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6564                                   tp->hw_stats, tp->stats_mapping);
6565                 tp->hw_stats = NULL;
6566         }
6567 }
6568
6569 /*
6570  * Must not be invoked with interrupt sources disabled and
6571  * the hardware shutdown down.  Can sleep.
6572  */
6573 static int tg3_alloc_consistent(struct tg3 *tp)
6574 {
6575         int i;
6576
6577         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6578                                           sizeof(struct tg3_hw_stats),
6579                                           &tp->stats_mapping,
6580                                           GFP_KERNEL);
6581         if (!tp->hw_stats)
6582                 goto err_out;
6583
6584         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6585
6586         for (i = 0; i < tp->irq_cnt; i++) {
6587                 struct tg3_napi *tnapi = &tp->napi[i];
6588                 struct tg3_hw_status *sblk;
6589
6590                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6591                                                       TG3_HW_STATUS_SIZE,
6592                                                       &tnapi->status_mapping,
6593                                                       GFP_KERNEL);
6594                 if (!tnapi->hw_status)
6595                         goto err_out;
6596
6597                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6598                 sblk = tnapi->hw_status;
6599
6600                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6601                         goto err_out;
6602
6603                 /* If multivector TSS is enabled, vector 0 does not handle
6604                  * tx interrupts.  Don't allocate any resources for it.
6605                  */
6606                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6607                     (i && tg3_flag(tp, ENABLE_TSS))) {
6608                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6609                                                     TG3_TX_RING_SIZE,
6610                                                     GFP_KERNEL);
6611                         if (!tnapi->tx_buffers)
6612                                 goto err_out;
6613
6614                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6615                                                             TG3_TX_RING_BYTES,
6616                                                         &tnapi->tx_desc_mapping,
6617                                                             GFP_KERNEL);
6618                         if (!tnapi->tx_ring)
6619                                 goto err_out;
6620                 }
6621
6622                 /*
6623                  * When RSS is enabled, the status block format changes
6624                  * slightly.  The "rx_jumbo_consumer", "reserved",
6625                  * and "rx_mini_consumer" members get mapped to the
6626                  * other three rx return ring producer indexes.
6627                  */
6628                 switch (i) {
6629                 default:
6630                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6631                         break;
6632                 case 2:
6633                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6634                         break;
6635                 case 3:
6636                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6637                         break;
6638                 case 4:
6639                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6640                         break;
6641                 }
6642
6643                 /*
6644                  * If multivector RSS is enabled, vector 0 does not handle
6645                  * rx or tx interrupts.  Don't allocate any resources for it.
6646                  */
6647                 if (!i && tg3_flag(tp, ENABLE_RSS))
6648                         continue;
6649
6650                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6651                                                    TG3_RX_RCB_RING_BYTES(tp),
6652                                                    &tnapi->rx_rcb_mapping,
6653                                                    GFP_KERNEL);
6654                 if (!tnapi->rx_rcb)
6655                         goto err_out;
6656
6657                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6658         }
6659
6660         return 0;
6661
6662 err_out:
6663         tg3_free_consistent(tp);
6664         return -ENOMEM;
6665 }
6666
6667 #define MAX_WAIT_CNT 1000
6668
6669 /* To stop a block, clear the enable bit and poll till it
6670  * clears.  tp->lock is held.
6671  */
6672 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6673 {
6674         unsigned int i;
6675         u32 val;
6676
6677         if (tg3_flag(tp, 5705_PLUS)) {
6678                 switch (ofs) {
6679                 case RCVLSC_MODE:
6680                 case DMAC_MODE:
6681                 case MBFREE_MODE:
6682                 case BUFMGR_MODE:
6683                 case MEMARB_MODE:
6684                         /* We can't enable/disable these bits of the
6685                          * 5705/5750, just say success.
6686                          */
6687                         return 0;
6688
6689                 default:
6690                         break;
6691                 }
6692         }
6693
6694         val = tr32(ofs);
6695         val &= ~enable_bit;
6696         tw32_f(ofs, val);
6697
6698         for (i = 0; i < MAX_WAIT_CNT; i++) {
6699                 udelay(100);
6700                 val = tr32(ofs);
6701                 if ((val & enable_bit) == 0)
6702                         break;
6703         }
6704
6705         if (i == MAX_WAIT_CNT && !silent) {
6706                 dev_err(&tp->pdev->dev,
6707                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6708                         ofs, enable_bit);
6709                 return -ENODEV;
6710         }
6711
6712         return 0;
6713 }
6714
6715 /* tp->lock is held. */
6716 static int tg3_abort_hw(struct tg3 *tp, int silent)
6717 {
6718         int i, err;
6719
6720         tg3_disable_ints(tp);
6721
6722         tp->rx_mode &= ~RX_MODE_ENABLE;
6723         tw32_f(MAC_RX_MODE, tp->rx_mode);
6724         udelay(10);
6725
6726         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6727         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6728         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6729         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6730         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6731         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6732
6733         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6734         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6735         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6736         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6737         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6738         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6739         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6740
6741         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6742         tw32_f(MAC_MODE, tp->mac_mode);
6743         udelay(40);
6744
6745         tp->tx_mode &= ~TX_MODE_ENABLE;
6746         tw32_f(MAC_TX_MODE, tp->tx_mode);
6747
6748         for (i = 0; i < MAX_WAIT_CNT; i++) {
6749                 udelay(100);
6750                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6751                         break;
6752         }
6753         if (i >= MAX_WAIT_CNT) {
6754                 dev_err(&tp->pdev->dev,
6755                         "%s timed out, TX_MODE_ENABLE will not clear "
6756                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6757                 err |= -ENODEV;
6758         }
6759
6760         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6761         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6762         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6763
6764         tw32(FTQ_RESET, 0xffffffff);
6765         tw32(FTQ_RESET, 0x00000000);
6766
6767         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6768         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6769
6770         for (i = 0; i < tp->irq_cnt; i++) {
6771                 struct tg3_napi *tnapi = &tp->napi[i];
6772                 if (tnapi->hw_status)
6773                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6774         }
6775         if (tp->hw_stats)
6776                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6777
6778         return err;
6779 }
6780
6781 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6782 {
6783         int i;
6784         u32 apedata;
6785
6786         /* NCSI does not support APE events */
6787         if (tg3_flag(tp, APE_HAS_NCSI))
6788                 return;
6789
6790         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6791         if (apedata != APE_SEG_SIG_MAGIC)
6792                 return;
6793
6794         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6795         if (!(apedata & APE_FW_STATUS_READY))
6796                 return;
6797
6798         /* Wait for up to 1 millisecond for APE to service previous event. */
6799         for (i = 0; i < 10; i++) {
6800                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6801                         return;
6802
6803                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6804
6805                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6806                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6807                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6808
6809                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6810
6811                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6812                         break;
6813
6814                 udelay(100);
6815         }
6816
6817         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6818                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6819 }
6820
6821 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6822 {
6823         u32 event;
6824         u32 apedata;
6825
6826         if (!tg3_flag(tp, ENABLE_APE))
6827                 return;
6828
6829         switch (kind) {
6830         case RESET_KIND_INIT:
6831                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6832                                 APE_HOST_SEG_SIG_MAGIC);
6833                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6834                                 APE_HOST_SEG_LEN_MAGIC);
6835                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6836                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6837                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6838                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6839                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6840                                 APE_HOST_BEHAV_NO_PHYLOCK);
6841                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6842                                     TG3_APE_HOST_DRVR_STATE_START);
6843
6844                 event = APE_EVENT_STATUS_STATE_START;
6845                 break;
6846         case RESET_KIND_SHUTDOWN:
6847                 /* With the interface we are currently using,
6848                  * APE does not track driver state.  Wiping
6849                  * out the HOST SEGMENT SIGNATURE forces
6850                  * the APE to assume OS absent status.
6851                  */
6852                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6853
6854                 if (device_may_wakeup(&tp->pdev->dev) &&
6855                     tg3_flag(tp, WOL_ENABLE)) {
6856                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6857                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6858                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6859                 } else
6860                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6861
6862                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6863
6864                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6865                 break;
6866         case RESET_KIND_SUSPEND:
6867                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6868                 break;
6869         default:
6870                 return;
6871         }
6872
6873         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6874
6875         tg3_ape_send_event(tp, event);
6876 }
6877
6878 /* tp->lock is held. */
6879 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6880 {
6881         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6882                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6883
6884         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6885                 switch (kind) {
6886                 case RESET_KIND_INIT:
6887                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6888                                       DRV_STATE_START);
6889                         break;
6890
6891                 case RESET_KIND_SHUTDOWN:
6892                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6893                                       DRV_STATE_UNLOAD);
6894                         break;
6895
6896                 case RESET_KIND_SUSPEND:
6897                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6898                                       DRV_STATE_SUSPEND);
6899                         break;
6900
6901                 default:
6902                         break;
6903                 }
6904         }
6905
6906         if (kind == RESET_KIND_INIT ||
6907             kind == RESET_KIND_SUSPEND)
6908                 tg3_ape_driver_state_change(tp, kind);
6909 }
6910
6911 /* tp->lock is held. */
6912 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6913 {
6914         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6915                 switch (kind) {
6916                 case RESET_KIND_INIT:
6917                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6918                                       DRV_STATE_START_DONE);
6919                         break;
6920
6921                 case RESET_KIND_SHUTDOWN:
6922                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6923                                       DRV_STATE_UNLOAD_DONE);
6924                         break;
6925
6926                 default:
6927                         break;
6928                 }
6929         }
6930
6931         if (kind == RESET_KIND_SHUTDOWN)
6932                 tg3_ape_driver_state_change(tp, kind);
6933 }
6934
6935 /* tp->lock is held. */
6936 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6937 {
6938         if (tg3_flag(tp, ENABLE_ASF)) {
6939                 switch (kind) {
6940                 case RESET_KIND_INIT:
6941                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6942                                       DRV_STATE_START);
6943                         break;
6944
6945                 case RESET_KIND_SHUTDOWN:
6946                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6947                                       DRV_STATE_UNLOAD);
6948                         break;
6949
6950                 case RESET_KIND_SUSPEND:
6951                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6952                                       DRV_STATE_SUSPEND);
6953                         break;
6954
6955                 default:
6956                         break;
6957                 }
6958         }
6959 }
6960
6961 static int tg3_poll_fw(struct tg3 *tp)
6962 {
6963         int i;
6964         u32 val;
6965
6966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6967                 /* Wait up to 20ms for init done. */
6968                 for (i = 0; i < 200; i++) {
6969                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6970                                 return 0;
6971                         udelay(100);
6972                 }
6973                 return -ENODEV;
6974         }
6975
6976         /* Wait for firmware initialization to complete. */
6977         for (i = 0; i < 100000; i++) {
6978                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6979                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6980                         break;
6981                 udelay(10);
6982         }
6983
6984         /* Chip might not be fitted with firmware.  Some Sun onboard
6985          * parts are configured like that.  So don't signal the timeout
6986          * of the above loop as an error, but do report the lack of
6987          * running firmware once.
6988          */
6989         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6990                 tg3_flag_set(tp, NO_FWARE_REPORTED);
6991
6992                 netdev_info(tp->dev, "No firmware running\n");
6993         }
6994
6995         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6996                 /* The 57765 A0 needs a little more
6997                  * time to do some important work.
6998                  */
6999                 mdelay(10);
7000         }
7001
7002         return 0;
7003 }
7004
7005 /* Save PCI command register before chip reset */
7006 static void tg3_save_pci_state(struct tg3 *tp)
7007 {
7008         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7009 }
7010
7011 /* Restore PCI state after chip reset */
7012 static void tg3_restore_pci_state(struct tg3 *tp)
7013 {
7014         u32 val;
7015
7016         /* Re-enable indirect register accesses. */
7017         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7018                                tp->misc_host_ctrl);
7019
7020         /* Set MAX PCI retry to zero. */
7021         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7022         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7023             tg3_flag(tp, PCIX_MODE))
7024                 val |= PCISTATE_RETRY_SAME_DMA;
7025         /* Allow reads and writes to the APE register and memory space. */
7026         if (tg3_flag(tp, ENABLE_APE))
7027                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7028                        PCISTATE_ALLOW_APE_SHMEM_WR |
7029                        PCISTATE_ALLOW_APE_PSPACE_WR;
7030         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7031
7032         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7033
7034         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7035                 if (tg3_flag(tp, PCI_EXPRESS))
7036                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7037                 else {
7038                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7039                                               tp->pci_cacheline_sz);
7040                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7041                                               tp->pci_lat_timer);
7042                 }
7043         }
7044
7045         /* Make sure PCI-X relaxed ordering bit is clear. */
7046         if (tg3_flag(tp, PCIX_MODE)) {
7047                 u16 pcix_cmd;
7048
7049                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7050                                      &pcix_cmd);
7051                 pcix_cmd &= ~PCI_X_CMD_ERO;
7052                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7053                                       pcix_cmd);
7054         }
7055
7056         if (tg3_flag(tp, 5780_CLASS)) {
7057
7058                 /* Chip reset on 5780 will reset MSI enable bit,
7059                  * so need to restore it.
7060                  */
7061                 if (tg3_flag(tp, USING_MSI)) {
7062                         u16 ctrl;
7063
7064                         pci_read_config_word(tp->pdev,
7065                                              tp->msi_cap + PCI_MSI_FLAGS,
7066                                              &ctrl);
7067                         pci_write_config_word(tp->pdev,
7068                                               tp->msi_cap + PCI_MSI_FLAGS,
7069                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7070                         val = tr32(MSGINT_MODE);
7071                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7072                 }
7073         }
7074 }
7075
7076 static void tg3_stop_fw(struct tg3 *);
7077
7078 /* tp->lock is held. */
7079 static int tg3_chip_reset(struct tg3 *tp)
7080 {
7081         u32 val;
7082         void (*write_op)(struct tg3 *, u32, u32);
7083         int i, err;
7084
7085         tg3_nvram_lock(tp);
7086
7087         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7088
7089         /* No matching tg3_nvram_unlock() after this because
7090          * chip reset below will undo the nvram lock.
7091          */
7092         tp->nvram_lock_cnt = 0;
7093
7094         /* GRC_MISC_CFG core clock reset will clear the memory
7095          * enable bit in PCI register 4 and the MSI enable bit
7096          * on some chips, so we save relevant registers here.
7097          */
7098         tg3_save_pci_state(tp);
7099
7100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7101             tg3_flag(tp, 5755_PLUS))
7102                 tw32(GRC_FASTBOOT_PC, 0);
7103
7104         /*
7105          * We must avoid the readl() that normally takes place.
7106          * It locks machines, causes machine checks, and other
7107          * fun things.  So, temporarily disable the 5701
7108          * hardware workaround, while we do the reset.
7109          */
7110         write_op = tp->write32;
7111         if (write_op == tg3_write_flush_reg32)
7112                 tp->write32 = tg3_write32;
7113
7114         /* Prevent the irq handler from reading or writing PCI registers
7115          * during chip reset when the memory enable bit in the PCI command
7116          * register may be cleared.  The chip does not generate interrupt
7117          * at this time, but the irq handler may still be called due to irq
7118          * sharing or irqpoll.
7119          */
7120         tg3_flag_set(tp, CHIP_RESETTING);
7121         for (i = 0; i < tp->irq_cnt; i++) {
7122                 struct tg3_napi *tnapi = &tp->napi[i];
7123                 if (tnapi->hw_status) {
7124                         tnapi->hw_status->status = 0;
7125                         tnapi->hw_status->status_tag = 0;
7126                 }
7127                 tnapi->last_tag = 0;
7128                 tnapi->last_irq_tag = 0;
7129         }
7130         smp_mb();
7131
7132         for (i = 0; i < tp->irq_cnt; i++)
7133                 synchronize_irq(tp->napi[i].irq_vec);
7134
7135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7136                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7137                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7138         }
7139
7140         /* do the reset */
7141         val = GRC_MISC_CFG_CORECLK_RESET;
7142
7143         if (tg3_flag(tp, PCI_EXPRESS)) {
7144                 /* Force PCIe 1.0a mode */
7145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7146                     !tg3_flag(tp, 57765_PLUS) &&
7147                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7148                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7149                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7150
7151                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7152                         tw32(GRC_MISC_CFG, (1 << 29));
7153                         val |= (1 << 29);
7154                 }
7155         }
7156
7157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7158                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7159                 tw32(GRC_VCPU_EXT_CTRL,
7160                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7161         }
7162
7163         /* Manage gphy power for all CPMU absent PCIe devices. */
7164         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7165                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7166
7167         tw32(GRC_MISC_CFG, val);
7168
7169         /* restore 5701 hardware bug workaround write method */
7170         tp->write32 = write_op;
7171
7172         /* Unfortunately, we have to delay before the PCI read back.
7173          * Some 575X chips even will not respond to a PCI cfg access
7174          * when the reset command is given to the chip.
7175          *
7176          * How do these hardware designers expect things to work
7177          * properly if the PCI write is posted for a long period
7178          * of time?  It is always necessary to have some method by
7179          * which a register read back can occur to push the write
7180          * out which does the reset.
7181          *
7182          * For most tg3 variants the trick below was working.
7183          * Ho hum...
7184          */
7185         udelay(120);
7186
7187         /* Flush PCI posted writes.  The normal MMIO registers
7188          * are inaccessible at this time so this is the only
7189          * way to make this reliably (actually, this is no longer
7190          * the case, see above).  I tried to use indirect
7191          * register read/write but this upset some 5701 variants.
7192          */
7193         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7194
7195         udelay(120);
7196
7197         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7198                 u16 val16;
7199
7200                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7201                         int i;
7202                         u32 cfg_val;
7203
7204                         /* Wait for link training to complete.  */
7205                         for (i = 0; i < 5000; i++)
7206                                 udelay(100);
7207
7208                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7209                         pci_write_config_dword(tp->pdev, 0xc4,
7210                                                cfg_val | (1 << 15));
7211                 }
7212
7213                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7214                 pci_read_config_word(tp->pdev,
7215                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7216                                      &val16);
7217                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7218                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7219                 /*
7220                  * Older PCIe devices only support the 128 byte
7221                  * MPS setting.  Enforce the restriction.
7222                  */
7223                 if (!tg3_flag(tp, CPMU_PRESENT))
7224                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7225                 pci_write_config_word(tp->pdev,
7226                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7227                                       val16);
7228
7229                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7230
7231                 /* Clear error status */
7232                 pci_write_config_word(tp->pdev,
7233                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7234                                       PCI_EXP_DEVSTA_CED |
7235                                       PCI_EXP_DEVSTA_NFED |
7236                                       PCI_EXP_DEVSTA_FED |
7237                                       PCI_EXP_DEVSTA_URD);
7238         }
7239
7240         tg3_restore_pci_state(tp);
7241
7242         tg3_flag_clear(tp, CHIP_RESETTING);
7243         tg3_flag_clear(tp, ERROR_PROCESSED);
7244
7245         val = 0;
7246         if (tg3_flag(tp, 5780_CLASS))
7247                 val = tr32(MEMARB_MODE);
7248         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7249
7250         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7251                 tg3_stop_fw(tp);
7252                 tw32(0x5000, 0x400);
7253         }
7254
7255         tw32(GRC_MODE, tp->grc_mode);
7256
7257         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7258                 val = tr32(0xc4);
7259
7260                 tw32(0xc4, val | (1 << 15));
7261         }
7262
7263         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7265                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7266                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7267                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7268                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7269         }
7270
7271         if (tg3_flag(tp, ENABLE_APE))
7272                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7273                                MAC_MODE_APE_RX_EN |
7274                                MAC_MODE_TDE_ENABLE;
7275
7276         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7277                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7278                 val = tp->mac_mode;
7279         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7280                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7281                 val = tp->mac_mode;
7282         } else
7283                 val = 0;
7284
7285         tw32_f(MAC_MODE, val);
7286         udelay(40);
7287
7288         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7289
7290         err = tg3_poll_fw(tp);
7291         if (err)
7292                 return err;
7293
7294         tg3_mdio_start(tp);
7295
7296         if (tg3_flag(tp, PCI_EXPRESS) &&
7297             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7298             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7299             !tg3_flag(tp, 57765_PLUS)) {
7300                 val = tr32(0x7c00);
7301
7302                 tw32(0x7c00, val | (1 << 25));
7303         }
7304
7305         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7306                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7307                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7308         }
7309
7310         /* Reprobe ASF enable state.  */
7311         tg3_flag_clear(tp, ENABLE_ASF);
7312         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7313         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7314         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7315                 u32 nic_cfg;
7316
7317                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7318                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7319                         tg3_flag_set(tp, ENABLE_ASF);
7320                         tp->last_event_jiffies = jiffies;
7321                         if (tg3_flag(tp, 5750_PLUS))
7322                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7323                 }
7324         }
7325
7326         return 0;
7327 }
7328
7329 /* tp->lock is held. */
7330 static void tg3_stop_fw(struct tg3 *tp)
7331 {
7332         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7333                 /* Wait for RX cpu to ACK the previous event. */
7334                 tg3_wait_for_event_ack(tp);
7335
7336                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7337
7338                 tg3_generate_fw_event(tp);
7339
7340                 /* Wait for RX cpu to ACK this event. */
7341                 tg3_wait_for_event_ack(tp);
7342         }
7343 }
7344
7345 /* tp->lock is held. */
7346 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7347 {
7348         int err;
7349
7350         tg3_stop_fw(tp);
7351
7352         tg3_write_sig_pre_reset(tp, kind);
7353
7354         tg3_abort_hw(tp, silent);
7355         err = tg3_chip_reset(tp);
7356
7357         __tg3_set_mac_addr(tp, 0);
7358
7359         tg3_write_sig_legacy(tp, kind);
7360         tg3_write_sig_post_reset(tp, kind);
7361
7362         if (err)
7363                 return err;
7364
7365         return 0;
7366 }
7367
7368 #define RX_CPU_SCRATCH_BASE     0x30000
7369 #define RX_CPU_SCRATCH_SIZE     0x04000
7370 #define TX_CPU_SCRATCH_BASE     0x34000
7371 #define TX_CPU_SCRATCH_SIZE     0x04000
7372
7373 /* tp->lock is held. */
7374 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7375 {
7376         int i;
7377
7378         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7379
7380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7381                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7382
7383                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7384                 return 0;
7385         }
7386         if (offset == RX_CPU_BASE) {
7387                 for (i = 0; i < 10000; i++) {
7388                         tw32(offset + CPU_STATE, 0xffffffff);
7389                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7390                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7391                                 break;
7392                 }
7393
7394                 tw32(offset + CPU_STATE, 0xffffffff);
7395                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7396                 udelay(10);
7397         } else {
7398                 for (i = 0; i < 10000; i++) {
7399                         tw32(offset + CPU_STATE, 0xffffffff);
7400                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7401                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7402                                 break;
7403                 }
7404         }
7405
7406         if (i >= 10000) {
7407                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7408                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7409                 return -ENODEV;
7410         }
7411
7412         /* Clear firmware's nvram arbitration. */
7413         if (tg3_flag(tp, NVRAM))
7414                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7415         return 0;
7416 }
7417
7418 struct fw_info {
7419         unsigned int fw_base;
7420         unsigned int fw_len;
7421         const __be32 *fw_data;
7422 };
7423
7424 /* tp->lock is held. */
7425 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7426                                  int cpu_scratch_size, struct fw_info *info)
7427 {
7428         int err, lock_err, i;
7429         void (*write_op)(struct tg3 *, u32, u32);
7430
7431         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7432                 netdev_err(tp->dev,
7433                            "%s: Trying to load TX cpu firmware which is 5705\n",
7434                            __func__);
7435                 return -EINVAL;
7436         }
7437
7438         if (tg3_flag(tp, 5705_PLUS))
7439                 write_op = tg3_write_mem;
7440         else
7441                 write_op = tg3_write_indirect_reg32;
7442
7443         /* It is possible that bootcode is still loading at this point.
7444          * Get the nvram lock first before halting the cpu.
7445          */
7446         lock_err = tg3_nvram_lock(tp);
7447         err = tg3_halt_cpu(tp, cpu_base);
7448         if (!lock_err)
7449                 tg3_nvram_unlock(tp);
7450         if (err)
7451                 goto out;
7452
7453         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7454                 write_op(tp, cpu_scratch_base + i, 0);
7455         tw32(cpu_base + CPU_STATE, 0xffffffff);
7456         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7457         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7458                 write_op(tp, (cpu_scratch_base +
7459                               (info->fw_base & 0xffff) +
7460                               (i * sizeof(u32))),
7461                               be32_to_cpu(info->fw_data[i]));
7462
7463         err = 0;
7464
7465 out:
7466         return err;
7467 }
7468
7469 /* tp->lock is held. */
7470 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7471 {
7472         struct fw_info info;
7473         const __be32 *fw_data;
7474         int err, i;
7475
7476         fw_data = (void *)tp->fw->data;
7477
7478         /* Firmware blob starts with version numbers, followed by
7479            start address and length. We are setting complete length.
7480            length = end_address_of_bss - start_address_of_text.
7481            Remainder is the blob to be loaded contiguously
7482            from start address. */
7483
7484         info.fw_base = be32_to_cpu(fw_data[1]);
7485         info.fw_len = tp->fw->size - 12;
7486         info.fw_data = &fw_data[3];
7487
7488         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7489                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7490                                     &info);
7491         if (err)
7492                 return err;
7493
7494         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7495                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7496                                     &info);
7497         if (err)
7498                 return err;
7499
7500         /* Now startup only the RX cpu. */
7501         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7503
7504         for (i = 0; i < 5; i++) {
7505                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7506                         break;
7507                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7508                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7509                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7510                 udelay(1000);
7511         }
7512         if (i >= 5) {
7513                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7514                            "should be %08x\n", __func__,
7515                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7516                 return -ENODEV;
7517         }
7518         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7519         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7520
7521         return 0;
7522 }
7523
7524 /* tp->lock is held. */
7525 static int tg3_load_tso_firmware(struct tg3 *tp)
7526 {
7527         struct fw_info info;
7528         const __be32 *fw_data;
7529         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7530         int err, i;
7531
7532         if (tg3_flag(tp, HW_TSO_1) ||
7533             tg3_flag(tp, HW_TSO_2) ||
7534             tg3_flag(tp, HW_TSO_3))
7535                 return 0;
7536
7537         fw_data = (void *)tp->fw->data;
7538
7539         /* Firmware blob starts with version numbers, followed by
7540            start address and length. We are setting complete length.
7541            length = end_address_of_bss - start_address_of_text.
7542            Remainder is the blob to be loaded contiguously
7543            from start address. */
7544
7545         info.fw_base = be32_to_cpu(fw_data[1]);
7546         cpu_scratch_size = tp->fw_len;
7547         info.fw_len = tp->fw->size - 12;
7548         info.fw_data = &fw_data[3];
7549
7550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7551                 cpu_base = RX_CPU_BASE;
7552                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7553         } else {
7554                 cpu_base = TX_CPU_BASE;
7555                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7556                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7557         }
7558
7559         err = tg3_load_firmware_cpu(tp, cpu_base,
7560                                     cpu_scratch_base, cpu_scratch_size,
7561                                     &info);
7562         if (err)
7563                 return err;
7564
7565         /* Now startup the cpu. */
7566         tw32(cpu_base + CPU_STATE, 0xffffffff);
7567         tw32_f(cpu_base + CPU_PC, info.fw_base);
7568
7569         for (i = 0; i < 5; i++) {
7570                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7571                         break;
7572                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7573                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7574                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7575                 udelay(1000);
7576         }
7577         if (i >= 5) {
7578                 netdev_err(tp->dev,
7579                            "%s fails to set CPU PC, is %08x should be %08x\n",
7580                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7581                 return -ENODEV;
7582         }
7583         tw32(cpu_base + CPU_STATE, 0xffffffff);
7584         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7585         return 0;
7586 }
7587
7588
7589 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7590 {
7591         struct tg3 *tp = netdev_priv(dev);
7592         struct sockaddr *addr = p;
7593         int err = 0, skip_mac_1 = 0;
7594
7595         if (!is_valid_ether_addr(addr->sa_data))
7596                 return -EINVAL;
7597
7598         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7599
7600         if (!netif_running(dev))
7601                 return 0;
7602
7603         if (tg3_flag(tp, ENABLE_ASF)) {
7604                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7605
7606                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7607                 addr0_low = tr32(MAC_ADDR_0_LOW);
7608                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7609                 addr1_low = tr32(MAC_ADDR_1_LOW);
7610
7611                 /* Skip MAC addr 1 if ASF is using it. */
7612                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7613                     !(addr1_high == 0 && addr1_low == 0))
7614                         skip_mac_1 = 1;
7615         }
7616         spin_lock_bh(&tp->lock);
7617         __tg3_set_mac_addr(tp, skip_mac_1);
7618         spin_unlock_bh(&tp->lock);
7619
7620         return err;
7621 }
7622
7623 /* tp->lock is held. */
7624 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7625                            dma_addr_t mapping, u32 maxlen_flags,
7626                            u32 nic_addr)
7627 {
7628         tg3_write_mem(tp,
7629                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7630                       ((u64) mapping >> 32));
7631         tg3_write_mem(tp,
7632                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7633                       ((u64) mapping & 0xffffffff));
7634         tg3_write_mem(tp,
7635                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7636                        maxlen_flags);
7637
7638         if (!tg3_flag(tp, 5705_PLUS))
7639                 tg3_write_mem(tp,
7640                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7641                               nic_addr);
7642 }
7643
7644 static void __tg3_set_rx_mode(struct net_device *);
7645 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7646 {
7647         int i;
7648
7649         if (!tg3_flag(tp, ENABLE_TSS)) {
7650                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7651                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7652                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7653         } else {
7654                 tw32(HOSTCC_TXCOL_TICKS, 0);
7655                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7656                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7657         }
7658
7659         if (!tg3_flag(tp, ENABLE_RSS)) {
7660                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7661                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7662                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7663         } else {
7664                 tw32(HOSTCC_RXCOL_TICKS, 0);
7665                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7666                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7667         }
7668
7669         if (!tg3_flag(tp, 5705_PLUS)) {
7670                 u32 val = ec->stats_block_coalesce_usecs;
7671
7672                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7673                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7674
7675                 if (!netif_carrier_ok(tp->dev))
7676                         val = 0;
7677
7678                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7679         }
7680
7681         for (i = 0; i < tp->irq_cnt - 1; i++) {
7682                 u32 reg;
7683
7684                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7685                 tw32(reg, ec->rx_coalesce_usecs);
7686                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7687                 tw32(reg, ec->rx_max_coalesced_frames);
7688                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7689                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7690
7691                 if (tg3_flag(tp, ENABLE_TSS)) {
7692                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7693                         tw32(reg, ec->tx_coalesce_usecs);
7694                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7695                         tw32(reg, ec->tx_max_coalesced_frames);
7696                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7697                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7698                 }
7699         }
7700
7701         for (; i < tp->irq_max - 1; i++) {
7702                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7703                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7704                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7705
7706                 if (tg3_flag(tp, ENABLE_TSS)) {
7707                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7708                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7709                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7710                 }
7711         }
7712 }
7713
7714 /* tp->lock is held. */
7715 static void tg3_rings_reset(struct tg3 *tp)
7716 {
7717         int i;
7718         u32 stblk, txrcb, rxrcb, limit;
7719         struct tg3_napi *tnapi = &tp->napi[0];
7720
7721         /* Disable all transmit rings but the first. */
7722         if (!tg3_flag(tp, 5705_PLUS))
7723                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7724         else if (tg3_flag(tp, 5717_PLUS))
7725                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7726         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7727                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7728         else
7729                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7730
7731         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7732              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7733                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7734                               BDINFO_FLAGS_DISABLED);
7735
7736
7737         /* Disable all receive return rings but the first. */
7738         if (tg3_flag(tp, 5717_PLUS))
7739                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7740         else if (!tg3_flag(tp, 5705_PLUS))
7741                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7742         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7743                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7744                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7745         else
7746                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7747
7748         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7749              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7750                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7751                               BDINFO_FLAGS_DISABLED);
7752
7753         /* Disable interrupts */
7754         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7755
7756         /* Zero mailbox registers. */
7757         if (tg3_flag(tp, SUPPORT_MSIX)) {
7758                 for (i = 1; i < tp->irq_max; i++) {
7759                         tp->napi[i].tx_prod = 0;
7760                         tp->napi[i].tx_cons = 0;
7761                         if (tg3_flag(tp, ENABLE_TSS))
7762                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7763                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7764                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7765                 }
7766                 if (!tg3_flag(tp, ENABLE_TSS))
7767                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7768         } else {
7769                 tp->napi[0].tx_prod = 0;
7770                 tp->napi[0].tx_cons = 0;
7771                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7772                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7773         }
7774
7775         /* Make sure the NIC-based send BD rings are disabled. */
7776         if (!tg3_flag(tp, 5705_PLUS)) {
7777                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7778                 for (i = 0; i < 16; i++)
7779                         tw32_tx_mbox(mbox + i * 8, 0);
7780         }
7781
7782         txrcb = NIC_SRAM_SEND_RCB;
7783         rxrcb = NIC_SRAM_RCV_RET_RCB;
7784
7785         /* Clear status block in ram. */
7786         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7787
7788         /* Set status block DMA address */
7789         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7790              ((u64) tnapi->status_mapping >> 32));
7791         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7792              ((u64) tnapi->status_mapping & 0xffffffff));
7793
7794         if (tnapi->tx_ring) {
7795                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7796                                (TG3_TX_RING_SIZE <<
7797                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7798                                NIC_SRAM_TX_BUFFER_DESC);
7799                 txrcb += TG3_BDINFO_SIZE;
7800         }
7801
7802         if (tnapi->rx_rcb) {
7803                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7804                                (tp->rx_ret_ring_mask + 1) <<
7805                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7806                 rxrcb += TG3_BDINFO_SIZE;
7807         }
7808
7809         stblk = HOSTCC_STATBLCK_RING1;
7810
7811         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7812                 u64 mapping = (u64)tnapi->status_mapping;
7813                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7814                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7815
7816                 /* Clear status block in ram. */
7817                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7818
7819                 if (tnapi->tx_ring) {
7820                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7821                                        (TG3_TX_RING_SIZE <<
7822                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7823                                        NIC_SRAM_TX_BUFFER_DESC);
7824                         txrcb += TG3_BDINFO_SIZE;
7825                 }
7826
7827                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7828                                ((tp->rx_ret_ring_mask + 1) <<
7829                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7830
7831                 stblk += 8;
7832                 rxrcb += TG3_BDINFO_SIZE;
7833         }
7834 }
7835
7836 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7837 {
7838         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7839
7840         if (!tg3_flag(tp, 5750_PLUS) ||
7841             tg3_flag(tp, 5780_CLASS) ||
7842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7843             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7844                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7845         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7846                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7847                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7848         else
7849                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7850
7851         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7852         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7853
7854         val = min(nic_rep_thresh, host_rep_thresh);
7855         tw32(RCVBDI_STD_THRESH, val);
7856
7857         if (tg3_flag(tp, 57765_PLUS))
7858                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7859
7860         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7861                 return;
7862
7863         if (!tg3_flag(tp, 5705_PLUS))
7864                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7865         else
7866                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7867
7868         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7869
7870         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7871         tw32(RCVBDI_JUMBO_THRESH, val);
7872
7873         if (tg3_flag(tp, 57765_PLUS))
7874                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7875 }
7876
7877 /* tp->lock is held. */
7878 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 {
7880         u32 val, rdmac_mode;
7881         int i, err, limit;
7882         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7883
7884         tg3_disable_ints(tp);
7885
7886         tg3_stop_fw(tp);
7887
7888         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7889
7890         if (tg3_flag(tp, INIT_COMPLETE))
7891                 tg3_abort_hw(tp, 1);
7892
7893         /* Enable MAC control of LPI */
7894         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7895                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7896                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7897                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7898
7899                 tw32_f(TG3_CPMU_EEE_CTRL,
7900                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7901
7902                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7903                       TG3_CPMU_EEEMD_LPI_IN_TX |
7904                       TG3_CPMU_EEEMD_LPI_IN_RX |
7905                       TG3_CPMU_EEEMD_EEE_ENABLE;
7906
7907                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7908                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7909
7910                 if (tg3_flag(tp, ENABLE_APE))
7911                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7912
7913                 tw32_f(TG3_CPMU_EEE_MODE, val);
7914
7915                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7916                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7917                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7918
7919                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7920                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7921                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7922         }
7923
7924         if (reset_phy)
7925                 tg3_phy_reset(tp);
7926
7927         err = tg3_chip_reset(tp);
7928         if (err)
7929                 return err;
7930
7931         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7932
7933         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7934                 val = tr32(TG3_CPMU_CTRL);
7935                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7936                 tw32(TG3_CPMU_CTRL, val);
7937
7938                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7939                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7940                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7941                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7942
7943                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7944                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7945                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7946                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7947
7948                 val = tr32(TG3_CPMU_HST_ACC);
7949                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7950                 val |= CPMU_HST_ACC_MACCLK_6_25;
7951                 tw32(TG3_CPMU_HST_ACC, val);
7952         }
7953
7954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7955                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7956                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7957                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7958                 tw32(PCIE_PWR_MGMT_THRESH, val);
7959
7960                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7961                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7962
7963                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7964
7965                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7966                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7967         }
7968
7969         if (tg3_flag(tp, L1PLLPD_EN)) {
7970                 u32 grc_mode = tr32(GRC_MODE);
7971
7972                 /* Access the lower 1K of PL PCIE block registers. */
7973                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7974                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7975
7976                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7977                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7978                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7979
7980                 tw32(GRC_MODE, grc_mode);
7981         }
7982
7983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7984                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7985                         u32 grc_mode = tr32(GRC_MODE);
7986
7987                         /* Access the lower 1K of PL PCIE block registers. */
7988                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7989                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7990
7991                         val = tr32(TG3_PCIE_TLDLPL_PORT +
7992                                    TG3_PCIE_PL_LO_PHYCTL5);
7993                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7994                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7995
7996                         tw32(GRC_MODE, grc_mode);
7997                 }
7998
7999                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8000                         u32 grc_mode = tr32(GRC_MODE);
8001
8002                         /* Access the lower 1K of DL PCIE block registers. */
8003                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8004                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8005
8006                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8007                                    TG3_PCIE_DL_LO_FTSMAX);
8008                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8009                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8010                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8011
8012                         tw32(GRC_MODE, grc_mode);
8013                 }
8014
8015                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8016                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8017                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8018                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8019         }
8020
8021         /* This works around an issue with Athlon chipsets on
8022          * B3 tigon3 silicon.  This bit has no effect on any
8023          * other revision.  But do not set this on PCI Express
8024          * chips and don't even touch the clocks if the CPMU is present.
8025          */
8026         if (!tg3_flag(tp, CPMU_PRESENT)) {
8027                 if (!tg3_flag(tp, PCI_EXPRESS))
8028                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8029                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8030         }
8031
8032         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8033             tg3_flag(tp, PCIX_MODE)) {
8034                 val = tr32(TG3PCI_PCISTATE);
8035                 val |= PCISTATE_RETRY_SAME_DMA;
8036                 tw32(TG3PCI_PCISTATE, val);
8037         }
8038
8039         if (tg3_flag(tp, ENABLE_APE)) {
8040                 /* Allow reads and writes to the
8041                  * APE register and memory space.
8042                  */
8043                 val = tr32(TG3PCI_PCISTATE);
8044                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8045                        PCISTATE_ALLOW_APE_SHMEM_WR |
8046                        PCISTATE_ALLOW_APE_PSPACE_WR;
8047                 tw32(TG3PCI_PCISTATE, val);
8048         }
8049
8050         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8051                 /* Enable some hw fixes.  */
8052                 val = tr32(TG3PCI_MSI_DATA);
8053                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8054                 tw32(TG3PCI_MSI_DATA, val);
8055         }
8056
8057         /* Descriptor ring init may make accesses to the
8058          * NIC SRAM area to setup the TX descriptors, so we
8059          * can only do this after the hardware has been
8060          * successfully reset.
8061          */
8062         err = tg3_init_rings(tp);
8063         if (err)
8064                 return err;
8065
8066         if (tg3_flag(tp, 57765_PLUS)) {
8067                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8068                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8069                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8070                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8072                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8073                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8074                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8075         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8076                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8077                 /* This value is determined during the probe time DMA
8078                  * engine test, tg3_test_dma.
8079                  */
8080                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8081         }
8082
8083         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8084                           GRC_MODE_4X_NIC_SEND_RINGS |
8085                           GRC_MODE_NO_TX_PHDR_CSUM |
8086                           GRC_MODE_NO_RX_PHDR_CSUM);
8087         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8088
8089         /* Pseudo-header checksum is done by hardware logic and not
8090          * the offload processers, so make the chip do the pseudo-
8091          * header checksums on receive.  For transmit it is more
8092          * convenient to do the pseudo-header checksum in software
8093          * as Linux does that on transmit for us in all cases.
8094          */
8095         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8096
8097         tw32(GRC_MODE,
8098              tp->grc_mode |
8099              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8100
8101         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8102         val = tr32(GRC_MISC_CFG);
8103         val &= ~0xff;
8104         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8105         tw32(GRC_MISC_CFG, val);
8106
8107         /* Initialize MBUF/DESC pool. */
8108         if (tg3_flag(tp, 5750_PLUS)) {
8109                 /* Do nothing.  */
8110         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8111                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8112                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8113                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8114                 else
8115                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8116                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8117                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8118         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8119                 int fw_len;
8120
8121                 fw_len = tp->fw_len;
8122                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8123                 tw32(BUFMGR_MB_POOL_ADDR,
8124                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8125                 tw32(BUFMGR_MB_POOL_SIZE,
8126                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8127         }
8128
8129         if (tp->dev->mtu <= ETH_DATA_LEN) {
8130                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8131                      tp->bufmgr_config.mbuf_read_dma_low_water);
8132                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8133                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8134                 tw32(BUFMGR_MB_HIGH_WATER,
8135                      tp->bufmgr_config.mbuf_high_water);
8136         } else {
8137                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8138                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8139                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8140                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8141                 tw32(BUFMGR_MB_HIGH_WATER,
8142                      tp->bufmgr_config.mbuf_high_water_jumbo);
8143         }
8144         tw32(BUFMGR_DMA_LOW_WATER,
8145              tp->bufmgr_config.dma_low_water);
8146         tw32(BUFMGR_DMA_HIGH_WATER,
8147              tp->bufmgr_config.dma_high_water);
8148
8149         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8151                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8153             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8154             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8155                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8156         tw32(BUFMGR_MODE, val);
8157         for (i = 0; i < 2000; i++) {
8158                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8159                         break;
8160                 udelay(10);
8161         }
8162         if (i >= 2000) {
8163                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8164                 return -ENODEV;
8165         }
8166
8167         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8168                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8169
8170         tg3_setup_rxbd_thresholds(tp);
8171
8172         /* Initialize TG3_BDINFO's at:
8173          *  RCVDBDI_STD_BD:     standard eth size rx ring
8174          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8175          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8176          *
8177          * like so:
8178          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8179          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8180          *                              ring attribute flags
8181          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8182          *
8183          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8184          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8185          *
8186          * The size of each ring is fixed in the firmware, but the location is
8187          * configurable.
8188          */
8189         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8190              ((u64) tpr->rx_std_mapping >> 32));
8191         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8192              ((u64) tpr->rx_std_mapping & 0xffffffff));
8193         if (!tg3_flag(tp, 5717_PLUS))
8194                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8195                      NIC_SRAM_RX_BUFFER_DESC);
8196
8197         /* Disable the mini ring */
8198         if (!tg3_flag(tp, 5705_PLUS))
8199                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8200                      BDINFO_FLAGS_DISABLED);
8201
8202         /* Program the jumbo buffer descriptor ring control
8203          * blocks on those devices that have them.
8204          */
8205         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8206             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8207
8208                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8209                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8210                              ((u64) tpr->rx_jmb_mapping >> 32));
8211                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8212                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8213                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8214                               BDINFO_FLAGS_MAXLEN_SHIFT;
8215                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8216                              val | BDINFO_FLAGS_USE_EXT_RECV);
8217                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8218                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8219                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8220                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8221                 } else {
8222                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8223                              BDINFO_FLAGS_DISABLED);
8224                 }
8225
8226                 if (tg3_flag(tp, 57765_PLUS)) {
8227                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8228                                 val = TG3_RX_STD_MAX_SIZE_5700;
8229                         else
8230                                 val = TG3_RX_STD_MAX_SIZE_5717;
8231                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8232                         val |= (TG3_RX_STD_DMA_SZ << 2);
8233                 } else
8234                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8235         } else
8236                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8237
8238         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8239
8240         tpr->rx_std_prod_idx = tp->rx_pending;
8241         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8242
8243         tpr->rx_jmb_prod_idx =
8244                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8245         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8246
8247         tg3_rings_reset(tp);
8248
8249         /* Initialize MAC address and backoff seed. */
8250         __tg3_set_mac_addr(tp, 0);
8251
8252         /* MTU + ethernet header + FCS + optional VLAN tag */
8253         tw32(MAC_RX_MTU_SIZE,
8254              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8255
8256         /* The slot time is changed by tg3_setup_phy if we
8257          * run at gigabit with half duplex.
8258          */
8259         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8260               (6 << TX_LENGTHS_IPG_SHIFT) |
8261               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8262
8263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8264                 val |= tr32(MAC_TX_LENGTHS) &
8265                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8266                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8267
8268         tw32(MAC_TX_LENGTHS, val);
8269
8270         /* Receive rules. */
8271         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8272         tw32(RCVLPC_CONFIG, 0x0181);
8273
8274         /* Calculate RDMAC_MODE setting early, we need it to determine
8275          * the RCVLPC_STATE_ENABLE mask.
8276          */
8277         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8278                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8279                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8280                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8281                       RDMAC_MODE_LNGREAD_ENAB);
8282
8283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8284                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8285
8286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8289                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8290                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8291                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8292
8293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8294             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8295                 if (tg3_flag(tp, TSO_CAPABLE) &&
8296                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8297                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8298                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8299                            !tg3_flag(tp, IS_5788)) {
8300                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8301                 }
8302         }
8303
8304         if (tg3_flag(tp, PCI_EXPRESS))
8305                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8306
8307         if (tg3_flag(tp, HW_TSO_1) ||
8308             tg3_flag(tp, HW_TSO_2) ||
8309             tg3_flag(tp, HW_TSO_3))
8310                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8311
8312         if (tg3_flag(tp, 57765_PLUS) ||
8313             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8315                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8316
8317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8318                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8319
8320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8323             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8324             tg3_flag(tp, 57765_PLUS)) {
8325                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8327                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8328                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8329                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8330                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8331                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8332                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8333                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8334                 }
8335                 tw32(TG3_RDMA_RSRVCTRL_REG,
8336                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8337         }
8338
8339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8340             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8341                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8342                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8343                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8344                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8345         }
8346
8347         /* Receive/send statistics. */
8348         if (tg3_flag(tp, 5750_PLUS)) {
8349                 val = tr32(RCVLPC_STATS_ENABLE);
8350                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8351                 tw32(RCVLPC_STATS_ENABLE, val);
8352         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8353                    tg3_flag(tp, TSO_CAPABLE)) {
8354                 val = tr32(RCVLPC_STATS_ENABLE);
8355                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8356                 tw32(RCVLPC_STATS_ENABLE, val);
8357         } else {
8358                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8359         }
8360         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8361         tw32(SNDDATAI_STATSENAB, 0xffffff);
8362         tw32(SNDDATAI_STATSCTRL,
8363              (SNDDATAI_SCTRL_ENABLE |
8364               SNDDATAI_SCTRL_FASTUPD));
8365
8366         /* Setup host coalescing engine. */
8367         tw32(HOSTCC_MODE, 0);
8368         for (i = 0; i < 2000; i++) {
8369                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8370                         break;
8371                 udelay(10);
8372         }
8373
8374         __tg3_set_coalesce(tp, &tp->coal);
8375
8376         if (!tg3_flag(tp, 5705_PLUS)) {
8377                 /* Status/statistics block address.  See tg3_timer,
8378                  * the tg3_periodic_fetch_stats call there, and
8379                  * tg3_get_stats to see how this works for 5705/5750 chips.
8380                  */
8381                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8382                      ((u64) tp->stats_mapping >> 32));
8383                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8384                      ((u64) tp->stats_mapping & 0xffffffff));
8385                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8386
8387                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8388
8389                 /* Clear statistics and status block memory areas */
8390                 for (i = NIC_SRAM_STATS_BLK;
8391                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8392                      i += sizeof(u32)) {
8393                         tg3_write_mem(tp, i, 0);
8394                         udelay(40);
8395                 }
8396         }
8397
8398         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8399
8400         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8401         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8402         if (!tg3_flag(tp, 5705_PLUS))
8403                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8404
8405         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8406                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8407                 /* reset to prevent losing 1st rx packet intermittently */
8408                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8409                 udelay(10);
8410         }
8411
8412         if (tg3_flag(tp, ENABLE_APE))
8413                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8414         else
8415                 tp->mac_mode = 0;
8416         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8417                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8418         if (!tg3_flag(tp, 5705_PLUS) &&
8419             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8420             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8421                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8422         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8423         udelay(40);
8424
8425         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8426          * If TG3_FLAG_IS_NIC is zero, we should read the
8427          * register to preserve the GPIO settings for LOMs. The GPIOs,
8428          * whether used as inputs or outputs, are set by boot code after
8429          * reset.
8430          */
8431         if (!tg3_flag(tp, IS_NIC)) {
8432                 u32 gpio_mask;
8433
8434                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8435                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8436                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8437
8438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8439                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8440                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8441
8442                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8443                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8444
8445                 tp->grc_local_ctrl &= ~gpio_mask;
8446                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8447
8448                 /* GPIO1 must be driven high for eeprom write protect */
8449                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8450                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8451                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8452         }
8453         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8454         udelay(100);
8455
8456         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8457                 val = tr32(MSGINT_MODE);
8458                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8459                 tw32(MSGINT_MODE, val);
8460         }
8461
8462         if (!tg3_flag(tp, 5705_PLUS)) {
8463                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8464                 udelay(40);
8465         }
8466
8467         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8468                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8469                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8470                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8471                WDMAC_MODE_LNGREAD_ENAB);
8472
8473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8474             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8475                 if (tg3_flag(tp, TSO_CAPABLE) &&
8476                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8477                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8478                         /* nothing */
8479                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8480                            !tg3_flag(tp, IS_5788)) {
8481                         val |= WDMAC_MODE_RX_ACCEL;
8482                 }
8483         }
8484
8485         /* Enable host coalescing bug fix */
8486         if (tg3_flag(tp, 5755_PLUS))
8487                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8488
8489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8490                 val |= WDMAC_MODE_BURST_ALL_DATA;
8491
8492         tw32_f(WDMAC_MODE, val);
8493         udelay(40);
8494
8495         if (tg3_flag(tp, PCIX_MODE)) {
8496                 u16 pcix_cmd;
8497
8498                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8499                                      &pcix_cmd);
8500                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8501                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8502                         pcix_cmd |= PCI_X_CMD_READ_2K;
8503                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8504                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8505                         pcix_cmd |= PCI_X_CMD_READ_2K;
8506                 }
8507                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8508                                       pcix_cmd);
8509         }
8510
8511         tw32_f(RDMAC_MODE, rdmac_mode);
8512         udelay(40);
8513
8514         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8515         if (!tg3_flag(tp, 5705_PLUS))
8516                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8517
8518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8519                 tw32(SNDDATAC_MODE,
8520                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8521         else
8522                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8523
8524         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8525         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8526         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8527         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8528                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8529         tw32(RCVDBDI_MODE, val);
8530         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8531         if (tg3_flag(tp, HW_TSO_1) ||
8532             tg3_flag(tp, HW_TSO_2) ||
8533             tg3_flag(tp, HW_TSO_3))
8534                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8535         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8536         if (tg3_flag(tp, ENABLE_TSS))
8537                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8538         tw32(SNDBDI_MODE, val);
8539         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8540
8541         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8542                 err = tg3_load_5701_a0_firmware_fix(tp);
8543                 if (err)
8544                         return err;
8545         }
8546
8547         if (tg3_flag(tp, TSO_CAPABLE)) {
8548                 err = tg3_load_tso_firmware(tp);
8549                 if (err)
8550                         return err;
8551         }
8552
8553         tp->tx_mode = TX_MODE_ENABLE;
8554
8555         if (tg3_flag(tp, 5755_PLUS) ||
8556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8557                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8558
8559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8560                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8561                 tp->tx_mode &= ~val;
8562                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8563         }
8564
8565         tw32_f(MAC_TX_MODE, tp->tx_mode);
8566         udelay(100);
8567
8568         if (tg3_flag(tp, ENABLE_RSS)) {
8569                 u32 reg = MAC_RSS_INDIR_TBL_0;
8570                 u8 *ent = (u8 *)&val;
8571
8572                 /* Setup the indirection table */
8573                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8574                         int idx = i % sizeof(val);
8575
8576                         ent[idx] = i % (tp->irq_cnt - 1);
8577                         if (idx == sizeof(val) - 1) {
8578                                 tw32(reg, val);
8579                                 reg += 4;
8580                         }
8581                 }
8582
8583                 /* Setup the "secret" hash key. */
8584                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8585                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8586                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8587                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8588                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8589                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8590                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8591                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8592                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8593                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8594         }
8595
8596         tp->rx_mode = RX_MODE_ENABLE;
8597         if (tg3_flag(tp, 5755_PLUS))
8598                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8599
8600         if (tg3_flag(tp, ENABLE_RSS))
8601                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8602                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8603                                RX_MODE_RSS_IPV6_HASH_EN |
8604                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8605                                RX_MODE_RSS_IPV4_HASH_EN |
8606                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8607
8608         tw32_f(MAC_RX_MODE, tp->rx_mode);
8609         udelay(10);
8610
8611         tw32(MAC_LED_CTRL, tp->led_ctrl);
8612
8613         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8614         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8615                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8616                 udelay(10);
8617         }
8618         tw32_f(MAC_RX_MODE, tp->rx_mode);
8619         udelay(10);
8620
8621         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8622                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8623                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8624                         /* Set drive transmission level to 1.2V  */
8625                         /* only if the signal pre-emphasis bit is not set  */
8626                         val = tr32(MAC_SERDES_CFG);
8627                         val &= 0xfffff000;
8628                         val |= 0x880;
8629                         tw32(MAC_SERDES_CFG, val);
8630                 }
8631                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8632                         tw32(MAC_SERDES_CFG, 0x616000);
8633         }
8634
8635         /* Prevent chip from dropping frames when flow control
8636          * is enabled.
8637          */
8638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8639                 val = 1;
8640         else
8641                 val = 2;
8642         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8643
8644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8645             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8646                 /* Use hardware link auto-negotiation */
8647                 tg3_flag_set(tp, HW_AUTONEG);
8648         }
8649
8650         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8651             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8652                 u32 tmp;
8653
8654                 tmp = tr32(SERDES_RX_CTRL);
8655                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8656                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8657                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8658                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8659         }
8660
8661         if (!tg3_flag(tp, USE_PHYLIB)) {
8662                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8663                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8664                         tp->link_config.speed = tp->link_config.orig_speed;
8665                         tp->link_config.duplex = tp->link_config.orig_duplex;
8666                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8667                 }
8668
8669                 err = tg3_setup_phy(tp, 0);
8670                 if (err)
8671                         return err;
8672
8673                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8674                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8675                         u32 tmp;
8676
8677                         /* Clear CRC stats. */
8678                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8679                                 tg3_writephy(tp, MII_TG3_TEST1,
8680                                              tmp | MII_TG3_TEST1_CRC_EN);
8681                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8682                         }
8683                 }
8684         }
8685
8686         __tg3_set_rx_mode(tp->dev);
8687
8688         /* Initialize receive rules. */
8689         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8690         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8691         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8692         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8693
8694         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8695                 limit = 8;
8696         else
8697                 limit = 16;
8698         if (tg3_flag(tp, ENABLE_ASF))
8699                 limit -= 4;
8700         switch (limit) {
8701         case 16:
8702                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8703         case 15:
8704                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8705         case 14:
8706                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8707         case 13:
8708                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8709         case 12:
8710                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8711         case 11:
8712                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8713         case 10:
8714                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8715         case 9:
8716                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8717         case 8:
8718                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8719         case 7:
8720                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8721         case 6:
8722                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8723         case 5:
8724                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8725         case 4:
8726                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8727         case 3:
8728                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8729         case 2:
8730         case 1:
8731
8732         default:
8733                 break;
8734         }
8735
8736         if (tg3_flag(tp, ENABLE_APE))
8737                 /* Write our heartbeat update interval to APE. */
8738                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8739                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8740
8741         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8742
8743         return 0;
8744 }
8745
8746 /* Called at device open time to get the chip ready for
8747  * packet processing.  Invoked with tp->lock held.
8748  */
8749 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8750 {
8751         tg3_switch_clocks(tp);
8752
8753         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8754
8755         return tg3_reset_hw(tp, reset_phy);
8756 }
8757
8758 #define TG3_STAT_ADD32(PSTAT, REG) \
8759 do {    u32 __val = tr32(REG); \
8760         (PSTAT)->low += __val; \
8761         if ((PSTAT)->low < __val) \
8762                 (PSTAT)->high += 1; \
8763 } while (0)
8764
8765 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8766 {
8767         struct tg3_hw_stats *sp = tp->hw_stats;
8768
8769         if (!netif_carrier_ok(tp->dev))
8770                 return;
8771
8772         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8773         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8774         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8775         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8776         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8777         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8778         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8779         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8780         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8781         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8782         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8783         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8784         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8785
8786         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8787         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8788         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8789         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8790         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8791         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8792         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8793         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8794         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8795         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8796         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8797         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8798         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8799         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8800
8801         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8802         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8803             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8804             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8805                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8806         } else {
8807                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8808                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8809                 if (val) {
8810                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8811                         sp->rx_discards.low += val;
8812                         if (sp->rx_discards.low < val)
8813                                 sp->rx_discards.high += 1;
8814                 }
8815                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8816         }
8817         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8818 }
8819
8820 static void tg3_timer(unsigned long __opaque)
8821 {
8822         struct tg3 *tp = (struct tg3 *) __opaque;
8823
8824         if (tp->irq_sync)
8825                 goto restart_timer;
8826
8827         spin_lock(&tp->lock);
8828
8829         if (!tg3_flag(tp, TAGGED_STATUS)) {
8830                 /* All of this garbage is because when using non-tagged
8831                  * IRQ status the mailbox/status_block protocol the chip
8832                  * uses with the cpu is race prone.
8833                  */
8834                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8835                         tw32(GRC_LOCAL_CTRL,
8836                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8837                 } else {
8838                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8839                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8840                 }
8841
8842                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8843                         tg3_flag_set(tp, RESTART_TIMER);
8844                         spin_unlock(&tp->lock);
8845                         schedule_work(&tp->reset_task);
8846                         return;
8847                 }
8848         }
8849
8850         /* This part only runs once per second. */
8851         if (!--tp->timer_counter) {
8852                 if (tg3_flag(tp, 5705_PLUS))
8853                         tg3_periodic_fetch_stats(tp);
8854
8855                 if (tp->setlpicnt && !--tp->setlpicnt)
8856                         tg3_phy_eee_enable(tp);
8857
8858                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8859                         u32 mac_stat;
8860                         int phy_event;
8861
8862                         mac_stat = tr32(MAC_STATUS);
8863
8864                         phy_event = 0;
8865                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8866                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8867                                         phy_event = 1;
8868                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8869                                 phy_event = 1;
8870
8871                         if (phy_event)
8872                                 tg3_setup_phy(tp, 0);
8873                 } else if (tg3_flag(tp, POLL_SERDES)) {
8874                         u32 mac_stat = tr32(MAC_STATUS);
8875                         int need_setup = 0;
8876
8877                         if (netif_carrier_ok(tp->dev) &&
8878                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8879                                 need_setup = 1;
8880                         }
8881                         if (!netif_carrier_ok(tp->dev) &&
8882                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8883                                          MAC_STATUS_SIGNAL_DET))) {
8884                                 need_setup = 1;
8885                         }
8886                         if (need_setup) {
8887                                 if (!tp->serdes_counter) {
8888                                         tw32_f(MAC_MODE,
8889                                              (tp->mac_mode &
8890                                               ~MAC_MODE_PORT_MODE_MASK));
8891                                         udelay(40);
8892                                         tw32_f(MAC_MODE, tp->mac_mode);
8893                                         udelay(40);
8894                                 }
8895                                 tg3_setup_phy(tp, 0);
8896                         }
8897                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8898                            tg3_flag(tp, 5780_CLASS)) {
8899                         tg3_serdes_parallel_detect(tp);
8900                 }
8901
8902                 tp->timer_counter = tp->timer_multiplier;
8903         }
8904
8905         /* Heartbeat is only sent once every 2 seconds.
8906          *
8907          * The heartbeat is to tell the ASF firmware that the host
8908          * driver is still alive.  In the event that the OS crashes,
8909          * ASF needs to reset the hardware to free up the FIFO space
8910          * that may be filled with rx packets destined for the host.
8911          * If the FIFO is full, ASF will no longer function properly.
8912          *
8913          * Unintended resets have been reported on real time kernels
8914          * where the timer doesn't run on time.  Netpoll will also have
8915          * same problem.
8916          *
8917          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8918          * to check the ring condition when the heartbeat is expiring
8919          * before doing the reset.  This will prevent most unintended
8920          * resets.
8921          */
8922         if (!--tp->asf_counter) {
8923                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8924                         tg3_wait_for_event_ack(tp);
8925
8926                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8927                                       FWCMD_NICDRV_ALIVE3);
8928                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8929                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8930                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8931
8932                         tg3_generate_fw_event(tp);
8933                 }
8934                 tp->asf_counter = tp->asf_multiplier;
8935         }
8936
8937         spin_unlock(&tp->lock);
8938
8939 restart_timer:
8940         tp->timer.expires = jiffies + tp->timer_offset;
8941         add_timer(&tp->timer);
8942 }
8943
8944 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8945 {
8946         irq_handler_t fn;
8947         unsigned long flags;
8948         char *name;
8949         struct tg3_napi *tnapi = &tp->napi[irq_num];
8950
8951         if (tp->irq_cnt == 1)
8952                 name = tp->dev->name;
8953         else {
8954                 name = &tnapi->irq_lbl[0];
8955                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8956                 name[IFNAMSIZ-1] = 0;
8957         }
8958
8959         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8960                 fn = tg3_msi;
8961                 if (tg3_flag(tp, 1SHOT_MSI))
8962                         fn = tg3_msi_1shot;
8963                 flags = 0;
8964         } else {
8965                 fn = tg3_interrupt;
8966                 if (tg3_flag(tp, TAGGED_STATUS))
8967                         fn = tg3_interrupt_tagged;
8968                 flags = IRQF_SHARED;
8969         }
8970
8971         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8972 }
8973
8974 static int tg3_test_interrupt(struct tg3 *tp)
8975 {
8976         struct tg3_napi *tnapi = &tp->napi[0];
8977         struct net_device *dev = tp->dev;
8978         int err, i, intr_ok = 0;
8979         u32 val;
8980
8981         if (!netif_running(dev))
8982                 return -ENODEV;
8983
8984         tg3_disable_ints(tp);
8985
8986         free_irq(tnapi->irq_vec, tnapi);
8987
8988         /*
8989          * Turn off MSI one shot mode.  Otherwise this test has no
8990          * observable way to know whether the interrupt was delivered.
8991          */
8992         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8993                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8994                 tw32(MSGINT_MODE, val);
8995         }
8996
8997         err = request_irq(tnapi->irq_vec, tg3_test_isr,
8998                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8999         if (err)
9000                 return err;
9001
9002         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9003         tg3_enable_ints(tp);
9004
9005         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9006                tnapi->coal_now);
9007
9008         for (i = 0; i < 5; i++) {
9009                 u32 int_mbox, misc_host_ctrl;
9010
9011                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9012                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9013
9014                 if ((int_mbox != 0) ||
9015                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9016                         intr_ok = 1;
9017                         break;
9018                 }
9019
9020                 msleep(10);
9021         }
9022
9023         tg3_disable_ints(tp);
9024
9025         free_irq(tnapi->irq_vec, tnapi);
9026
9027         err = tg3_request_irq(tp, 0);
9028
9029         if (err)
9030                 return err;
9031
9032         if (intr_ok) {
9033                 /* Reenable MSI one shot mode. */
9034                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9035                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9036                         tw32(MSGINT_MODE, val);
9037                 }
9038                 return 0;
9039         }
9040
9041         return -EIO;
9042 }
9043
9044 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9045  * successfully restored
9046  */
9047 static int tg3_test_msi(struct tg3 *tp)
9048 {
9049         int err;
9050         u16 pci_cmd;
9051
9052         if (!tg3_flag(tp, USING_MSI))
9053                 return 0;
9054
9055         /* Turn off SERR reporting in case MSI terminates with Master
9056          * Abort.
9057          */
9058         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9059         pci_write_config_word(tp->pdev, PCI_COMMAND,
9060                               pci_cmd & ~PCI_COMMAND_SERR);
9061
9062         err = tg3_test_interrupt(tp);
9063
9064         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9065
9066         if (!err)
9067                 return 0;
9068
9069         /* other failures */
9070         if (err != -EIO)
9071                 return err;
9072
9073         /* MSI test failed, go back to INTx mode */
9074         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9075                     "to INTx mode. Please report this failure to the PCI "
9076                     "maintainer and include system chipset information\n");
9077
9078         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9079
9080         pci_disable_msi(tp->pdev);
9081
9082         tg3_flag_clear(tp, USING_MSI);
9083         tp->napi[0].irq_vec = tp->pdev->irq;
9084
9085         err = tg3_request_irq(tp, 0);
9086         if (err)
9087                 return err;
9088
9089         /* Need to reset the chip because the MSI cycle may have terminated
9090          * with Master Abort.
9091          */
9092         tg3_full_lock(tp, 1);
9093
9094         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9095         err = tg3_init_hw(tp, 1);
9096
9097         tg3_full_unlock(tp);
9098
9099         if (err)
9100                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9101
9102         return err;
9103 }
9104
9105 static int tg3_request_firmware(struct tg3 *tp)
9106 {
9107         const __be32 *fw_data;
9108
9109         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9110                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9111                            tp->fw_needed);
9112                 return -ENOENT;
9113         }
9114
9115         fw_data = (void *)tp->fw->data;
9116
9117         /* Firmware blob starts with version numbers, followed by
9118          * start address and _full_ length including BSS sections
9119          * (which must be longer than the actual data, of course
9120          */
9121
9122         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9123         if (tp->fw_len < (tp->fw->size - 12)) {
9124                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9125                            tp->fw_len, tp->fw_needed);
9126                 release_firmware(tp->fw);
9127                 tp->fw = NULL;
9128                 return -EINVAL;
9129         }
9130
9131         /* We no longer need firmware; we have it. */
9132         tp->fw_needed = NULL;
9133         return 0;
9134 }
9135
9136 static bool tg3_enable_msix(struct tg3 *tp)
9137 {
9138         int i, rc, cpus = num_online_cpus();
9139         struct msix_entry msix_ent[tp->irq_max];
9140
9141         if (cpus == 1)
9142                 /* Just fallback to the simpler MSI mode. */
9143                 return false;
9144
9145         /*
9146          * We want as many rx rings enabled as there are cpus.
9147          * The first MSIX vector only deals with link interrupts, etc,
9148          * so we add one to the number of vectors we are requesting.
9149          */
9150         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9151
9152         for (i = 0; i < tp->irq_max; i++) {
9153                 msix_ent[i].entry  = i;
9154                 msix_ent[i].vector = 0;
9155         }
9156
9157         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9158         if (rc < 0) {
9159                 return false;
9160         } else if (rc != 0) {
9161                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9162                         return false;
9163                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9164                               tp->irq_cnt, rc);
9165                 tp->irq_cnt = rc;
9166         }
9167
9168         for (i = 0; i < tp->irq_max; i++)
9169                 tp->napi[i].irq_vec = msix_ent[i].vector;
9170
9171         netif_set_real_num_tx_queues(tp->dev, 1);
9172         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9173         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9174                 pci_disable_msix(tp->pdev);
9175                 return false;
9176         }
9177
9178         if (tp->irq_cnt > 1) {
9179                 tg3_flag_set(tp, ENABLE_RSS);
9180
9181                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9182                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9183                         tg3_flag_set(tp, ENABLE_TSS);
9184                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9185                 }
9186         }
9187
9188         return true;
9189 }
9190
9191 static void tg3_ints_init(struct tg3 *tp)
9192 {
9193         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9194             !tg3_flag(tp, TAGGED_STATUS)) {
9195                 /* All MSI supporting chips should support tagged
9196                  * status.  Assert that this is the case.
9197                  */
9198                 netdev_warn(tp->dev,
9199                             "MSI without TAGGED_STATUS? Not using MSI\n");
9200                 goto defcfg;
9201         }
9202
9203         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9204                 tg3_flag_set(tp, USING_MSIX);
9205         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9206                 tg3_flag_set(tp, USING_MSI);
9207
9208         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9209                 u32 msi_mode = tr32(MSGINT_MODE);
9210                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9211                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9212                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9213         }
9214 defcfg:
9215         if (!tg3_flag(tp, USING_MSIX)) {
9216                 tp->irq_cnt = 1;
9217                 tp->napi[0].irq_vec = tp->pdev->irq;
9218                 netif_set_real_num_tx_queues(tp->dev, 1);
9219                 netif_set_real_num_rx_queues(tp->dev, 1);
9220         }
9221 }
9222
9223 static void tg3_ints_fini(struct tg3 *tp)
9224 {
9225         if (tg3_flag(tp, USING_MSIX))
9226                 pci_disable_msix(tp->pdev);
9227         else if (tg3_flag(tp, USING_MSI))
9228                 pci_disable_msi(tp->pdev);
9229         tg3_flag_clear(tp, USING_MSI);
9230         tg3_flag_clear(tp, USING_MSIX);
9231         tg3_flag_clear(tp, ENABLE_RSS);
9232         tg3_flag_clear(tp, ENABLE_TSS);
9233 }
9234
9235 static int tg3_open(struct net_device *dev)
9236 {
9237         struct tg3 *tp = netdev_priv(dev);
9238         int i, err;
9239
9240         if (tp->fw_needed) {
9241                 err = tg3_request_firmware(tp);
9242                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9243                         if (err)
9244                                 return err;
9245                 } else if (err) {
9246                         netdev_warn(tp->dev, "TSO capability disabled\n");
9247                         tg3_flag_clear(tp, TSO_CAPABLE);
9248                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9249                         netdev_notice(tp->dev, "TSO capability restored\n");
9250                         tg3_flag_set(tp, TSO_CAPABLE);
9251                 }
9252         }
9253
9254         netif_carrier_off(tp->dev);
9255
9256         err = tg3_power_up(tp);
9257         if (err)
9258                 return err;
9259
9260         tg3_full_lock(tp, 0);
9261
9262         tg3_disable_ints(tp);
9263         tg3_flag_clear(tp, INIT_COMPLETE);
9264
9265         tg3_full_unlock(tp);
9266
9267         /*
9268          * Setup interrupts first so we know how
9269          * many NAPI resources to allocate
9270          */
9271         tg3_ints_init(tp);
9272
9273         /* The placement of this call is tied
9274          * to the setup and use of Host TX descriptors.
9275          */
9276         err = tg3_alloc_consistent(tp);
9277         if (err)
9278                 goto err_out1;
9279
9280         tg3_napi_init(tp);
9281
9282         tg3_napi_enable(tp);
9283
9284         for (i = 0; i < tp->irq_cnt; i++) {
9285                 struct tg3_napi *tnapi = &tp->napi[i];
9286                 err = tg3_request_irq(tp, i);
9287                 if (err) {
9288                         for (i--; i >= 0; i--)
9289                                 free_irq(tnapi->irq_vec, tnapi);
9290                         break;
9291                 }
9292         }
9293
9294         if (err)
9295                 goto err_out2;
9296
9297         tg3_full_lock(tp, 0);
9298
9299         err = tg3_init_hw(tp, 1);
9300         if (err) {
9301                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9302                 tg3_free_rings(tp);
9303         } else {
9304                 if (tg3_flag(tp, TAGGED_STATUS))
9305                         tp->timer_offset = HZ;
9306                 else
9307                         tp->timer_offset = HZ / 10;
9308
9309                 BUG_ON(tp->timer_offset > HZ);
9310                 tp->timer_counter = tp->timer_multiplier =
9311                         (HZ / tp->timer_offset);
9312                 tp->asf_counter = tp->asf_multiplier =
9313                         ((HZ / tp->timer_offset) * 2);
9314
9315                 init_timer(&tp->timer);
9316                 tp->timer.expires = jiffies + tp->timer_offset;
9317                 tp->timer.data = (unsigned long) tp;
9318                 tp->timer.function = tg3_timer;
9319         }
9320
9321         tg3_full_unlock(tp);
9322
9323         if (err)
9324                 goto err_out3;
9325
9326         if (tg3_flag(tp, USING_MSI)) {
9327                 err = tg3_test_msi(tp);
9328
9329                 if (err) {
9330                         tg3_full_lock(tp, 0);
9331                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9332                         tg3_free_rings(tp);
9333                         tg3_full_unlock(tp);
9334
9335                         goto err_out2;
9336                 }
9337
9338                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9339                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9340
9341                         tw32(PCIE_TRANSACTION_CFG,
9342                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9343                 }
9344         }
9345
9346         tg3_phy_start(tp);
9347
9348         tg3_full_lock(tp, 0);
9349
9350         add_timer(&tp->timer);
9351         tg3_flag_set(tp, INIT_COMPLETE);
9352         tg3_enable_ints(tp);
9353
9354         tg3_full_unlock(tp);
9355
9356         netif_tx_start_all_queues(dev);
9357
9358         /*
9359          * Reset loopback feature if it was turned on while the device was down
9360          * make sure that it's installed properly now.
9361          */
9362         if (dev->features & NETIF_F_LOOPBACK)
9363                 tg3_set_loopback(dev, dev->features);
9364
9365         return 0;
9366
9367 err_out3:
9368         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9369                 struct tg3_napi *tnapi = &tp->napi[i];
9370                 free_irq(tnapi->irq_vec, tnapi);
9371         }
9372
9373 err_out2:
9374         tg3_napi_disable(tp);
9375         tg3_napi_fini(tp);
9376         tg3_free_consistent(tp);
9377
9378 err_out1:
9379         tg3_ints_fini(tp);
9380         return err;
9381 }
9382
9383 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9384                                                  struct rtnl_link_stats64 *);
9385 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9386
9387 static int tg3_close(struct net_device *dev)
9388 {
9389         int i;
9390         struct tg3 *tp = netdev_priv(dev);
9391
9392         tg3_napi_disable(tp);
9393         cancel_work_sync(&tp->reset_task);
9394
9395         netif_tx_stop_all_queues(dev);
9396
9397         del_timer_sync(&tp->timer);
9398
9399         tg3_phy_stop(tp);
9400
9401         tg3_full_lock(tp, 1);
9402
9403         tg3_disable_ints(tp);
9404
9405         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9406         tg3_free_rings(tp);
9407         tg3_flag_clear(tp, INIT_COMPLETE);
9408
9409         tg3_full_unlock(tp);
9410
9411         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9412                 struct tg3_napi *tnapi = &tp->napi[i];
9413                 free_irq(tnapi->irq_vec, tnapi);
9414         }
9415
9416         tg3_ints_fini(tp);
9417
9418         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9419
9420         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9421                sizeof(tp->estats_prev));
9422
9423         tg3_napi_fini(tp);
9424
9425         tg3_free_consistent(tp);
9426
9427         tg3_power_down(tp);
9428
9429         netif_carrier_off(tp->dev);
9430
9431         return 0;
9432 }
9433
9434 static inline u64 get_stat64(tg3_stat64_t *val)
9435 {
9436        return ((u64)val->high << 32) | ((u64)val->low);
9437 }
9438
9439 static u64 calc_crc_errors(struct tg3 *tp)
9440 {
9441         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9442
9443         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9444             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9445              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9446                 u32 val;
9447
9448                 spin_lock_bh(&tp->lock);
9449                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9450                         tg3_writephy(tp, MII_TG3_TEST1,
9451                                      val | MII_TG3_TEST1_CRC_EN);
9452                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9453                 } else
9454                         val = 0;
9455                 spin_unlock_bh(&tp->lock);
9456
9457                 tp->phy_crc_errors += val;
9458
9459                 return tp->phy_crc_errors;
9460         }
9461
9462         return get_stat64(&hw_stats->rx_fcs_errors);
9463 }
9464
9465 #define ESTAT_ADD(member) \
9466         estats->member =        old_estats->member + \
9467                                 get_stat64(&hw_stats->member)
9468
9469 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9470 {
9471         struct tg3_ethtool_stats *estats = &tp->estats;
9472         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9473         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9474
9475         if (!hw_stats)
9476                 return old_estats;
9477
9478         ESTAT_ADD(rx_octets);
9479         ESTAT_ADD(rx_fragments);
9480         ESTAT_ADD(rx_ucast_packets);
9481         ESTAT_ADD(rx_mcast_packets);
9482         ESTAT_ADD(rx_bcast_packets);
9483         ESTAT_ADD(rx_fcs_errors);
9484         ESTAT_ADD(rx_align_errors);
9485         ESTAT_ADD(rx_xon_pause_rcvd);
9486         ESTAT_ADD(rx_xoff_pause_rcvd);
9487         ESTAT_ADD(rx_mac_ctrl_rcvd);
9488         ESTAT_ADD(rx_xoff_entered);
9489         ESTAT_ADD(rx_frame_too_long_errors);
9490         ESTAT_ADD(rx_jabbers);
9491         ESTAT_ADD(rx_undersize_packets);
9492         ESTAT_ADD(rx_in_length_errors);
9493         ESTAT_ADD(rx_out_length_errors);
9494         ESTAT_ADD(rx_64_or_less_octet_packets);
9495         ESTAT_ADD(rx_65_to_127_octet_packets);
9496         ESTAT_ADD(rx_128_to_255_octet_packets);
9497         ESTAT_ADD(rx_256_to_511_octet_packets);
9498         ESTAT_ADD(rx_512_to_1023_octet_packets);
9499         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9500         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9501         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9502         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9503         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9504
9505         ESTAT_ADD(tx_octets);
9506         ESTAT_ADD(tx_collisions);
9507         ESTAT_ADD(tx_xon_sent);
9508         ESTAT_ADD(tx_xoff_sent);
9509         ESTAT_ADD(tx_flow_control);
9510         ESTAT_ADD(tx_mac_errors);
9511         ESTAT_ADD(tx_single_collisions);
9512         ESTAT_ADD(tx_mult_collisions);
9513         ESTAT_ADD(tx_deferred);
9514         ESTAT_ADD(tx_excessive_collisions);
9515         ESTAT_ADD(tx_late_collisions);
9516         ESTAT_ADD(tx_collide_2times);
9517         ESTAT_ADD(tx_collide_3times);
9518         ESTAT_ADD(tx_collide_4times);
9519         ESTAT_ADD(tx_collide_5times);
9520         ESTAT_ADD(tx_collide_6times);
9521         ESTAT_ADD(tx_collide_7times);
9522         ESTAT_ADD(tx_collide_8times);
9523         ESTAT_ADD(tx_collide_9times);
9524         ESTAT_ADD(tx_collide_10times);
9525         ESTAT_ADD(tx_collide_11times);
9526         ESTAT_ADD(tx_collide_12times);
9527         ESTAT_ADD(tx_collide_13times);
9528         ESTAT_ADD(tx_collide_14times);
9529         ESTAT_ADD(tx_collide_15times);
9530         ESTAT_ADD(tx_ucast_packets);
9531         ESTAT_ADD(tx_mcast_packets);
9532         ESTAT_ADD(tx_bcast_packets);
9533         ESTAT_ADD(tx_carrier_sense_errors);
9534         ESTAT_ADD(tx_discards);
9535         ESTAT_ADD(tx_errors);
9536
9537         ESTAT_ADD(dma_writeq_full);
9538         ESTAT_ADD(dma_write_prioq_full);
9539         ESTAT_ADD(rxbds_empty);
9540         ESTAT_ADD(rx_discards);
9541         ESTAT_ADD(rx_errors);
9542         ESTAT_ADD(rx_threshold_hit);
9543
9544         ESTAT_ADD(dma_readq_full);
9545         ESTAT_ADD(dma_read_prioq_full);
9546         ESTAT_ADD(tx_comp_queue_full);
9547
9548         ESTAT_ADD(ring_set_send_prod_index);
9549         ESTAT_ADD(ring_status_update);
9550         ESTAT_ADD(nic_irqs);
9551         ESTAT_ADD(nic_avoided_irqs);
9552         ESTAT_ADD(nic_tx_threshold_hit);
9553
9554         ESTAT_ADD(mbuf_lwm_thresh_hit);
9555
9556         return estats;
9557 }
9558
9559 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9560                                                  struct rtnl_link_stats64 *stats)
9561 {
9562         struct tg3 *tp = netdev_priv(dev);
9563         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9564         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9565
9566         if (!hw_stats)
9567                 return old_stats;
9568
9569         stats->rx_packets = old_stats->rx_packets +
9570                 get_stat64(&hw_stats->rx_ucast_packets) +
9571                 get_stat64(&hw_stats->rx_mcast_packets) +
9572                 get_stat64(&hw_stats->rx_bcast_packets);
9573
9574         stats->tx_packets = old_stats->tx_packets +
9575                 get_stat64(&hw_stats->tx_ucast_packets) +
9576                 get_stat64(&hw_stats->tx_mcast_packets) +
9577                 get_stat64(&hw_stats->tx_bcast_packets);
9578
9579         stats->rx_bytes = old_stats->rx_bytes +
9580                 get_stat64(&hw_stats->rx_octets);
9581         stats->tx_bytes = old_stats->tx_bytes +
9582                 get_stat64(&hw_stats->tx_octets);
9583
9584         stats->rx_errors = old_stats->rx_errors +
9585                 get_stat64(&hw_stats->rx_errors);
9586         stats->tx_errors = old_stats->tx_errors +
9587                 get_stat64(&hw_stats->tx_errors) +
9588                 get_stat64(&hw_stats->tx_mac_errors) +
9589                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9590                 get_stat64(&hw_stats->tx_discards);
9591
9592         stats->multicast = old_stats->multicast +
9593                 get_stat64(&hw_stats->rx_mcast_packets);
9594         stats->collisions = old_stats->collisions +
9595                 get_stat64(&hw_stats->tx_collisions);
9596
9597         stats->rx_length_errors = old_stats->rx_length_errors +
9598                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9599                 get_stat64(&hw_stats->rx_undersize_packets);
9600
9601         stats->rx_over_errors = old_stats->rx_over_errors +
9602                 get_stat64(&hw_stats->rxbds_empty);
9603         stats->rx_frame_errors = old_stats->rx_frame_errors +
9604                 get_stat64(&hw_stats->rx_align_errors);
9605         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9606                 get_stat64(&hw_stats->tx_discards);
9607         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9608                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9609
9610         stats->rx_crc_errors = old_stats->rx_crc_errors +
9611                 calc_crc_errors(tp);
9612
9613         stats->rx_missed_errors = old_stats->rx_missed_errors +
9614                 get_stat64(&hw_stats->rx_discards);
9615
9616         stats->rx_dropped = tp->rx_dropped;
9617
9618         return stats;
9619 }
9620
9621 static inline u32 calc_crc(unsigned char *buf, int len)
9622 {
9623         u32 reg;
9624         u32 tmp;
9625         int j, k;
9626
9627         reg = 0xffffffff;
9628
9629         for (j = 0; j < len; j++) {
9630                 reg ^= buf[j];
9631
9632                 for (k = 0; k < 8; k++) {
9633                         tmp = reg & 0x01;
9634
9635                         reg >>= 1;
9636
9637                         if (tmp)
9638                                 reg ^= 0xedb88320;
9639                 }
9640         }
9641
9642         return ~reg;
9643 }
9644
9645 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9646 {
9647         /* accept or reject all multicast frames */
9648         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9649         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9650         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9651         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9652 }
9653
9654 static void __tg3_set_rx_mode(struct net_device *dev)
9655 {
9656         struct tg3 *tp = netdev_priv(dev);
9657         u32 rx_mode;
9658
9659         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9660                                   RX_MODE_KEEP_VLAN_TAG);
9661
9662 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9663         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9664          * flag clear.
9665          */
9666         if (!tg3_flag(tp, ENABLE_ASF))
9667                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9668 #endif
9669
9670         if (dev->flags & IFF_PROMISC) {
9671                 /* Promiscuous mode. */
9672                 rx_mode |= RX_MODE_PROMISC;
9673         } else if (dev->flags & IFF_ALLMULTI) {
9674                 /* Accept all multicast. */
9675                 tg3_set_multi(tp, 1);
9676         } else if (netdev_mc_empty(dev)) {
9677                 /* Reject all multicast. */
9678                 tg3_set_multi(tp, 0);
9679         } else {
9680                 /* Accept one or more multicast(s). */
9681                 struct netdev_hw_addr *ha;
9682                 u32 mc_filter[4] = { 0, };
9683                 u32 regidx;
9684                 u32 bit;
9685                 u32 crc;
9686
9687                 netdev_for_each_mc_addr(ha, dev) {
9688                         crc = calc_crc(ha->addr, ETH_ALEN);
9689                         bit = ~crc & 0x7f;
9690                         regidx = (bit & 0x60) >> 5;
9691                         bit &= 0x1f;
9692                         mc_filter[regidx] |= (1 << bit);
9693                 }
9694
9695                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9696                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9697                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9698                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9699         }
9700
9701         if (rx_mode != tp->rx_mode) {
9702                 tp->rx_mode = rx_mode;
9703                 tw32_f(MAC_RX_MODE, rx_mode);
9704                 udelay(10);
9705         }
9706 }
9707
9708 static void tg3_set_rx_mode(struct net_device *dev)
9709 {
9710         struct tg3 *tp = netdev_priv(dev);
9711
9712         if (!netif_running(dev))
9713                 return;
9714
9715         tg3_full_lock(tp, 0);
9716         __tg3_set_rx_mode(dev);
9717         tg3_full_unlock(tp);
9718 }
9719
9720 static int tg3_get_regs_len(struct net_device *dev)
9721 {
9722         return TG3_REG_BLK_SIZE;
9723 }
9724
9725 static void tg3_get_regs(struct net_device *dev,
9726                 struct ethtool_regs *regs, void *_p)
9727 {
9728         struct tg3 *tp = netdev_priv(dev);
9729
9730         regs->version = 0;
9731
9732         memset(_p, 0, TG3_REG_BLK_SIZE);
9733
9734         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9735                 return;
9736
9737         tg3_full_lock(tp, 0);
9738
9739         tg3_dump_legacy_regs(tp, (u32 *)_p);
9740
9741         tg3_full_unlock(tp);
9742 }
9743
9744 static int tg3_get_eeprom_len(struct net_device *dev)
9745 {
9746         struct tg3 *tp = netdev_priv(dev);
9747
9748         return tp->nvram_size;
9749 }
9750
9751 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9752 {
9753         struct tg3 *tp = netdev_priv(dev);
9754         int ret;
9755         u8  *pd;
9756         u32 i, offset, len, b_offset, b_count;
9757         __be32 val;
9758
9759         if (tg3_flag(tp, NO_NVRAM))
9760                 return -EINVAL;
9761
9762         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9763                 return -EAGAIN;
9764
9765         offset = eeprom->offset;
9766         len = eeprom->len;
9767         eeprom->len = 0;
9768
9769         eeprom->magic = TG3_EEPROM_MAGIC;
9770
9771         if (offset & 3) {
9772                 /* adjustments to start on required 4 byte boundary */
9773                 b_offset = offset & 3;
9774                 b_count = 4 - b_offset;
9775                 if (b_count > len) {
9776                         /* i.e. offset=1 len=2 */
9777                         b_count = len;
9778                 }
9779                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9780                 if (ret)
9781                         return ret;
9782                 memcpy(data, ((char *)&val) + b_offset, b_count);
9783                 len -= b_count;
9784                 offset += b_count;
9785                 eeprom->len += b_count;
9786         }
9787
9788         /* read bytes up to the last 4 byte boundary */
9789         pd = &data[eeprom->len];
9790         for (i = 0; i < (len - (len & 3)); i += 4) {
9791                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9792                 if (ret) {
9793                         eeprom->len += i;
9794                         return ret;
9795                 }
9796                 memcpy(pd + i, &val, 4);
9797         }
9798         eeprom->len += i;
9799
9800         if (len & 3) {
9801                 /* read last bytes not ending on 4 byte boundary */
9802                 pd = &data[eeprom->len];
9803                 b_count = len & 3;
9804                 b_offset = offset + len - b_count;
9805                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9806                 if (ret)
9807                         return ret;
9808                 memcpy(pd, &val, b_count);
9809                 eeprom->len += b_count;
9810         }
9811         return 0;
9812 }
9813
9814 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9815
9816 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9817 {
9818         struct tg3 *tp = netdev_priv(dev);
9819         int ret;
9820         u32 offset, len, b_offset, odd_len;
9821         u8 *buf;
9822         __be32 start, end;
9823
9824         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9825                 return -EAGAIN;
9826
9827         if (tg3_flag(tp, NO_NVRAM) ||
9828             eeprom->magic != TG3_EEPROM_MAGIC)
9829                 return -EINVAL;
9830
9831         offset = eeprom->offset;
9832         len = eeprom->len;
9833
9834         if ((b_offset = (offset & 3))) {
9835                 /* adjustments to start on required 4 byte boundary */
9836                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9837                 if (ret)
9838                         return ret;
9839                 len += b_offset;
9840                 offset &= ~3;
9841                 if (len < 4)
9842                         len = 4;
9843         }
9844
9845         odd_len = 0;
9846         if (len & 3) {
9847                 /* adjustments to end on required 4 byte boundary */
9848                 odd_len = 1;
9849                 len = (len + 3) & ~3;
9850                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9851                 if (ret)
9852                         return ret;
9853         }
9854
9855         buf = data;
9856         if (b_offset || odd_len) {
9857                 buf = kmalloc(len, GFP_KERNEL);
9858                 if (!buf)
9859                         return -ENOMEM;
9860                 if (b_offset)
9861                         memcpy(buf, &start, 4);
9862                 if (odd_len)
9863                         memcpy(buf+len-4, &end, 4);
9864                 memcpy(buf + b_offset, data, eeprom->len);
9865         }
9866
9867         ret = tg3_nvram_write_block(tp, offset, len, buf);
9868
9869         if (buf != data)
9870                 kfree(buf);
9871
9872         return ret;
9873 }
9874
9875 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9876 {
9877         struct tg3 *tp = netdev_priv(dev);
9878
9879         if (tg3_flag(tp, USE_PHYLIB)) {
9880                 struct phy_device *phydev;
9881                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9882                         return -EAGAIN;
9883                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9884                 return phy_ethtool_gset(phydev, cmd);
9885         }
9886
9887         cmd->supported = (SUPPORTED_Autoneg);
9888
9889         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9890                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9891                                    SUPPORTED_1000baseT_Full);
9892
9893         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9894                 cmd->supported |= (SUPPORTED_100baseT_Half |
9895                                   SUPPORTED_100baseT_Full |
9896                                   SUPPORTED_10baseT_Half |
9897                                   SUPPORTED_10baseT_Full |
9898                                   SUPPORTED_TP);
9899                 cmd->port = PORT_TP;
9900         } else {
9901                 cmd->supported |= SUPPORTED_FIBRE;
9902                 cmd->port = PORT_FIBRE;
9903         }
9904
9905         cmd->advertising = tp->link_config.advertising;
9906         if (netif_running(dev)) {
9907                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9908                 cmd->duplex = tp->link_config.active_duplex;
9909         } else {
9910                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9911                 cmd->duplex = DUPLEX_INVALID;
9912         }
9913         cmd->phy_address = tp->phy_addr;
9914         cmd->transceiver = XCVR_INTERNAL;
9915         cmd->autoneg = tp->link_config.autoneg;
9916         cmd->maxtxpkt = 0;
9917         cmd->maxrxpkt = 0;
9918         return 0;
9919 }
9920
9921 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9922 {
9923         struct tg3 *tp = netdev_priv(dev);
9924         u32 speed = ethtool_cmd_speed(cmd);
9925
9926         if (tg3_flag(tp, USE_PHYLIB)) {
9927                 struct phy_device *phydev;
9928                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9929                         return -EAGAIN;
9930                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9931                 return phy_ethtool_sset(phydev, cmd);
9932         }
9933
9934         if (cmd->autoneg != AUTONEG_ENABLE &&
9935             cmd->autoneg != AUTONEG_DISABLE)
9936                 return -EINVAL;
9937
9938         if (cmd->autoneg == AUTONEG_DISABLE &&
9939             cmd->duplex != DUPLEX_FULL &&
9940             cmd->duplex != DUPLEX_HALF)
9941                 return -EINVAL;
9942
9943         if (cmd->autoneg == AUTONEG_ENABLE) {
9944                 u32 mask = ADVERTISED_Autoneg |
9945                            ADVERTISED_Pause |
9946                            ADVERTISED_Asym_Pause;
9947
9948                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9949                         mask |= ADVERTISED_1000baseT_Half |
9950                                 ADVERTISED_1000baseT_Full;
9951
9952                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9953                         mask |= ADVERTISED_100baseT_Half |
9954                                 ADVERTISED_100baseT_Full |
9955                                 ADVERTISED_10baseT_Half |
9956                                 ADVERTISED_10baseT_Full |
9957                                 ADVERTISED_TP;
9958                 else
9959                         mask |= ADVERTISED_FIBRE;
9960
9961                 if (cmd->advertising & ~mask)
9962                         return -EINVAL;
9963
9964                 mask &= (ADVERTISED_1000baseT_Half |
9965                          ADVERTISED_1000baseT_Full |
9966                          ADVERTISED_100baseT_Half |
9967                          ADVERTISED_100baseT_Full |
9968                          ADVERTISED_10baseT_Half |
9969                          ADVERTISED_10baseT_Full);
9970
9971                 cmd->advertising &= mask;
9972         } else {
9973                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9974                         if (speed != SPEED_1000)
9975                                 return -EINVAL;
9976
9977                         if (cmd->duplex != DUPLEX_FULL)
9978                                 return -EINVAL;
9979                 } else {
9980                         if (speed != SPEED_100 &&
9981                             speed != SPEED_10)
9982                                 return -EINVAL;
9983                 }
9984         }
9985
9986         tg3_full_lock(tp, 0);
9987
9988         tp->link_config.autoneg = cmd->autoneg;
9989         if (cmd->autoneg == AUTONEG_ENABLE) {
9990                 tp->link_config.advertising = (cmd->advertising |
9991                                               ADVERTISED_Autoneg);
9992                 tp->link_config.speed = SPEED_INVALID;
9993                 tp->link_config.duplex = DUPLEX_INVALID;
9994         } else {
9995                 tp->link_config.advertising = 0;
9996                 tp->link_config.speed = speed;
9997                 tp->link_config.duplex = cmd->duplex;
9998         }
9999
10000         tp->link_config.orig_speed = tp->link_config.speed;
10001         tp->link_config.orig_duplex = tp->link_config.duplex;
10002         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10003
10004         if (netif_running(dev))
10005                 tg3_setup_phy(tp, 1);
10006
10007         tg3_full_unlock(tp);
10008
10009         return 0;
10010 }
10011
10012 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10013 {
10014         struct tg3 *tp = netdev_priv(dev);
10015
10016         strcpy(info->driver, DRV_MODULE_NAME);
10017         strcpy(info->version, DRV_MODULE_VERSION);
10018         strcpy(info->fw_version, tp->fw_ver);
10019         strcpy(info->bus_info, pci_name(tp->pdev));
10020 }
10021
10022 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10023 {
10024         struct tg3 *tp = netdev_priv(dev);
10025
10026         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10027                 wol->supported = WAKE_MAGIC;
10028         else
10029                 wol->supported = 0;
10030         wol->wolopts = 0;
10031         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10032                 wol->wolopts = WAKE_MAGIC;
10033         memset(&wol->sopass, 0, sizeof(wol->sopass));
10034 }
10035
10036 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10037 {
10038         struct tg3 *tp = netdev_priv(dev);
10039         struct device *dp = &tp->pdev->dev;
10040
10041         if (wol->wolopts & ~WAKE_MAGIC)
10042                 return -EINVAL;
10043         if ((wol->wolopts & WAKE_MAGIC) &&
10044             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10045                 return -EINVAL;
10046
10047         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10048
10049         spin_lock_bh(&tp->lock);
10050         if (device_may_wakeup(dp))
10051                 tg3_flag_set(tp, WOL_ENABLE);
10052         else
10053                 tg3_flag_clear(tp, WOL_ENABLE);
10054         spin_unlock_bh(&tp->lock);
10055
10056         return 0;
10057 }
10058
10059 static u32 tg3_get_msglevel(struct net_device *dev)
10060 {
10061         struct tg3 *tp = netdev_priv(dev);
10062         return tp->msg_enable;
10063 }
10064
10065 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10066 {
10067         struct tg3 *tp = netdev_priv(dev);
10068         tp->msg_enable = value;
10069 }
10070
10071 static int tg3_nway_reset(struct net_device *dev)
10072 {
10073         struct tg3 *tp = netdev_priv(dev);
10074         int r;
10075
10076         if (!netif_running(dev))
10077                 return -EAGAIN;
10078
10079         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10080                 return -EINVAL;
10081
10082         if (tg3_flag(tp, USE_PHYLIB)) {
10083                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10084                         return -EAGAIN;
10085                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10086         } else {
10087                 u32 bmcr;
10088
10089                 spin_lock_bh(&tp->lock);
10090                 r = -EINVAL;
10091                 tg3_readphy(tp, MII_BMCR, &bmcr);
10092                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10093                     ((bmcr & BMCR_ANENABLE) ||
10094                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10095                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10096                                                    BMCR_ANENABLE);
10097                         r = 0;
10098                 }
10099                 spin_unlock_bh(&tp->lock);
10100         }
10101
10102         return r;
10103 }
10104
10105 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10106 {
10107         struct tg3 *tp = netdev_priv(dev);
10108
10109         ering->rx_max_pending = tp->rx_std_ring_mask;
10110         ering->rx_mini_max_pending = 0;
10111         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10112                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10113         else
10114                 ering->rx_jumbo_max_pending = 0;
10115
10116         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10117
10118         ering->rx_pending = tp->rx_pending;
10119         ering->rx_mini_pending = 0;
10120         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10121                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10122         else
10123                 ering->rx_jumbo_pending = 0;
10124
10125         ering->tx_pending = tp->napi[0].tx_pending;
10126 }
10127
10128 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10129 {
10130         struct tg3 *tp = netdev_priv(dev);
10131         int i, irq_sync = 0, err = 0;
10132
10133         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10134             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10135             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10136             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10137             (tg3_flag(tp, TSO_BUG) &&
10138              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10139                 return -EINVAL;
10140
10141         if (netif_running(dev)) {
10142                 tg3_phy_stop(tp);
10143                 tg3_netif_stop(tp);
10144                 irq_sync = 1;
10145         }
10146
10147         tg3_full_lock(tp, irq_sync);
10148
10149         tp->rx_pending = ering->rx_pending;
10150
10151         if (tg3_flag(tp, MAX_RXPEND_64) &&
10152             tp->rx_pending > 63)
10153                 tp->rx_pending = 63;
10154         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10155
10156         for (i = 0; i < tp->irq_max; i++)
10157                 tp->napi[i].tx_pending = ering->tx_pending;
10158
10159         if (netif_running(dev)) {
10160                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10161                 err = tg3_restart_hw(tp, 1);
10162                 if (!err)
10163                         tg3_netif_start(tp);
10164         }
10165
10166         tg3_full_unlock(tp);
10167
10168         if (irq_sync && !err)
10169                 tg3_phy_start(tp);
10170
10171         return err;
10172 }
10173
10174 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10175 {
10176         struct tg3 *tp = netdev_priv(dev);
10177
10178         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10179
10180         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10181                 epause->rx_pause = 1;
10182         else
10183                 epause->rx_pause = 0;
10184
10185         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10186                 epause->tx_pause = 1;
10187         else
10188                 epause->tx_pause = 0;
10189 }
10190
10191 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10192 {
10193         struct tg3 *tp = netdev_priv(dev);
10194         int err = 0;
10195
10196         if (tg3_flag(tp, USE_PHYLIB)) {
10197                 u32 newadv;
10198                 struct phy_device *phydev;
10199
10200                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10201
10202                 if (!(phydev->supported & SUPPORTED_Pause) ||
10203                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10204                      (epause->rx_pause != epause->tx_pause)))
10205                         return -EINVAL;
10206
10207                 tp->link_config.flowctrl = 0;
10208                 if (epause->rx_pause) {
10209                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10210
10211                         if (epause->tx_pause) {
10212                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10213                                 newadv = ADVERTISED_Pause;
10214                         } else
10215                                 newadv = ADVERTISED_Pause |
10216                                          ADVERTISED_Asym_Pause;
10217                 } else if (epause->tx_pause) {
10218                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10219                         newadv = ADVERTISED_Asym_Pause;
10220                 } else
10221                         newadv = 0;
10222
10223                 if (epause->autoneg)
10224                         tg3_flag_set(tp, PAUSE_AUTONEG);
10225                 else
10226                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10227
10228                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10229                         u32 oldadv = phydev->advertising &
10230                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10231                         if (oldadv != newadv) {
10232                                 phydev->advertising &=
10233                                         ~(ADVERTISED_Pause |
10234                                           ADVERTISED_Asym_Pause);
10235                                 phydev->advertising |= newadv;
10236                                 if (phydev->autoneg) {
10237                                         /*
10238                                          * Always renegotiate the link to
10239                                          * inform our link partner of our
10240                                          * flow control settings, even if the
10241                                          * flow control is forced.  Let
10242                                          * tg3_adjust_link() do the final
10243                                          * flow control setup.
10244                                          */
10245                                         return phy_start_aneg(phydev);
10246                                 }
10247                         }
10248
10249                         if (!epause->autoneg)
10250                                 tg3_setup_flow_control(tp, 0, 0);
10251                 } else {
10252                         tp->link_config.orig_advertising &=
10253                                         ~(ADVERTISED_Pause |
10254                                           ADVERTISED_Asym_Pause);
10255                         tp->link_config.orig_advertising |= newadv;
10256                 }
10257         } else {
10258                 int irq_sync = 0;
10259
10260                 if (netif_running(dev)) {
10261                         tg3_netif_stop(tp);
10262                         irq_sync = 1;
10263                 }
10264
10265                 tg3_full_lock(tp, irq_sync);
10266
10267                 if (epause->autoneg)
10268                         tg3_flag_set(tp, PAUSE_AUTONEG);
10269                 else
10270                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10271                 if (epause->rx_pause)
10272                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10273                 else
10274                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10275                 if (epause->tx_pause)
10276                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10277                 else
10278                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10279
10280                 if (netif_running(dev)) {
10281                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10282                         err = tg3_restart_hw(tp, 1);
10283                         if (!err)
10284                                 tg3_netif_start(tp);
10285                 }
10286
10287                 tg3_full_unlock(tp);
10288         }
10289
10290         return err;
10291 }
10292
10293 static int tg3_get_sset_count(struct net_device *dev, int sset)
10294 {
10295         switch (sset) {
10296         case ETH_SS_TEST:
10297                 return TG3_NUM_TEST;
10298         case ETH_SS_STATS:
10299                 return TG3_NUM_STATS;
10300         default:
10301                 return -EOPNOTSUPP;
10302         }
10303 }
10304
10305 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10306 {
10307         switch (stringset) {
10308         case ETH_SS_STATS:
10309                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10310                 break;
10311         case ETH_SS_TEST:
10312                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10313                 break;
10314         default:
10315                 WARN_ON(1);     /* we need a WARN() */
10316                 break;
10317         }
10318 }
10319
10320 static int tg3_set_phys_id(struct net_device *dev,
10321                             enum ethtool_phys_id_state state)
10322 {
10323         struct tg3 *tp = netdev_priv(dev);
10324
10325         if (!netif_running(tp->dev))
10326                 return -EAGAIN;
10327
10328         switch (state) {
10329         case ETHTOOL_ID_ACTIVE:
10330                 return 1;       /* cycle on/off once per second */
10331
10332         case ETHTOOL_ID_ON:
10333                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10334                      LED_CTRL_1000MBPS_ON |
10335                      LED_CTRL_100MBPS_ON |
10336                      LED_CTRL_10MBPS_ON |
10337                      LED_CTRL_TRAFFIC_OVERRIDE |
10338                      LED_CTRL_TRAFFIC_BLINK |
10339                      LED_CTRL_TRAFFIC_LED);
10340                 break;
10341
10342         case ETHTOOL_ID_OFF:
10343                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10344                      LED_CTRL_TRAFFIC_OVERRIDE);
10345                 break;
10346
10347         case ETHTOOL_ID_INACTIVE:
10348                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10349                 break;
10350         }
10351
10352         return 0;
10353 }
10354
10355 static void tg3_get_ethtool_stats(struct net_device *dev,
10356                                    struct ethtool_stats *estats, u64 *tmp_stats)
10357 {
10358         struct tg3 *tp = netdev_priv(dev);
10359         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10360 }
10361
10362 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10363 {
10364         int i;
10365         __be32 *buf;
10366         u32 offset = 0, len = 0;
10367         u32 magic, val;
10368
10369         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10370                 return NULL;
10371
10372         if (magic == TG3_EEPROM_MAGIC) {
10373                 for (offset = TG3_NVM_DIR_START;
10374                      offset < TG3_NVM_DIR_END;
10375                      offset += TG3_NVM_DIRENT_SIZE) {
10376                         if (tg3_nvram_read(tp, offset, &val))
10377                                 return NULL;
10378
10379                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10380                             TG3_NVM_DIRTYPE_EXTVPD)
10381                                 break;
10382                 }
10383
10384                 if (offset != TG3_NVM_DIR_END) {
10385                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10386                         if (tg3_nvram_read(tp, offset + 4, &offset))
10387                                 return NULL;
10388
10389                         offset = tg3_nvram_logical_addr(tp, offset);
10390                 }
10391         }
10392
10393         if (!offset || !len) {
10394                 offset = TG3_NVM_VPD_OFF;
10395                 len = TG3_NVM_VPD_LEN;
10396         }
10397
10398         buf = kmalloc(len, GFP_KERNEL);
10399         if (buf == NULL)
10400                 return NULL;
10401
10402         if (magic == TG3_EEPROM_MAGIC) {
10403                 for (i = 0; i < len; i += 4) {
10404                         /* The data is in little-endian format in NVRAM.
10405                          * Use the big-endian read routines to preserve
10406                          * the byte order as it exists in NVRAM.
10407                          */
10408                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10409                                 goto error;
10410                 }
10411         } else {
10412                 u8 *ptr;
10413                 ssize_t cnt;
10414                 unsigned int pos = 0;
10415
10416                 ptr = (u8 *)&buf[0];
10417                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10418                         cnt = pci_read_vpd(tp->pdev, pos,
10419                                            len - pos, ptr);
10420                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10421                                 cnt = 0;
10422                         else if (cnt < 0)
10423                                 goto error;
10424                 }
10425                 if (pos != len)
10426                         goto error;
10427         }
10428
10429         return buf;
10430
10431 error:
10432         kfree(buf);
10433         return NULL;
10434 }
10435
10436 #define NVRAM_TEST_SIZE 0x100
10437 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10438 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10439 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10440 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10441 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10442
10443 static int tg3_test_nvram(struct tg3 *tp)
10444 {
10445         u32 csum, magic;
10446         __be32 *buf;
10447         int i, j, k, err = 0, size;
10448
10449         if (tg3_flag(tp, NO_NVRAM))
10450                 return 0;
10451
10452         if (tg3_nvram_read(tp, 0, &magic) != 0)
10453                 return -EIO;
10454
10455         if (magic == TG3_EEPROM_MAGIC)
10456                 size = NVRAM_TEST_SIZE;
10457         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10458                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10459                     TG3_EEPROM_SB_FORMAT_1) {
10460                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10461                         case TG3_EEPROM_SB_REVISION_0:
10462                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10463                                 break;
10464                         case TG3_EEPROM_SB_REVISION_2:
10465                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10466                                 break;
10467                         case TG3_EEPROM_SB_REVISION_3:
10468                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10469                                 break;
10470                         default:
10471                                 return 0;
10472                         }
10473                 } else
10474                         return 0;
10475         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10476                 size = NVRAM_SELFBOOT_HW_SIZE;
10477         else
10478                 return -EIO;
10479
10480         buf = kmalloc(size, GFP_KERNEL);
10481         if (buf == NULL)
10482                 return -ENOMEM;
10483
10484         err = -EIO;
10485         for (i = 0, j = 0; i < size; i += 4, j++) {
10486                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10487                 if (err)
10488                         break;
10489         }
10490         if (i < size)
10491                 goto out;
10492
10493         /* Selfboot format */
10494         magic = be32_to_cpu(buf[0]);
10495         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10496             TG3_EEPROM_MAGIC_FW) {
10497                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10498
10499                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10500                     TG3_EEPROM_SB_REVISION_2) {
10501                         /* For rev 2, the csum doesn't include the MBA. */
10502                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10503                                 csum8 += buf8[i];
10504                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10505                                 csum8 += buf8[i];
10506                 } else {
10507                         for (i = 0; i < size; i++)
10508                                 csum8 += buf8[i];
10509                 }
10510
10511                 if (csum8 == 0) {
10512                         err = 0;
10513                         goto out;
10514                 }
10515
10516                 err = -EIO;
10517                 goto out;
10518         }
10519
10520         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10521             TG3_EEPROM_MAGIC_HW) {
10522                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10523                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10524                 u8 *buf8 = (u8 *) buf;
10525
10526                 /* Separate the parity bits and the data bytes.  */
10527                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10528                         if ((i == 0) || (i == 8)) {
10529                                 int l;
10530                                 u8 msk;
10531
10532                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10533                                         parity[k++] = buf8[i] & msk;
10534                                 i++;
10535                         } else if (i == 16) {
10536                                 int l;
10537                                 u8 msk;
10538
10539                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10540                                         parity[k++] = buf8[i] & msk;
10541                                 i++;
10542
10543                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10544                                         parity[k++] = buf8[i] & msk;
10545                                 i++;
10546                         }
10547                         data[j++] = buf8[i];
10548                 }
10549
10550                 err = -EIO;
10551                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10552                         u8 hw8 = hweight8(data[i]);
10553
10554                         if ((hw8 & 0x1) && parity[i])
10555                                 goto out;
10556                         else if (!(hw8 & 0x1) && !parity[i])
10557                                 goto out;
10558                 }
10559                 err = 0;
10560                 goto out;
10561         }
10562
10563         err = -EIO;
10564
10565         /* Bootstrap checksum at offset 0x10 */
10566         csum = calc_crc((unsigned char *) buf, 0x10);
10567         if (csum != le32_to_cpu(buf[0x10/4]))
10568                 goto out;
10569
10570         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10571         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10572         if (csum != le32_to_cpu(buf[0xfc/4]))
10573                 goto out;
10574
10575         kfree(buf);
10576
10577         buf = tg3_vpd_readblock(tp);
10578         if (!buf)
10579                 return -ENOMEM;
10580
10581         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10582                              PCI_VPD_LRDT_RO_DATA);
10583         if (i > 0) {
10584                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10585                 if (j < 0)
10586                         goto out;
10587
10588                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10589                         goto out;
10590
10591                 i += PCI_VPD_LRDT_TAG_SIZE;
10592                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10593                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10594                 if (j > 0) {
10595                         u8 csum8 = 0;
10596
10597                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10598
10599                         for (i = 0; i <= j; i++)
10600                                 csum8 += ((u8 *)buf)[i];
10601
10602                         if (csum8)
10603                                 goto out;
10604                 }
10605         }
10606
10607         err = 0;
10608
10609 out:
10610         kfree(buf);
10611         return err;
10612 }
10613
10614 #define TG3_SERDES_TIMEOUT_SEC  2
10615 #define TG3_COPPER_TIMEOUT_SEC  6
10616
10617 static int tg3_test_link(struct tg3 *tp)
10618 {
10619         int i, max;
10620
10621         if (!netif_running(tp->dev))
10622                 return -ENODEV;
10623
10624         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10625                 max = TG3_SERDES_TIMEOUT_SEC;
10626         else
10627                 max = TG3_COPPER_TIMEOUT_SEC;
10628
10629         for (i = 0; i < max; i++) {
10630                 if (netif_carrier_ok(tp->dev))
10631                         return 0;
10632
10633                 if (msleep_interruptible(1000))
10634                         break;
10635         }
10636
10637         return -EIO;
10638 }
10639
10640 /* Only test the commonly used registers */
10641 static int tg3_test_registers(struct tg3 *tp)
10642 {
10643         int i, is_5705, is_5750;
10644         u32 offset, read_mask, write_mask, val, save_val, read_val;
10645         static struct {
10646                 u16 offset;
10647                 u16 flags;
10648 #define TG3_FL_5705     0x1
10649 #define TG3_FL_NOT_5705 0x2
10650 #define TG3_FL_NOT_5788 0x4
10651 #define TG3_FL_NOT_5750 0x8
10652                 u32 read_mask;
10653                 u32 write_mask;
10654         } reg_tbl[] = {
10655                 /* MAC Control Registers */
10656                 { MAC_MODE, TG3_FL_NOT_5705,
10657                         0x00000000, 0x00ef6f8c },
10658                 { MAC_MODE, TG3_FL_5705,
10659                         0x00000000, 0x01ef6b8c },
10660                 { MAC_STATUS, TG3_FL_NOT_5705,
10661                         0x03800107, 0x00000000 },
10662                 { MAC_STATUS, TG3_FL_5705,
10663                         0x03800100, 0x00000000 },
10664                 { MAC_ADDR_0_HIGH, 0x0000,
10665                         0x00000000, 0x0000ffff },
10666                 { MAC_ADDR_0_LOW, 0x0000,
10667                         0x00000000, 0xffffffff },
10668                 { MAC_RX_MTU_SIZE, 0x0000,
10669                         0x00000000, 0x0000ffff },
10670                 { MAC_TX_MODE, 0x0000,
10671                         0x00000000, 0x00000070 },
10672                 { MAC_TX_LENGTHS, 0x0000,
10673                         0x00000000, 0x00003fff },
10674                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10675                         0x00000000, 0x000007fc },
10676                 { MAC_RX_MODE, TG3_FL_5705,
10677                         0x00000000, 0x000007dc },
10678                 { MAC_HASH_REG_0, 0x0000,
10679                         0x00000000, 0xffffffff },
10680                 { MAC_HASH_REG_1, 0x0000,
10681                         0x00000000, 0xffffffff },
10682                 { MAC_HASH_REG_2, 0x0000,
10683                         0x00000000, 0xffffffff },
10684                 { MAC_HASH_REG_3, 0x0000,
10685                         0x00000000, 0xffffffff },
10686
10687                 /* Receive Data and Receive BD Initiator Control Registers. */
10688                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10689                         0x00000000, 0xffffffff },
10690                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10691                         0x00000000, 0xffffffff },
10692                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10693                         0x00000000, 0x00000003 },
10694                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10695                         0x00000000, 0xffffffff },
10696                 { RCVDBDI_STD_BD+0, 0x0000,
10697                         0x00000000, 0xffffffff },
10698                 { RCVDBDI_STD_BD+4, 0x0000,
10699                         0x00000000, 0xffffffff },
10700                 { RCVDBDI_STD_BD+8, 0x0000,
10701                         0x00000000, 0xffff0002 },
10702                 { RCVDBDI_STD_BD+0xc, 0x0000,
10703                         0x00000000, 0xffffffff },
10704
10705                 /* Receive BD Initiator Control Registers. */
10706                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10707                         0x00000000, 0xffffffff },
10708                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10709                         0x00000000, 0x000003ff },
10710                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10711                         0x00000000, 0xffffffff },
10712
10713                 /* Host Coalescing Control Registers. */
10714                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10715                         0x00000000, 0x00000004 },
10716                 { HOSTCC_MODE, TG3_FL_5705,
10717                         0x00000000, 0x000000f6 },
10718                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10719                         0x00000000, 0xffffffff },
10720                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10721                         0x00000000, 0x000003ff },
10722                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10723                         0x00000000, 0xffffffff },
10724                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10725                         0x00000000, 0x000003ff },
10726                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10727                         0x00000000, 0xffffffff },
10728                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10729                         0x00000000, 0x000000ff },
10730                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10731                         0x00000000, 0xffffffff },
10732                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10733                         0x00000000, 0x000000ff },
10734                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10735                         0x00000000, 0xffffffff },
10736                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10737                         0x00000000, 0xffffffff },
10738                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10739                         0x00000000, 0xffffffff },
10740                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10741                         0x00000000, 0x000000ff },
10742                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10743                         0x00000000, 0xffffffff },
10744                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10745                         0x00000000, 0x000000ff },
10746                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10747                         0x00000000, 0xffffffff },
10748                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10749                         0x00000000, 0xffffffff },
10750                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10751                         0x00000000, 0xffffffff },
10752                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10753                         0x00000000, 0xffffffff },
10754                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10755                         0x00000000, 0xffffffff },
10756                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10757                         0xffffffff, 0x00000000 },
10758                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10759                         0xffffffff, 0x00000000 },
10760
10761                 /* Buffer Manager Control Registers. */
10762                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10763                         0x00000000, 0x007fff80 },
10764                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10765                         0x00000000, 0x007fffff },
10766                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10767                         0x00000000, 0x0000003f },
10768                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10769                         0x00000000, 0x000001ff },
10770                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10771                         0x00000000, 0x000001ff },
10772                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10773                         0xffffffff, 0x00000000 },
10774                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10775                         0xffffffff, 0x00000000 },
10776
10777                 /* Mailbox Registers */
10778                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10779                         0x00000000, 0x000001ff },
10780                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10781                         0x00000000, 0x000001ff },
10782                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10783                         0x00000000, 0x000007ff },
10784                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10785                         0x00000000, 0x000001ff },
10786
10787                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10788         };
10789
10790         is_5705 = is_5750 = 0;
10791         if (tg3_flag(tp, 5705_PLUS)) {
10792                 is_5705 = 1;
10793                 if (tg3_flag(tp, 5750_PLUS))
10794                         is_5750 = 1;
10795         }
10796
10797         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10798                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10799                         continue;
10800
10801                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10802                         continue;
10803
10804                 if (tg3_flag(tp, IS_5788) &&
10805                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10806                         continue;
10807
10808                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10809                         continue;
10810
10811                 offset = (u32) reg_tbl[i].offset;
10812                 read_mask = reg_tbl[i].read_mask;
10813                 write_mask = reg_tbl[i].write_mask;
10814
10815                 /* Save the original register content */
10816                 save_val = tr32(offset);
10817
10818                 /* Determine the read-only value. */
10819                 read_val = save_val & read_mask;
10820
10821                 /* Write zero to the register, then make sure the read-only bits
10822                  * are not changed and the read/write bits are all zeros.
10823                  */
10824                 tw32(offset, 0);
10825
10826                 val = tr32(offset);
10827
10828                 /* Test the read-only and read/write bits. */
10829                 if (((val & read_mask) != read_val) || (val & write_mask))
10830                         goto out;
10831
10832                 /* Write ones to all the bits defined by RdMask and WrMask, then
10833                  * make sure the read-only bits are not changed and the
10834                  * read/write bits are all ones.
10835                  */
10836                 tw32(offset, read_mask | write_mask);
10837
10838                 val = tr32(offset);
10839
10840                 /* Test the read-only bits. */
10841                 if ((val & read_mask) != read_val)
10842                         goto out;
10843
10844                 /* Test the read/write bits. */
10845                 if ((val & write_mask) != write_mask)
10846                         goto out;
10847
10848                 tw32(offset, save_val);
10849         }
10850
10851         return 0;
10852
10853 out:
10854         if (netif_msg_hw(tp))
10855                 netdev_err(tp->dev,
10856                            "Register test failed at offset %x\n", offset);
10857         tw32(offset, save_val);
10858         return -EIO;
10859 }
10860
10861 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10862 {
10863         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10864         int i;
10865         u32 j;
10866
10867         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10868                 for (j = 0; j < len; j += 4) {
10869                         u32 val;
10870
10871                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10872                         tg3_read_mem(tp, offset + j, &val);
10873                         if (val != test_pattern[i])
10874                                 return -EIO;
10875                 }
10876         }
10877         return 0;
10878 }
10879
10880 static int tg3_test_memory(struct tg3 *tp)
10881 {
10882         static struct mem_entry {
10883                 u32 offset;
10884                 u32 len;
10885         } mem_tbl_570x[] = {
10886                 { 0x00000000, 0x00b50},
10887                 { 0x00002000, 0x1c000},
10888                 { 0xffffffff, 0x00000}
10889         }, mem_tbl_5705[] = {
10890                 { 0x00000100, 0x0000c},
10891                 { 0x00000200, 0x00008},
10892                 { 0x00004000, 0x00800},
10893                 { 0x00006000, 0x01000},
10894                 { 0x00008000, 0x02000},
10895                 { 0x00010000, 0x0e000},
10896                 { 0xffffffff, 0x00000}
10897         }, mem_tbl_5755[] = {
10898                 { 0x00000200, 0x00008},
10899                 { 0x00004000, 0x00800},
10900                 { 0x00006000, 0x00800},
10901                 { 0x00008000, 0x02000},
10902                 { 0x00010000, 0x0c000},
10903                 { 0xffffffff, 0x00000}
10904         }, mem_tbl_5906[] = {
10905                 { 0x00000200, 0x00008},
10906                 { 0x00004000, 0x00400},
10907                 { 0x00006000, 0x00400},
10908                 { 0x00008000, 0x01000},
10909                 { 0x00010000, 0x01000},
10910                 { 0xffffffff, 0x00000}
10911         }, mem_tbl_5717[] = {
10912                 { 0x00000200, 0x00008},
10913                 { 0x00010000, 0x0a000},
10914                 { 0x00020000, 0x13c00},
10915                 { 0xffffffff, 0x00000}
10916         }, mem_tbl_57765[] = {
10917                 { 0x00000200, 0x00008},
10918                 { 0x00004000, 0x00800},
10919                 { 0x00006000, 0x09800},
10920                 { 0x00010000, 0x0a000},
10921                 { 0xffffffff, 0x00000}
10922         };
10923         struct mem_entry *mem_tbl;
10924         int err = 0;
10925         int i;
10926
10927         if (tg3_flag(tp, 5717_PLUS))
10928                 mem_tbl = mem_tbl_5717;
10929         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10930                 mem_tbl = mem_tbl_57765;
10931         else if (tg3_flag(tp, 5755_PLUS))
10932                 mem_tbl = mem_tbl_5755;
10933         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10934                 mem_tbl = mem_tbl_5906;
10935         else if (tg3_flag(tp, 5705_PLUS))
10936                 mem_tbl = mem_tbl_5705;
10937         else
10938                 mem_tbl = mem_tbl_570x;
10939
10940         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10941                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10942                 if (err)
10943                         break;
10944         }
10945
10946         return err;
10947 }
10948
10949 #define TG3_MAC_LOOPBACK        0
10950 #define TG3_PHY_LOOPBACK        1
10951 #define TG3_TSO_LOOPBACK        2
10952
10953 #define TG3_TSO_MSS             500
10954
10955 #define TG3_TSO_IP_HDR_LEN      20
10956 #define TG3_TSO_TCP_HDR_LEN     20
10957 #define TG3_TSO_TCP_OPT_LEN     12
10958
10959 static const u8 tg3_tso_header[] = {
10960 0x08, 0x00,
10961 0x45, 0x00, 0x00, 0x00,
10962 0x00, 0x00, 0x40, 0x00,
10963 0x40, 0x06, 0x00, 0x00,
10964 0x0a, 0x00, 0x00, 0x01,
10965 0x0a, 0x00, 0x00, 0x02,
10966 0x0d, 0x00, 0xe0, 0x00,
10967 0x00, 0x00, 0x01, 0x00,
10968 0x00, 0x00, 0x02, 0x00,
10969 0x80, 0x10, 0x10, 0x00,
10970 0x14, 0x09, 0x00, 0x00,
10971 0x01, 0x01, 0x08, 0x0a,
10972 0x11, 0x11, 0x11, 0x11,
10973 0x11, 0x11, 0x11, 0x11,
10974 };
10975
10976 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10977 {
10978         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10979         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10980         struct sk_buff *skb, *rx_skb;
10981         u8 *tx_data;
10982         dma_addr_t map;
10983         int num_pkts, tx_len, rx_len, i, err;
10984         struct tg3_rx_buffer_desc *desc;
10985         struct tg3_napi *tnapi, *rnapi;
10986         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10987
10988         tnapi = &tp->napi[0];
10989         rnapi = &tp->napi[0];
10990         if (tp->irq_cnt > 1) {
10991                 if (tg3_flag(tp, ENABLE_RSS))
10992                         rnapi = &tp->napi[1];
10993                 if (tg3_flag(tp, ENABLE_TSS))
10994                         tnapi = &tp->napi[1];
10995         }
10996         coal_now = tnapi->coal_now | rnapi->coal_now;
10997
10998         if (loopback_mode == TG3_MAC_LOOPBACK) {
10999                 /* HW errata - mac loopback fails in some cases on 5780.
11000                  * Normal traffic and PHY loopback are not affected by
11001                  * errata.  Also, the MAC loopback test is deprecated for
11002                  * all newer ASIC revisions.
11003                  */
11004                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11005                     tg3_flag(tp, CPMU_PRESENT))
11006                         return 0;
11007
11008                 mac_mode = tp->mac_mode &
11009                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11010                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11011                 if (!tg3_flag(tp, 5705_PLUS))
11012                         mac_mode |= MAC_MODE_LINK_POLARITY;
11013                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11014                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11015                 else
11016                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11017                 tw32(MAC_MODE, mac_mode);
11018         } else {
11019                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11020                         tg3_phy_fet_toggle_apd(tp, false);
11021                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11022                 } else
11023                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11024
11025                 tg3_phy_toggle_automdix(tp, 0);
11026
11027                 tg3_writephy(tp, MII_BMCR, val);
11028                 udelay(40);
11029
11030                 mac_mode = tp->mac_mode &
11031                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11032                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11033                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11034                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11035                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11036                         /* The write needs to be flushed for the AC131 */
11037                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11038                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11039                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11040                 } else
11041                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11042
11043                 /* reset to prevent losing 1st rx packet intermittently */
11044                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11045                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11046                         udelay(10);
11047                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11048                 }
11049                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11050                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11051                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11052                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11053                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11054                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11055                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11056                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11057                 }
11058                 tw32(MAC_MODE, mac_mode);
11059
11060                 /* Wait for link */
11061                 for (i = 0; i < 100; i++) {
11062                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11063                                 break;
11064                         mdelay(1);
11065                 }
11066         }
11067
11068         err = -EIO;
11069
11070         tx_len = pktsz;
11071         skb = netdev_alloc_skb(tp->dev, tx_len);
11072         if (!skb)
11073                 return -ENOMEM;
11074
11075         tx_data = skb_put(skb, tx_len);
11076         memcpy(tx_data, tp->dev->dev_addr, 6);
11077         memset(tx_data + 6, 0x0, 8);
11078
11079         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11080
11081         if (loopback_mode == TG3_TSO_LOOPBACK) {
11082                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11083
11084                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11085                               TG3_TSO_TCP_OPT_LEN;
11086
11087                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11088                        sizeof(tg3_tso_header));
11089                 mss = TG3_TSO_MSS;
11090
11091                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11092                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11093
11094                 /* Set the total length field in the IP header */
11095                 iph->tot_len = htons((u16)(mss + hdr_len));
11096
11097                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11098                               TXD_FLAG_CPU_POST_DMA);
11099
11100                 if (tg3_flag(tp, HW_TSO_1) ||
11101                     tg3_flag(tp, HW_TSO_2) ||
11102                     tg3_flag(tp, HW_TSO_3)) {
11103                         struct tcphdr *th;
11104                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11105                         th = (struct tcphdr *)&tx_data[val];
11106                         th->check = 0;
11107                 } else
11108                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11109
11110                 if (tg3_flag(tp, HW_TSO_3)) {
11111                         mss |= (hdr_len & 0xc) << 12;
11112                         if (hdr_len & 0x10)
11113                                 base_flags |= 0x00000010;
11114                         base_flags |= (hdr_len & 0x3e0) << 5;
11115                 } else if (tg3_flag(tp, HW_TSO_2))
11116                         mss |= hdr_len << 9;
11117                 else if (tg3_flag(tp, HW_TSO_1) ||
11118                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11119                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11120                 } else {
11121                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11122                 }
11123
11124                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11125         } else {
11126                 num_pkts = 1;
11127                 data_off = ETH_HLEN;
11128         }
11129
11130         for (i = data_off; i < tx_len; i++)
11131                 tx_data[i] = (u8) (i & 0xff);
11132
11133         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11134         if (pci_dma_mapping_error(tp->pdev, map)) {
11135                 dev_kfree_skb(skb);
11136                 return -EIO;
11137         }
11138
11139         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11140                rnapi->coal_now);
11141
11142         udelay(10);
11143
11144         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11145
11146         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11147                     base_flags, (mss << 1) | 1);
11148
11149         tnapi->tx_prod++;
11150
11151         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11152         tr32_mailbox(tnapi->prodmbox);
11153
11154         udelay(10);
11155
11156         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11157         for (i = 0; i < 35; i++) {
11158                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11159                        coal_now);
11160
11161                 udelay(10);
11162
11163                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11164                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11165                 if ((tx_idx == tnapi->tx_prod) &&
11166                     (rx_idx == (rx_start_idx + num_pkts)))
11167                         break;
11168         }
11169
11170         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11171         dev_kfree_skb(skb);
11172
11173         if (tx_idx != tnapi->tx_prod)
11174                 goto out;
11175
11176         if (rx_idx != rx_start_idx + num_pkts)
11177                 goto out;
11178
11179         val = data_off;
11180         while (rx_idx != rx_start_idx) {
11181                 desc = &rnapi->rx_rcb[rx_start_idx++];
11182                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11183                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11184
11185                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11186                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11187                         goto out;
11188
11189                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11190                          - ETH_FCS_LEN;
11191
11192                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11193                         if (rx_len != tx_len)
11194                                 goto out;
11195
11196                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11197                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11198                                         goto out;
11199                         } else {
11200                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11201                                         goto out;
11202                         }
11203                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11204                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11205                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11206                         goto out;
11207                 }
11208
11209                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11210                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11211                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11212                                              mapping);
11213                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11214                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11215                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11216                                              mapping);
11217                 } else
11218                         goto out;
11219
11220                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11221                                             PCI_DMA_FROMDEVICE);
11222
11223                 for (i = data_off; i < rx_len; i++, val++) {
11224                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11225                                 goto out;
11226                 }
11227         }
11228
11229         err = 0;
11230
11231         /* tg3_free_rings will unmap and free the rx_skb */
11232 out:
11233         return err;
11234 }
11235
11236 #define TG3_STD_LOOPBACK_FAILED         1
11237 #define TG3_JMB_LOOPBACK_FAILED         2
11238 #define TG3_TSO_LOOPBACK_FAILED         4
11239
11240 #define TG3_MAC_LOOPBACK_SHIFT          0
11241 #define TG3_PHY_LOOPBACK_SHIFT          4
11242 #define TG3_LOOPBACK_FAILED             0x00000077
11243
11244 static int tg3_test_loopback(struct tg3 *tp)
11245 {
11246         int err = 0;
11247         u32 eee_cap, cpmuctrl = 0;
11248
11249         if (!netif_running(tp->dev))
11250                 return TG3_LOOPBACK_FAILED;
11251
11252         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11253         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11254
11255         err = tg3_reset_hw(tp, 1);
11256         if (err) {
11257                 err = TG3_LOOPBACK_FAILED;
11258                 goto done;
11259         }
11260
11261         if (tg3_flag(tp, ENABLE_RSS)) {
11262                 int i;
11263
11264                 /* Reroute all rx packets to the 1st queue */
11265                 for (i = MAC_RSS_INDIR_TBL_0;
11266                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11267                         tw32(i, 0x0);
11268         }
11269
11270         /* Turn off gphy autopowerdown. */
11271         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11272                 tg3_phy_toggle_apd(tp, false);
11273
11274         if (tg3_flag(tp, CPMU_PRESENT)) {
11275                 int i;
11276                 u32 status;
11277
11278                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11279
11280                 /* Wait for up to 40 microseconds to acquire lock. */
11281                 for (i = 0; i < 4; i++) {
11282                         status = tr32(TG3_CPMU_MUTEX_GNT);
11283                         if (status == CPMU_MUTEX_GNT_DRIVER)
11284                                 break;
11285                         udelay(10);
11286                 }
11287
11288                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11289                         err = TG3_LOOPBACK_FAILED;
11290                         goto done;
11291                 }
11292
11293                 /* Turn off link-based power management. */
11294                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11295                 tw32(TG3_CPMU_CTRL,
11296                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11297                                   CPMU_CTRL_LINK_AWARE_MODE));
11298         }
11299
11300         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11301                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11302
11303         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11304             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11305                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11306
11307         if (tg3_flag(tp, CPMU_PRESENT)) {
11308                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11309
11310                 /* Release the mutex */
11311                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11312         }
11313
11314         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11315             !tg3_flag(tp, USE_PHYLIB)) {
11316                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11317                         err |= TG3_STD_LOOPBACK_FAILED <<
11318                                TG3_PHY_LOOPBACK_SHIFT;
11319                 if (tg3_flag(tp, TSO_CAPABLE) &&
11320                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11321                         err |= TG3_TSO_LOOPBACK_FAILED <<
11322                                TG3_PHY_LOOPBACK_SHIFT;
11323                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11324                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11325                         err |= TG3_JMB_LOOPBACK_FAILED <<
11326                                TG3_PHY_LOOPBACK_SHIFT;
11327         }
11328
11329         /* Re-enable gphy autopowerdown. */
11330         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11331                 tg3_phy_toggle_apd(tp, true);
11332
11333 done:
11334         tp->phy_flags |= eee_cap;
11335
11336         return err;
11337 }
11338
11339 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11340                           u64 *data)
11341 {
11342         struct tg3 *tp = netdev_priv(dev);
11343
11344         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11345                 tg3_power_up(tp);
11346
11347         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11348
11349         if (tg3_test_nvram(tp) != 0) {
11350                 etest->flags |= ETH_TEST_FL_FAILED;
11351                 data[0] = 1;
11352         }
11353         if (tg3_test_link(tp) != 0) {
11354                 etest->flags |= ETH_TEST_FL_FAILED;
11355                 data[1] = 1;
11356         }
11357         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11358                 int err, err2 = 0, irq_sync = 0;
11359
11360                 if (netif_running(dev)) {
11361                         tg3_phy_stop(tp);
11362                         tg3_netif_stop(tp);
11363                         irq_sync = 1;
11364                 }
11365
11366                 tg3_full_lock(tp, irq_sync);
11367
11368                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11369                 err = tg3_nvram_lock(tp);
11370                 tg3_halt_cpu(tp, RX_CPU_BASE);
11371                 if (!tg3_flag(tp, 5705_PLUS))
11372                         tg3_halt_cpu(tp, TX_CPU_BASE);
11373                 if (!err)
11374                         tg3_nvram_unlock(tp);
11375
11376                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11377                         tg3_phy_reset(tp);
11378
11379                 if (tg3_test_registers(tp) != 0) {
11380                         etest->flags |= ETH_TEST_FL_FAILED;
11381                         data[2] = 1;
11382                 }
11383                 if (tg3_test_memory(tp) != 0) {
11384                         etest->flags |= ETH_TEST_FL_FAILED;
11385                         data[3] = 1;
11386                 }
11387                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11388                         etest->flags |= ETH_TEST_FL_FAILED;
11389
11390                 tg3_full_unlock(tp);
11391
11392                 if (tg3_test_interrupt(tp) != 0) {
11393                         etest->flags |= ETH_TEST_FL_FAILED;
11394                         data[5] = 1;
11395                 }
11396
11397                 tg3_full_lock(tp, 0);
11398
11399                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11400                 if (netif_running(dev)) {
11401                         tg3_flag_set(tp, INIT_COMPLETE);
11402                         err2 = tg3_restart_hw(tp, 1);
11403                         if (!err2)
11404                                 tg3_netif_start(tp);
11405                 }
11406
11407                 tg3_full_unlock(tp);
11408
11409                 if (irq_sync && !err2)
11410                         tg3_phy_start(tp);
11411         }
11412         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11413                 tg3_power_down(tp);
11414
11415 }
11416
11417 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11418 {
11419         struct mii_ioctl_data *data = if_mii(ifr);
11420         struct tg3 *tp = netdev_priv(dev);
11421         int err;
11422
11423         if (tg3_flag(tp, USE_PHYLIB)) {
11424                 struct phy_device *phydev;
11425                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11426                         return -EAGAIN;
11427                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11428                 return phy_mii_ioctl(phydev, ifr, cmd);
11429         }
11430
11431         switch (cmd) {
11432         case SIOCGMIIPHY:
11433                 data->phy_id = tp->phy_addr;
11434
11435                 /* fallthru */
11436         case SIOCGMIIREG: {
11437                 u32 mii_regval;
11438
11439                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11440                         break;                  /* We have no PHY */
11441
11442                 if (!netif_running(dev))
11443                         return -EAGAIN;
11444
11445                 spin_lock_bh(&tp->lock);
11446                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11447                 spin_unlock_bh(&tp->lock);
11448
11449                 data->val_out = mii_regval;
11450
11451                 return err;
11452         }
11453
11454         case SIOCSMIIREG:
11455                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11456                         break;                  /* We have no PHY */
11457
11458                 if (!netif_running(dev))
11459                         return -EAGAIN;
11460
11461                 spin_lock_bh(&tp->lock);
11462                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11463                 spin_unlock_bh(&tp->lock);
11464
11465                 return err;
11466
11467         default:
11468                 /* do nothing */
11469                 break;
11470         }
11471         return -EOPNOTSUPP;
11472 }
11473
11474 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11475 {
11476         struct tg3 *tp = netdev_priv(dev);
11477
11478         memcpy(ec, &tp->coal, sizeof(*ec));
11479         return 0;
11480 }
11481
11482 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11483 {
11484         struct tg3 *tp = netdev_priv(dev);
11485         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11486         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11487
11488         if (!tg3_flag(tp, 5705_PLUS)) {
11489                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11490                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11491                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11492                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11493         }
11494
11495         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11496             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11497             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11498             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11499             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11500             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11501             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11502             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11503             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11504             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11505                 return -EINVAL;
11506
11507         /* No rx interrupts will be generated if both are zero */
11508         if ((ec->rx_coalesce_usecs == 0) &&
11509             (ec->rx_max_coalesced_frames == 0))
11510                 return -EINVAL;
11511
11512         /* No tx interrupts will be generated if both are zero */
11513         if ((ec->tx_coalesce_usecs == 0) &&
11514             (ec->tx_max_coalesced_frames == 0))
11515                 return -EINVAL;
11516
11517         /* Only copy relevant parameters, ignore all others. */
11518         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11519         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11520         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11521         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11522         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11523         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11524         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11525         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11526         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11527
11528         if (netif_running(dev)) {
11529                 tg3_full_lock(tp, 0);
11530                 __tg3_set_coalesce(tp, &tp->coal);
11531                 tg3_full_unlock(tp);
11532         }
11533         return 0;
11534 }
11535
11536 static const struct ethtool_ops tg3_ethtool_ops = {
11537         .get_settings           = tg3_get_settings,
11538         .set_settings           = tg3_set_settings,
11539         .get_drvinfo            = tg3_get_drvinfo,
11540         .get_regs_len           = tg3_get_regs_len,
11541         .get_regs               = tg3_get_regs,
11542         .get_wol                = tg3_get_wol,
11543         .set_wol                = tg3_set_wol,
11544         .get_msglevel           = tg3_get_msglevel,
11545         .set_msglevel           = tg3_set_msglevel,
11546         .nway_reset             = tg3_nway_reset,
11547         .get_link               = ethtool_op_get_link,
11548         .get_eeprom_len         = tg3_get_eeprom_len,
11549         .get_eeprom             = tg3_get_eeprom,
11550         .set_eeprom             = tg3_set_eeprom,
11551         .get_ringparam          = tg3_get_ringparam,
11552         .set_ringparam          = tg3_set_ringparam,
11553         .get_pauseparam         = tg3_get_pauseparam,
11554         .set_pauseparam         = tg3_set_pauseparam,
11555         .self_test              = tg3_self_test,
11556         .get_strings            = tg3_get_strings,
11557         .set_phys_id            = tg3_set_phys_id,
11558         .get_ethtool_stats      = tg3_get_ethtool_stats,
11559         .get_coalesce           = tg3_get_coalesce,
11560         .set_coalesce           = tg3_set_coalesce,
11561         .get_sset_count         = tg3_get_sset_count,
11562 };
11563
11564 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11565 {
11566         u32 cursize, val, magic;
11567
11568         tp->nvram_size = EEPROM_CHIP_SIZE;
11569
11570         if (tg3_nvram_read(tp, 0, &magic) != 0)
11571                 return;
11572
11573         if ((magic != TG3_EEPROM_MAGIC) &&
11574             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11575             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11576                 return;
11577
11578         /*
11579          * Size the chip by reading offsets at increasing powers of two.
11580          * When we encounter our validation signature, we know the addressing
11581          * has wrapped around, and thus have our chip size.
11582          */
11583         cursize = 0x10;
11584
11585         while (cursize < tp->nvram_size) {
11586                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11587                         return;
11588
11589                 if (val == magic)
11590                         break;
11591
11592                 cursize <<= 1;
11593         }
11594
11595         tp->nvram_size = cursize;
11596 }
11597
11598 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11599 {
11600         u32 val;
11601
11602         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11603                 return;
11604
11605         /* Selfboot format */
11606         if (val != TG3_EEPROM_MAGIC) {
11607                 tg3_get_eeprom_size(tp);
11608                 return;
11609         }
11610
11611         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11612                 if (val != 0) {
11613                         /* This is confusing.  We want to operate on the
11614                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11615                          * call will read from NVRAM and byteswap the data
11616                          * according to the byteswapping settings for all
11617                          * other register accesses.  This ensures the data we
11618                          * want will always reside in the lower 16-bits.
11619                          * However, the data in NVRAM is in LE format, which
11620                          * means the data from the NVRAM read will always be
11621                          * opposite the endianness of the CPU.  The 16-bit
11622                          * byteswap then brings the data to CPU endianness.
11623                          */
11624                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11625                         return;
11626                 }
11627         }
11628         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11629 }
11630
11631 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11632 {
11633         u32 nvcfg1;
11634
11635         nvcfg1 = tr32(NVRAM_CFG1);
11636         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11637                 tg3_flag_set(tp, FLASH);
11638         } else {
11639                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11640                 tw32(NVRAM_CFG1, nvcfg1);
11641         }
11642
11643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11644             tg3_flag(tp, 5780_CLASS)) {
11645                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11646                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11647                         tp->nvram_jedecnum = JEDEC_ATMEL;
11648                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11649                         tg3_flag_set(tp, NVRAM_BUFFERED);
11650                         break;
11651                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11652                         tp->nvram_jedecnum = JEDEC_ATMEL;
11653                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11654                         break;
11655                 case FLASH_VENDOR_ATMEL_EEPROM:
11656                         tp->nvram_jedecnum = JEDEC_ATMEL;
11657                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11658                         tg3_flag_set(tp, NVRAM_BUFFERED);
11659                         break;
11660                 case FLASH_VENDOR_ST:
11661                         tp->nvram_jedecnum = JEDEC_ST;
11662                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11663                         tg3_flag_set(tp, NVRAM_BUFFERED);
11664                         break;
11665                 case FLASH_VENDOR_SAIFUN:
11666                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11667                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11668                         break;
11669                 case FLASH_VENDOR_SST_SMALL:
11670                 case FLASH_VENDOR_SST_LARGE:
11671                         tp->nvram_jedecnum = JEDEC_SST;
11672                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11673                         break;
11674                 }
11675         } else {
11676                 tp->nvram_jedecnum = JEDEC_ATMEL;
11677                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11678                 tg3_flag_set(tp, NVRAM_BUFFERED);
11679         }
11680 }
11681
11682 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11683 {
11684         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11685         case FLASH_5752PAGE_SIZE_256:
11686                 tp->nvram_pagesize = 256;
11687                 break;
11688         case FLASH_5752PAGE_SIZE_512:
11689                 tp->nvram_pagesize = 512;
11690                 break;
11691         case FLASH_5752PAGE_SIZE_1K:
11692                 tp->nvram_pagesize = 1024;
11693                 break;
11694         case FLASH_5752PAGE_SIZE_2K:
11695                 tp->nvram_pagesize = 2048;
11696                 break;
11697         case FLASH_5752PAGE_SIZE_4K:
11698                 tp->nvram_pagesize = 4096;
11699                 break;
11700         case FLASH_5752PAGE_SIZE_264:
11701                 tp->nvram_pagesize = 264;
11702                 break;
11703         case FLASH_5752PAGE_SIZE_528:
11704                 tp->nvram_pagesize = 528;
11705                 break;
11706         }
11707 }
11708
11709 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11710 {
11711         u32 nvcfg1;
11712
11713         nvcfg1 = tr32(NVRAM_CFG1);
11714
11715         /* NVRAM protection for TPM */
11716         if (nvcfg1 & (1 << 27))
11717                 tg3_flag_set(tp, PROTECTED_NVRAM);
11718
11719         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11720         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11721         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11722                 tp->nvram_jedecnum = JEDEC_ATMEL;
11723                 tg3_flag_set(tp, NVRAM_BUFFERED);
11724                 break;
11725         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11726                 tp->nvram_jedecnum = JEDEC_ATMEL;
11727                 tg3_flag_set(tp, NVRAM_BUFFERED);
11728                 tg3_flag_set(tp, FLASH);
11729                 break;
11730         case FLASH_5752VENDOR_ST_M45PE10:
11731         case FLASH_5752VENDOR_ST_M45PE20:
11732         case FLASH_5752VENDOR_ST_M45PE40:
11733                 tp->nvram_jedecnum = JEDEC_ST;
11734                 tg3_flag_set(tp, NVRAM_BUFFERED);
11735                 tg3_flag_set(tp, FLASH);
11736                 break;
11737         }
11738
11739         if (tg3_flag(tp, FLASH)) {
11740                 tg3_nvram_get_pagesize(tp, nvcfg1);
11741         } else {
11742                 /* For eeprom, set pagesize to maximum eeprom size */
11743                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11744
11745                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11746                 tw32(NVRAM_CFG1, nvcfg1);
11747         }
11748 }
11749
11750 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11751 {
11752         u32 nvcfg1, protect = 0;
11753
11754         nvcfg1 = tr32(NVRAM_CFG1);
11755
11756         /* NVRAM protection for TPM */
11757         if (nvcfg1 & (1 << 27)) {
11758                 tg3_flag_set(tp, PROTECTED_NVRAM);
11759                 protect = 1;
11760         }
11761
11762         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11763         switch (nvcfg1) {
11764         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11765         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11766         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11767         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11768                 tp->nvram_jedecnum = JEDEC_ATMEL;
11769                 tg3_flag_set(tp, NVRAM_BUFFERED);
11770                 tg3_flag_set(tp, FLASH);
11771                 tp->nvram_pagesize = 264;
11772                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11773                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11774                         tp->nvram_size = (protect ? 0x3e200 :
11775                                           TG3_NVRAM_SIZE_512KB);
11776                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11777                         tp->nvram_size = (protect ? 0x1f200 :
11778                                           TG3_NVRAM_SIZE_256KB);
11779                 else
11780                         tp->nvram_size = (protect ? 0x1f200 :
11781                                           TG3_NVRAM_SIZE_128KB);
11782                 break;
11783         case FLASH_5752VENDOR_ST_M45PE10:
11784         case FLASH_5752VENDOR_ST_M45PE20:
11785         case FLASH_5752VENDOR_ST_M45PE40:
11786                 tp->nvram_jedecnum = JEDEC_ST;
11787                 tg3_flag_set(tp, NVRAM_BUFFERED);
11788                 tg3_flag_set(tp, FLASH);
11789                 tp->nvram_pagesize = 256;
11790                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11791                         tp->nvram_size = (protect ?
11792                                           TG3_NVRAM_SIZE_64KB :
11793                                           TG3_NVRAM_SIZE_128KB);
11794                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11795                         tp->nvram_size = (protect ?
11796                                           TG3_NVRAM_SIZE_64KB :
11797                                           TG3_NVRAM_SIZE_256KB);
11798                 else
11799                         tp->nvram_size = (protect ?
11800                                           TG3_NVRAM_SIZE_128KB :
11801                                           TG3_NVRAM_SIZE_512KB);
11802                 break;
11803         }
11804 }
11805
11806 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11807 {
11808         u32 nvcfg1;
11809
11810         nvcfg1 = tr32(NVRAM_CFG1);
11811
11812         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11813         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11814         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11815         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11816         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11817                 tp->nvram_jedecnum = JEDEC_ATMEL;
11818                 tg3_flag_set(tp, NVRAM_BUFFERED);
11819                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11820
11821                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11822                 tw32(NVRAM_CFG1, nvcfg1);
11823                 break;
11824         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11825         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11826         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11827         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11828                 tp->nvram_jedecnum = JEDEC_ATMEL;
11829                 tg3_flag_set(tp, NVRAM_BUFFERED);
11830                 tg3_flag_set(tp, FLASH);
11831                 tp->nvram_pagesize = 264;
11832                 break;
11833         case FLASH_5752VENDOR_ST_M45PE10:
11834         case FLASH_5752VENDOR_ST_M45PE20:
11835         case FLASH_5752VENDOR_ST_M45PE40:
11836                 tp->nvram_jedecnum = JEDEC_ST;
11837                 tg3_flag_set(tp, NVRAM_BUFFERED);
11838                 tg3_flag_set(tp, FLASH);
11839                 tp->nvram_pagesize = 256;
11840                 break;
11841         }
11842 }
11843
11844 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11845 {
11846         u32 nvcfg1, protect = 0;
11847
11848         nvcfg1 = tr32(NVRAM_CFG1);
11849
11850         /* NVRAM protection for TPM */
11851         if (nvcfg1 & (1 << 27)) {
11852                 tg3_flag_set(tp, PROTECTED_NVRAM);
11853                 protect = 1;
11854         }
11855
11856         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11857         switch (nvcfg1) {
11858         case FLASH_5761VENDOR_ATMEL_ADB021D:
11859         case FLASH_5761VENDOR_ATMEL_ADB041D:
11860         case FLASH_5761VENDOR_ATMEL_ADB081D:
11861         case FLASH_5761VENDOR_ATMEL_ADB161D:
11862         case FLASH_5761VENDOR_ATMEL_MDB021D:
11863         case FLASH_5761VENDOR_ATMEL_MDB041D:
11864         case FLASH_5761VENDOR_ATMEL_MDB081D:
11865         case FLASH_5761VENDOR_ATMEL_MDB161D:
11866                 tp->nvram_jedecnum = JEDEC_ATMEL;
11867                 tg3_flag_set(tp, NVRAM_BUFFERED);
11868                 tg3_flag_set(tp, FLASH);
11869                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11870                 tp->nvram_pagesize = 256;
11871                 break;
11872         case FLASH_5761VENDOR_ST_A_M45PE20:
11873         case FLASH_5761VENDOR_ST_A_M45PE40:
11874         case FLASH_5761VENDOR_ST_A_M45PE80:
11875         case FLASH_5761VENDOR_ST_A_M45PE16:
11876         case FLASH_5761VENDOR_ST_M_M45PE20:
11877         case FLASH_5761VENDOR_ST_M_M45PE40:
11878         case FLASH_5761VENDOR_ST_M_M45PE80:
11879         case FLASH_5761VENDOR_ST_M_M45PE16:
11880                 tp->nvram_jedecnum = JEDEC_ST;
11881                 tg3_flag_set(tp, NVRAM_BUFFERED);
11882                 tg3_flag_set(tp, FLASH);
11883                 tp->nvram_pagesize = 256;
11884                 break;
11885         }
11886
11887         if (protect) {
11888                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11889         } else {
11890                 switch (nvcfg1) {
11891                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11892                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11893                 case FLASH_5761VENDOR_ST_A_M45PE16:
11894                 case FLASH_5761VENDOR_ST_M_M45PE16:
11895                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11896                         break;
11897                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11898                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11899                 case FLASH_5761VENDOR_ST_A_M45PE80:
11900                 case FLASH_5761VENDOR_ST_M_M45PE80:
11901                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11902                         break;
11903                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11904                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11905                 case FLASH_5761VENDOR_ST_A_M45PE40:
11906                 case FLASH_5761VENDOR_ST_M_M45PE40:
11907                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11908                         break;
11909                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11910                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11911                 case FLASH_5761VENDOR_ST_A_M45PE20:
11912                 case FLASH_5761VENDOR_ST_M_M45PE20:
11913                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11914                         break;
11915                 }
11916         }
11917 }
11918
11919 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11920 {
11921         tp->nvram_jedecnum = JEDEC_ATMEL;
11922         tg3_flag_set(tp, NVRAM_BUFFERED);
11923         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11924 }
11925
11926 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11927 {
11928         u32 nvcfg1;
11929
11930         nvcfg1 = tr32(NVRAM_CFG1);
11931
11932         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11933         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11934         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11935                 tp->nvram_jedecnum = JEDEC_ATMEL;
11936                 tg3_flag_set(tp, NVRAM_BUFFERED);
11937                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11938
11939                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11940                 tw32(NVRAM_CFG1, nvcfg1);
11941                 return;
11942         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11943         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11944         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11945         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11946         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11947         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11948         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11949                 tp->nvram_jedecnum = JEDEC_ATMEL;
11950                 tg3_flag_set(tp, NVRAM_BUFFERED);
11951                 tg3_flag_set(tp, FLASH);
11952
11953                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11954                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11955                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11956                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11957                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11958                         break;
11959                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11960                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11961                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11962                         break;
11963                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11964                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11965                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11966                         break;
11967                 }
11968                 break;
11969         case FLASH_5752VENDOR_ST_M45PE10:
11970         case FLASH_5752VENDOR_ST_M45PE20:
11971         case FLASH_5752VENDOR_ST_M45PE40:
11972                 tp->nvram_jedecnum = JEDEC_ST;
11973                 tg3_flag_set(tp, NVRAM_BUFFERED);
11974                 tg3_flag_set(tp, FLASH);
11975
11976                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11977                 case FLASH_5752VENDOR_ST_M45PE10:
11978                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11979                         break;
11980                 case FLASH_5752VENDOR_ST_M45PE20:
11981                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11982                         break;
11983                 case FLASH_5752VENDOR_ST_M45PE40:
11984                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11985                         break;
11986                 }
11987                 break;
11988         default:
11989                 tg3_flag_set(tp, NO_NVRAM);
11990                 return;
11991         }
11992
11993         tg3_nvram_get_pagesize(tp, nvcfg1);
11994         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11995                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11996 }
11997
11998
11999 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12000 {
12001         u32 nvcfg1;
12002
12003         nvcfg1 = tr32(NVRAM_CFG1);
12004
12005         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12006         case FLASH_5717VENDOR_ATMEL_EEPROM:
12007         case FLASH_5717VENDOR_MICRO_EEPROM:
12008                 tp->nvram_jedecnum = JEDEC_ATMEL;
12009                 tg3_flag_set(tp, NVRAM_BUFFERED);
12010                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12011
12012                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12013                 tw32(NVRAM_CFG1, nvcfg1);
12014                 return;
12015         case FLASH_5717VENDOR_ATMEL_MDB011D:
12016         case FLASH_5717VENDOR_ATMEL_ADB011B:
12017         case FLASH_5717VENDOR_ATMEL_ADB011D:
12018         case FLASH_5717VENDOR_ATMEL_MDB021D:
12019         case FLASH_5717VENDOR_ATMEL_ADB021B:
12020         case FLASH_5717VENDOR_ATMEL_ADB021D:
12021         case FLASH_5717VENDOR_ATMEL_45USPT:
12022                 tp->nvram_jedecnum = JEDEC_ATMEL;
12023                 tg3_flag_set(tp, NVRAM_BUFFERED);
12024                 tg3_flag_set(tp, FLASH);
12025
12026                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12027                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12028                         /* Detect size with tg3_nvram_get_size() */
12029                         break;
12030                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12031                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12032                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12033                         break;
12034                 default:
12035                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12036                         break;
12037                 }
12038                 break;
12039         case FLASH_5717VENDOR_ST_M_M25PE10:
12040         case FLASH_5717VENDOR_ST_A_M25PE10:
12041         case FLASH_5717VENDOR_ST_M_M45PE10:
12042         case FLASH_5717VENDOR_ST_A_M45PE10:
12043         case FLASH_5717VENDOR_ST_M_M25PE20:
12044         case FLASH_5717VENDOR_ST_A_M25PE20:
12045         case FLASH_5717VENDOR_ST_M_M45PE20:
12046         case FLASH_5717VENDOR_ST_A_M45PE20:
12047         case FLASH_5717VENDOR_ST_25USPT:
12048         case FLASH_5717VENDOR_ST_45USPT:
12049                 tp->nvram_jedecnum = JEDEC_ST;
12050                 tg3_flag_set(tp, NVRAM_BUFFERED);
12051                 tg3_flag_set(tp, FLASH);
12052
12053                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12054                 case FLASH_5717VENDOR_ST_M_M25PE20:
12055                 case FLASH_5717VENDOR_ST_M_M45PE20:
12056                         /* Detect size with tg3_nvram_get_size() */
12057                         break;
12058                 case FLASH_5717VENDOR_ST_A_M25PE20:
12059                 case FLASH_5717VENDOR_ST_A_M45PE20:
12060                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12061                         break;
12062                 default:
12063                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12064                         break;
12065                 }
12066                 break;
12067         default:
12068                 tg3_flag_set(tp, NO_NVRAM);
12069                 return;
12070         }
12071
12072         tg3_nvram_get_pagesize(tp, nvcfg1);
12073         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12074                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12075 }
12076
12077 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12078 {
12079         u32 nvcfg1, nvmpinstrp;
12080
12081         nvcfg1 = tr32(NVRAM_CFG1);
12082         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12083
12084         switch (nvmpinstrp) {
12085         case FLASH_5720_EEPROM_HD:
12086         case FLASH_5720_EEPROM_LD:
12087                 tp->nvram_jedecnum = JEDEC_ATMEL;
12088                 tg3_flag_set(tp, NVRAM_BUFFERED);
12089
12090                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12091                 tw32(NVRAM_CFG1, nvcfg1);
12092                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12093                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12094                 else
12095                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12096                 return;
12097         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12098         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12099         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12100         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12101         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12102         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12103         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12104         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12105         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12106         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12107         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12108         case FLASH_5720VENDOR_ATMEL_45USPT:
12109                 tp->nvram_jedecnum = JEDEC_ATMEL;
12110                 tg3_flag_set(tp, NVRAM_BUFFERED);
12111                 tg3_flag_set(tp, FLASH);
12112
12113                 switch (nvmpinstrp) {
12114                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12115                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12116                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12117                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12118                         break;
12119                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12120                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12121                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12122                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12123                         break;
12124                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12125                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12126                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12127                         break;
12128                 default:
12129                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12130                         break;
12131                 }
12132                 break;
12133         case FLASH_5720VENDOR_M_ST_M25PE10:
12134         case FLASH_5720VENDOR_M_ST_M45PE10:
12135         case FLASH_5720VENDOR_A_ST_M25PE10:
12136         case FLASH_5720VENDOR_A_ST_M45PE10:
12137         case FLASH_5720VENDOR_M_ST_M25PE20:
12138         case FLASH_5720VENDOR_M_ST_M45PE20:
12139         case FLASH_5720VENDOR_A_ST_M25PE20:
12140         case FLASH_5720VENDOR_A_ST_M45PE20:
12141         case FLASH_5720VENDOR_M_ST_M25PE40:
12142         case FLASH_5720VENDOR_M_ST_M45PE40:
12143         case FLASH_5720VENDOR_A_ST_M25PE40:
12144         case FLASH_5720VENDOR_A_ST_M45PE40:
12145         case FLASH_5720VENDOR_M_ST_M25PE80:
12146         case FLASH_5720VENDOR_M_ST_M45PE80:
12147         case FLASH_5720VENDOR_A_ST_M25PE80:
12148         case FLASH_5720VENDOR_A_ST_M45PE80:
12149         case FLASH_5720VENDOR_ST_25USPT:
12150         case FLASH_5720VENDOR_ST_45USPT:
12151                 tp->nvram_jedecnum = JEDEC_ST;
12152                 tg3_flag_set(tp, NVRAM_BUFFERED);
12153                 tg3_flag_set(tp, FLASH);
12154
12155                 switch (nvmpinstrp) {
12156                 case FLASH_5720VENDOR_M_ST_M25PE20:
12157                 case FLASH_5720VENDOR_M_ST_M45PE20:
12158                 case FLASH_5720VENDOR_A_ST_M25PE20:
12159                 case FLASH_5720VENDOR_A_ST_M45PE20:
12160                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12161                         break;
12162                 case FLASH_5720VENDOR_M_ST_M25PE40:
12163                 case FLASH_5720VENDOR_M_ST_M45PE40:
12164                 case FLASH_5720VENDOR_A_ST_M25PE40:
12165                 case FLASH_5720VENDOR_A_ST_M45PE40:
12166                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12167                         break;
12168                 case FLASH_5720VENDOR_M_ST_M25PE80:
12169                 case FLASH_5720VENDOR_M_ST_M45PE80:
12170                 case FLASH_5720VENDOR_A_ST_M25PE80:
12171                 case FLASH_5720VENDOR_A_ST_M45PE80:
12172                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12173                         break;
12174                 default:
12175                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12176                         break;
12177                 }
12178                 break;
12179         default:
12180                 tg3_flag_set(tp, NO_NVRAM);
12181                 return;
12182         }
12183
12184         tg3_nvram_get_pagesize(tp, nvcfg1);
12185         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12186                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12187 }
12188
12189 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12190 static void __devinit tg3_nvram_init(struct tg3 *tp)
12191 {
12192         tw32_f(GRC_EEPROM_ADDR,
12193              (EEPROM_ADDR_FSM_RESET |
12194               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12195                EEPROM_ADDR_CLKPERD_SHIFT)));
12196
12197         msleep(1);
12198
12199         /* Enable seeprom accesses. */
12200         tw32_f(GRC_LOCAL_CTRL,
12201              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12202         udelay(100);
12203
12204         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12205             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12206                 tg3_flag_set(tp, NVRAM);
12207
12208                 if (tg3_nvram_lock(tp)) {
12209                         netdev_warn(tp->dev,
12210                                     "Cannot get nvram lock, %s failed\n",
12211                                     __func__);
12212                         return;
12213                 }
12214                 tg3_enable_nvram_access(tp);
12215
12216                 tp->nvram_size = 0;
12217
12218                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12219                         tg3_get_5752_nvram_info(tp);
12220                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12221                         tg3_get_5755_nvram_info(tp);
12222                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12223                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12224                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12225                         tg3_get_5787_nvram_info(tp);
12226                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12227                         tg3_get_5761_nvram_info(tp);
12228                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12229                         tg3_get_5906_nvram_info(tp);
12230                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12231                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12232                         tg3_get_57780_nvram_info(tp);
12233                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12234                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12235                         tg3_get_5717_nvram_info(tp);
12236                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12237                         tg3_get_5720_nvram_info(tp);
12238                 else
12239                         tg3_get_nvram_info(tp);
12240
12241                 if (tp->nvram_size == 0)
12242                         tg3_get_nvram_size(tp);
12243
12244                 tg3_disable_nvram_access(tp);
12245                 tg3_nvram_unlock(tp);
12246
12247         } else {
12248                 tg3_flag_clear(tp, NVRAM);
12249                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12250
12251                 tg3_get_eeprom_size(tp);
12252         }
12253 }
12254
12255 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12256                                     u32 offset, u32 len, u8 *buf)
12257 {
12258         int i, j, rc = 0;
12259         u32 val;
12260
12261         for (i = 0; i < len; i += 4) {
12262                 u32 addr;
12263                 __be32 data;
12264
12265                 addr = offset + i;
12266
12267                 memcpy(&data, buf + i, 4);
12268
12269                 /*
12270                  * The SEEPROM interface expects the data to always be opposite
12271                  * the native endian format.  We accomplish this by reversing
12272                  * all the operations that would have been performed on the
12273                  * data from a call to tg3_nvram_read_be32().
12274                  */
12275                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12276
12277                 val = tr32(GRC_EEPROM_ADDR);
12278                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12279
12280                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12281                         EEPROM_ADDR_READ);
12282                 tw32(GRC_EEPROM_ADDR, val |
12283                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12284                         (addr & EEPROM_ADDR_ADDR_MASK) |
12285                         EEPROM_ADDR_START |
12286                         EEPROM_ADDR_WRITE);
12287
12288                 for (j = 0; j < 1000; j++) {
12289                         val = tr32(GRC_EEPROM_ADDR);
12290
12291                         if (val & EEPROM_ADDR_COMPLETE)
12292                                 break;
12293                         msleep(1);
12294                 }
12295                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12296                         rc = -EBUSY;
12297                         break;
12298                 }
12299         }
12300
12301         return rc;
12302 }
12303
12304 /* offset and length are dword aligned */
12305 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12306                 u8 *buf)
12307 {
12308         int ret = 0;
12309         u32 pagesize = tp->nvram_pagesize;
12310         u32 pagemask = pagesize - 1;
12311         u32 nvram_cmd;
12312         u8 *tmp;
12313
12314         tmp = kmalloc(pagesize, GFP_KERNEL);
12315         if (tmp == NULL)
12316                 return -ENOMEM;
12317
12318         while (len) {
12319                 int j;
12320                 u32 phy_addr, page_off, size;
12321
12322                 phy_addr = offset & ~pagemask;
12323
12324                 for (j = 0; j < pagesize; j += 4) {
12325                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12326                                                   (__be32 *) (tmp + j));
12327                         if (ret)
12328                                 break;
12329                 }
12330                 if (ret)
12331                         break;
12332
12333                 page_off = offset & pagemask;
12334                 size = pagesize;
12335                 if (len < size)
12336                         size = len;
12337
12338                 len -= size;
12339
12340                 memcpy(tmp + page_off, buf, size);
12341
12342                 offset = offset + (pagesize - page_off);
12343
12344                 tg3_enable_nvram_access(tp);
12345
12346                 /*
12347                  * Before we can erase the flash page, we need
12348                  * to issue a special "write enable" command.
12349                  */
12350                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12351
12352                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12353                         break;
12354
12355                 /* Erase the target page */
12356                 tw32(NVRAM_ADDR, phy_addr);
12357
12358                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12359                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12360
12361                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12362                         break;
12363
12364                 /* Issue another write enable to start the write. */
12365                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12366
12367                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12368                         break;
12369
12370                 for (j = 0; j < pagesize; j += 4) {
12371                         __be32 data;
12372
12373                         data = *((__be32 *) (tmp + j));
12374
12375                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12376
12377                         tw32(NVRAM_ADDR, phy_addr + j);
12378
12379                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12380                                 NVRAM_CMD_WR;
12381
12382                         if (j == 0)
12383                                 nvram_cmd |= NVRAM_CMD_FIRST;
12384                         else if (j == (pagesize - 4))
12385                                 nvram_cmd |= NVRAM_CMD_LAST;
12386
12387                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12388                                 break;
12389                 }
12390                 if (ret)
12391                         break;
12392         }
12393
12394         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12395         tg3_nvram_exec_cmd(tp, nvram_cmd);
12396
12397         kfree(tmp);
12398
12399         return ret;
12400 }
12401
12402 /* offset and length are dword aligned */
12403 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12404                 u8 *buf)
12405 {
12406         int i, ret = 0;
12407
12408         for (i = 0; i < len; i += 4, offset += 4) {
12409                 u32 page_off, phy_addr, nvram_cmd;
12410                 __be32 data;
12411
12412                 memcpy(&data, buf + i, 4);
12413                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12414
12415                 page_off = offset % tp->nvram_pagesize;
12416
12417                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12418
12419                 tw32(NVRAM_ADDR, phy_addr);
12420
12421                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12422
12423                 if (page_off == 0 || i == 0)
12424                         nvram_cmd |= NVRAM_CMD_FIRST;
12425                 if (page_off == (tp->nvram_pagesize - 4))
12426                         nvram_cmd |= NVRAM_CMD_LAST;
12427
12428                 if (i == (len - 4))
12429                         nvram_cmd |= NVRAM_CMD_LAST;
12430
12431                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12432                     !tg3_flag(tp, 5755_PLUS) &&
12433                     (tp->nvram_jedecnum == JEDEC_ST) &&
12434                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12435
12436                         if ((ret = tg3_nvram_exec_cmd(tp,
12437                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12438                                 NVRAM_CMD_DONE)))
12439
12440                                 break;
12441                 }
12442                 if (!tg3_flag(tp, FLASH)) {
12443                         /* We always do complete word writes to eeprom. */
12444                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12445                 }
12446
12447                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12448                         break;
12449         }
12450         return ret;
12451 }
12452
12453 /* offset and length are dword aligned */
12454 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12455 {
12456         int ret;
12457
12458         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12459                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12460                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12461                 udelay(40);
12462         }
12463
12464         if (!tg3_flag(tp, NVRAM)) {
12465                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12466         } else {
12467                 u32 grc_mode;
12468
12469                 ret = tg3_nvram_lock(tp);
12470                 if (ret)
12471                         return ret;
12472
12473                 tg3_enable_nvram_access(tp);
12474                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12475                         tw32(NVRAM_WRITE1, 0x406);
12476
12477                 grc_mode = tr32(GRC_MODE);
12478                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12479
12480                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12481                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12482                                 buf);
12483                 } else {
12484                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12485                                 buf);
12486                 }
12487
12488                 grc_mode = tr32(GRC_MODE);
12489                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12490
12491                 tg3_disable_nvram_access(tp);
12492                 tg3_nvram_unlock(tp);
12493         }
12494
12495         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12496                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12497                 udelay(40);
12498         }
12499
12500         return ret;
12501 }
12502
12503 struct subsys_tbl_ent {
12504         u16 subsys_vendor, subsys_devid;
12505         u32 phy_id;
12506 };
12507
12508 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12509         /* Broadcom boards. */
12510         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12511           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12512         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12513           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12514         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12515           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12516         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12517           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12518         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12519           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12520         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12521           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12522         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12523           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12524         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12525           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12526         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12527           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12528         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12529           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12530         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12531           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12532
12533         /* 3com boards. */
12534         { TG3PCI_SUBVENDOR_ID_3COM,
12535           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12536         { TG3PCI_SUBVENDOR_ID_3COM,
12537           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12538         { TG3PCI_SUBVENDOR_ID_3COM,
12539           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12540         { TG3PCI_SUBVENDOR_ID_3COM,
12541           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12542         { TG3PCI_SUBVENDOR_ID_3COM,
12543           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12544
12545         /* DELL boards. */
12546         { TG3PCI_SUBVENDOR_ID_DELL,
12547           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12548         { TG3PCI_SUBVENDOR_ID_DELL,
12549           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12550         { TG3PCI_SUBVENDOR_ID_DELL,
12551           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12552         { TG3PCI_SUBVENDOR_ID_DELL,
12553           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12554
12555         /* Compaq boards. */
12556         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12557           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12558         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12559           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12560         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12561           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12562         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12563           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12564         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12565           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12566
12567         /* IBM boards. */
12568         { TG3PCI_SUBVENDOR_ID_IBM,
12569           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12570 };
12571
12572 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12573 {
12574         int i;
12575
12576         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12577                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12578                      tp->pdev->subsystem_vendor) &&
12579                     (subsys_id_to_phy_id[i].subsys_devid ==
12580                      tp->pdev->subsystem_device))
12581                         return &subsys_id_to_phy_id[i];
12582         }
12583         return NULL;
12584 }
12585
12586 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12587 {
12588         u32 val;
12589         u16 pmcsr;
12590
12591         /* On some early chips the SRAM cannot be accessed in D3hot state,
12592          * so need make sure we're in D0.
12593          */
12594         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12595         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12596         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12597         msleep(1);
12598
12599         /* Make sure register accesses (indirect or otherwise)
12600          * will function correctly.
12601          */
12602         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12603                                tp->misc_host_ctrl);
12604
12605         /* The memory arbiter has to be enabled in order for SRAM accesses
12606          * to succeed.  Normally on powerup the tg3 chip firmware will make
12607          * sure it is enabled, but other entities such as system netboot
12608          * code might disable it.
12609          */
12610         val = tr32(MEMARB_MODE);
12611         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12612
12613         tp->phy_id = TG3_PHY_ID_INVALID;
12614         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12615
12616         /* Assume an onboard device and WOL capable by default.  */
12617         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12618         tg3_flag_set(tp, WOL_CAP);
12619
12620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12621                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12622                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12623                         tg3_flag_set(tp, IS_NIC);
12624                 }
12625                 val = tr32(VCPU_CFGSHDW);
12626                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12627                         tg3_flag_set(tp, ASPM_WORKAROUND);
12628                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12629                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12630                         tg3_flag_set(tp, WOL_ENABLE);
12631                         device_set_wakeup_enable(&tp->pdev->dev, true);
12632                 }
12633                 goto done;
12634         }
12635
12636         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12637         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12638                 u32 nic_cfg, led_cfg;
12639                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12640                 int eeprom_phy_serdes = 0;
12641
12642                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12643                 tp->nic_sram_data_cfg = nic_cfg;
12644
12645                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12646                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12647                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12648                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12649                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12650                     (ver > 0) && (ver < 0x100))
12651                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12652
12653                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12654                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12655
12656                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12657                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12658                         eeprom_phy_serdes = 1;
12659
12660                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12661                 if (nic_phy_id != 0) {
12662                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12663                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12664
12665                         eeprom_phy_id  = (id1 >> 16) << 10;
12666                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12667                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12668                 } else
12669                         eeprom_phy_id = 0;
12670
12671                 tp->phy_id = eeprom_phy_id;
12672                 if (eeprom_phy_serdes) {
12673                         if (!tg3_flag(tp, 5705_PLUS))
12674                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12675                         else
12676                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12677                 }
12678
12679                 if (tg3_flag(tp, 5750_PLUS))
12680                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12681                                     SHASTA_EXT_LED_MODE_MASK);
12682                 else
12683                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12684
12685                 switch (led_cfg) {
12686                 default:
12687                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12688                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12689                         break;
12690
12691                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12692                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12693                         break;
12694
12695                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12696                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12697
12698                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12699                          * read on some older 5700/5701 bootcode.
12700                          */
12701                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12702                             ASIC_REV_5700 ||
12703                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12704                             ASIC_REV_5701)
12705                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12706
12707                         break;
12708
12709                 case SHASTA_EXT_LED_SHARED:
12710                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12711                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12712                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12713                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12714                                                  LED_CTRL_MODE_PHY_2);
12715                         break;
12716
12717                 case SHASTA_EXT_LED_MAC:
12718                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12719                         break;
12720
12721                 case SHASTA_EXT_LED_COMBO:
12722                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12723                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12724                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12725                                                  LED_CTRL_MODE_PHY_2);
12726                         break;
12727
12728                 }
12729
12730                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12731                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12732                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12733                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12734
12735                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12736                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12737
12738                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12739                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12740                         if ((tp->pdev->subsystem_vendor ==
12741                              PCI_VENDOR_ID_ARIMA) &&
12742                             (tp->pdev->subsystem_device == 0x205a ||
12743                              tp->pdev->subsystem_device == 0x2063))
12744                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12745                 } else {
12746                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12747                         tg3_flag_set(tp, IS_NIC);
12748                 }
12749
12750                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12751                         tg3_flag_set(tp, ENABLE_ASF);
12752                         if (tg3_flag(tp, 5750_PLUS))
12753                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12754                 }
12755
12756                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12757                     tg3_flag(tp, 5750_PLUS))
12758                         tg3_flag_set(tp, ENABLE_APE);
12759
12760                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12761                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12762                         tg3_flag_clear(tp, WOL_CAP);
12763
12764                 if (tg3_flag(tp, WOL_CAP) &&
12765                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12766                         tg3_flag_set(tp, WOL_ENABLE);
12767                         device_set_wakeup_enable(&tp->pdev->dev, true);
12768                 }
12769
12770                 if (cfg2 & (1 << 17))
12771                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12772
12773                 /* serdes signal pre-emphasis in register 0x590 set by */
12774                 /* bootcode if bit 18 is set */
12775                 if (cfg2 & (1 << 18))
12776                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12777
12778                 if ((tg3_flag(tp, 57765_PLUS) ||
12779                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12780                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12781                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12782                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12783
12784                 if (tg3_flag(tp, PCI_EXPRESS) &&
12785                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12786                     !tg3_flag(tp, 57765_PLUS)) {
12787                         u32 cfg3;
12788
12789                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12790                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12791                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12792                 }
12793
12794                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12795                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12796                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12797                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12798                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12799                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12800         }
12801 done:
12802         if (tg3_flag(tp, WOL_CAP))
12803                 device_set_wakeup_enable(&tp->pdev->dev,
12804                                          tg3_flag(tp, WOL_ENABLE));
12805         else
12806                 device_set_wakeup_capable(&tp->pdev->dev, false);
12807 }
12808
12809 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12810 {
12811         int i;
12812         u32 val;
12813
12814         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12815         tw32(OTP_CTRL, cmd);
12816
12817         /* Wait for up to 1 ms for command to execute. */
12818         for (i = 0; i < 100; i++) {
12819                 val = tr32(OTP_STATUS);
12820                 if (val & OTP_STATUS_CMD_DONE)
12821                         break;
12822                 udelay(10);
12823         }
12824
12825         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12826 }
12827
12828 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12829  * configuration is a 32-bit value that straddles the alignment boundary.
12830  * We do two 32-bit reads and then shift and merge the results.
12831  */
12832 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12833 {
12834         u32 bhalf_otp, thalf_otp;
12835
12836         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12837
12838         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12839                 return 0;
12840
12841         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12842
12843         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12844                 return 0;
12845
12846         thalf_otp = tr32(OTP_READ_DATA);
12847
12848         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12849
12850         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12851                 return 0;
12852
12853         bhalf_otp = tr32(OTP_READ_DATA);
12854
12855         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12856 }
12857
12858 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12859 {
12860         u32 adv = ADVERTISED_Autoneg |
12861                   ADVERTISED_Pause;
12862
12863         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12864                 adv |= ADVERTISED_1000baseT_Half |
12865                        ADVERTISED_1000baseT_Full;
12866
12867         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12868                 adv |= ADVERTISED_100baseT_Half |
12869                        ADVERTISED_100baseT_Full |
12870                        ADVERTISED_10baseT_Half |
12871                        ADVERTISED_10baseT_Full |
12872                        ADVERTISED_TP;
12873         else
12874                 adv |= ADVERTISED_FIBRE;
12875
12876         tp->link_config.advertising = adv;
12877         tp->link_config.speed = SPEED_INVALID;
12878         tp->link_config.duplex = DUPLEX_INVALID;
12879         tp->link_config.autoneg = AUTONEG_ENABLE;
12880         tp->link_config.active_speed = SPEED_INVALID;
12881         tp->link_config.active_duplex = DUPLEX_INVALID;
12882         tp->link_config.orig_speed = SPEED_INVALID;
12883         tp->link_config.orig_duplex = DUPLEX_INVALID;
12884         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12885 }
12886
12887 static int __devinit tg3_phy_probe(struct tg3 *tp)
12888 {
12889         u32 hw_phy_id_1, hw_phy_id_2;
12890         u32 hw_phy_id, hw_phy_id_masked;
12891         int err;
12892
12893         /* flow control autonegotiation is default behavior */
12894         tg3_flag_set(tp, PAUSE_AUTONEG);
12895         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12896
12897         if (tg3_flag(tp, USE_PHYLIB))
12898                 return tg3_phy_init(tp);
12899
12900         /* Reading the PHY ID register can conflict with ASF
12901          * firmware access to the PHY hardware.
12902          */
12903         err = 0;
12904         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12905                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12906         } else {
12907                 /* Now read the physical PHY_ID from the chip and verify
12908                  * that it is sane.  If it doesn't look good, we fall back
12909                  * to either the hard-coded table based PHY_ID and failing
12910                  * that the value found in the eeprom area.
12911                  */
12912                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12913                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12914
12915                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12916                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12917                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12918
12919                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12920         }
12921
12922         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12923                 tp->phy_id = hw_phy_id;
12924                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12925                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12926                 else
12927                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12928         } else {
12929                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12930                         /* Do nothing, phy ID already set up in
12931                          * tg3_get_eeprom_hw_cfg().
12932                          */
12933                 } else {
12934                         struct subsys_tbl_ent *p;
12935
12936                         /* No eeprom signature?  Try the hardcoded
12937                          * subsys device table.
12938                          */
12939                         p = tg3_lookup_by_subsys(tp);
12940                         if (!p)
12941                                 return -ENODEV;
12942
12943                         tp->phy_id = p->phy_id;
12944                         if (!tp->phy_id ||
12945                             tp->phy_id == TG3_PHY_ID_BCM8002)
12946                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12947                 }
12948         }
12949
12950         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12951             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12952               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12953              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12954               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12955                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12956
12957         tg3_phy_init_link_config(tp);
12958
12959         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12960             !tg3_flag(tp, ENABLE_APE) &&
12961             !tg3_flag(tp, ENABLE_ASF)) {
12962                 u32 bmsr, mask;
12963
12964                 tg3_readphy(tp, MII_BMSR, &bmsr);
12965                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12966                     (bmsr & BMSR_LSTATUS))
12967                         goto skip_phy_reset;
12968
12969                 err = tg3_phy_reset(tp);
12970                 if (err)
12971                         return err;
12972
12973                 tg3_phy_set_wirespeed(tp);
12974
12975                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12976                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12977                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12978                 if (!tg3_copper_is_advertising_all(tp, mask)) {
12979                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12980                                             tp->link_config.flowctrl);
12981
12982                         tg3_writephy(tp, MII_BMCR,
12983                                      BMCR_ANENABLE | BMCR_ANRESTART);
12984                 }
12985         }
12986
12987 skip_phy_reset:
12988         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12989                 err = tg3_init_5401phy_dsp(tp);
12990                 if (err)
12991                         return err;
12992
12993                 err = tg3_init_5401phy_dsp(tp);
12994         }
12995
12996         return err;
12997 }
12998
12999 static void __devinit tg3_read_vpd(struct tg3 *tp)
13000 {
13001         u8 *vpd_data;
13002         unsigned int block_end, rosize, len;
13003         int j, i = 0;
13004
13005         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13006         if (!vpd_data)
13007                 goto out_no_vpd;
13008
13009         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13010                              PCI_VPD_LRDT_RO_DATA);
13011         if (i < 0)
13012                 goto out_not_found;
13013
13014         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13015         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13016         i += PCI_VPD_LRDT_TAG_SIZE;
13017
13018         if (block_end > TG3_NVM_VPD_LEN)
13019                 goto out_not_found;
13020
13021         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13022                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13023         if (j > 0) {
13024                 len = pci_vpd_info_field_size(&vpd_data[j]);
13025
13026                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13027                 if (j + len > block_end || len != 4 ||
13028                     memcmp(&vpd_data[j], "1028", 4))
13029                         goto partno;
13030
13031                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13032                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13033                 if (j < 0)
13034                         goto partno;
13035
13036                 len = pci_vpd_info_field_size(&vpd_data[j]);
13037
13038                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13039                 if (j + len > block_end)
13040                         goto partno;
13041
13042                 memcpy(tp->fw_ver, &vpd_data[j], len);
13043                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13044         }
13045
13046 partno:
13047         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13048                                       PCI_VPD_RO_KEYWORD_PARTNO);
13049         if (i < 0)
13050                 goto out_not_found;
13051
13052         len = pci_vpd_info_field_size(&vpd_data[i]);
13053
13054         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13055         if (len > TG3_BPN_SIZE ||
13056             (len + i) > TG3_NVM_VPD_LEN)
13057                 goto out_not_found;
13058
13059         memcpy(tp->board_part_number, &vpd_data[i], len);
13060
13061 out_not_found:
13062         kfree(vpd_data);
13063         if (tp->board_part_number[0])
13064                 return;
13065
13066 out_no_vpd:
13067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13068                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13069                         strcpy(tp->board_part_number, "BCM5717");
13070                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13071                         strcpy(tp->board_part_number, "BCM5718");
13072                 else
13073                         goto nomatch;
13074         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13075                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13076                         strcpy(tp->board_part_number, "BCM57780");
13077                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13078                         strcpy(tp->board_part_number, "BCM57760");
13079                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13080                         strcpy(tp->board_part_number, "BCM57790");
13081                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13082                         strcpy(tp->board_part_number, "BCM57788");
13083                 else
13084                         goto nomatch;
13085         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13086                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13087                         strcpy(tp->board_part_number, "BCM57761");
13088                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13089                         strcpy(tp->board_part_number, "BCM57765");
13090                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13091                         strcpy(tp->board_part_number, "BCM57781");
13092                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13093                         strcpy(tp->board_part_number, "BCM57785");
13094                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13095                         strcpy(tp->board_part_number, "BCM57791");
13096                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13097                         strcpy(tp->board_part_number, "BCM57795");
13098                 else
13099                         goto nomatch;
13100         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13101                 strcpy(tp->board_part_number, "BCM95906");
13102         } else {
13103 nomatch:
13104                 strcpy(tp->board_part_number, "none");
13105         }
13106 }
13107
13108 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13109 {
13110         u32 val;
13111
13112         if (tg3_nvram_read(tp, offset, &val) ||
13113             (val & 0xfc000000) != 0x0c000000 ||
13114             tg3_nvram_read(tp, offset + 4, &val) ||
13115             val != 0)
13116                 return 0;
13117
13118         return 1;
13119 }
13120
13121 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13122 {
13123         u32 val, offset, start, ver_offset;
13124         int i, dst_off;
13125         bool newver = false;
13126
13127         if (tg3_nvram_read(tp, 0xc, &offset) ||
13128             tg3_nvram_read(tp, 0x4, &start))
13129                 return;
13130
13131         offset = tg3_nvram_logical_addr(tp, offset);
13132
13133         if (tg3_nvram_read(tp, offset, &val))
13134                 return;
13135
13136         if ((val & 0xfc000000) == 0x0c000000) {
13137                 if (tg3_nvram_read(tp, offset + 4, &val))
13138                         return;
13139
13140                 if (val == 0)
13141                         newver = true;
13142         }
13143
13144         dst_off = strlen(tp->fw_ver);
13145
13146         if (newver) {
13147                 if (TG3_VER_SIZE - dst_off < 16 ||
13148                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13149                         return;
13150
13151                 offset = offset + ver_offset - start;
13152                 for (i = 0; i < 16; i += 4) {
13153                         __be32 v;
13154                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13155                                 return;
13156
13157                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13158                 }
13159         } else {
13160                 u32 major, minor;
13161
13162                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13163                         return;
13164
13165                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13166                         TG3_NVM_BCVER_MAJSFT;
13167                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13168                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13169                          "v%d.%02d", major, minor);
13170         }
13171 }
13172
13173 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13174 {
13175         u32 val, major, minor;
13176
13177         /* Use native endian representation */
13178         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13179                 return;
13180
13181         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13182                 TG3_NVM_HWSB_CFG1_MAJSFT;
13183         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13184                 TG3_NVM_HWSB_CFG1_MINSFT;
13185
13186         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13187 }
13188
13189 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13190 {
13191         u32 offset, major, minor, build;
13192
13193         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13194
13195         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13196                 return;
13197
13198         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13199         case TG3_EEPROM_SB_REVISION_0:
13200                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13201                 break;
13202         case TG3_EEPROM_SB_REVISION_2:
13203                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13204                 break;
13205         case TG3_EEPROM_SB_REVISION_3:
13206                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13207                 break;
13208         case TG3_EEPROM_SB_REVISION_4:
13209                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13210                 break;
13211         case TG3_EEPROM_SB_REVISION_5:
13212                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13213                 break;
13214         case TG3_EEPROM_SB_REVISION_6:
13215                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13216                 break;
13217         default:
13218                 return;
13219         }
13220
13221         if (tg3_nvram_read(tp, offset, &val))
13222                 return;
13223
13224         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13225                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13226         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13227                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13228         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13229
13230         if (minor > 99 || build > 26)
13231                 return;
13232
13233         offset = strlen(tp->fw_ver);
13234         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13235                  " v%d.%02d", major, minor);
13236
13237         if (build > 0) {
13238                 offset = strlen(tp->fw_ver);
13239                 if (offset < TG3_VER_SIZE - 1)
13240                         tp->fw_ver[offset] = 'a' + build - 1;
13241         }
13242 }
13243
13244 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13245 {
13246         u32 val, offset, start;
13247         int i, vlen;
13248
13249         for (offset = TG3_NVM_DIR_START;
13250              offset < TG3_NVM_DIR_END;
13251              offset += TG3_NVM_DIRENT_SIZE) {
13252                 if (tg3_nvram_read(tp, offset, &val))
13253                         return;
13254
13255                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13256                         break;
13257         }
13258
13259         if (offset == TG3_NVM_DIR_END)
13260                 return;
13261
13262         if (!tg3_flag(tp, 5705_PLUS))
13263                 start = 0x08000000;
13264         else if (tg3_nvram_read(tp, offset - 4, &start))
13265                 return;
13266
13267         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13268             !tg3_fw_img_is_valid(tp, offset) ||
13269             tg3_nvram_read(tp, offset + 8, &val))
13270                 return;
13271
13272         offset += val - start;
13273
13274         vlen = strlen(tp->fw_ver);
13275
13276         tp->fw_ver[vlen++] = ',';
13277         tp->fw_ver[vlen++] = ' ';
13278
13279         for (i = 0; i < 4; i++) {
13280                 __be32 v;
13281                 if (tg3_nvram_read_be32(tp, offset, &v))
13282                         return;
13283
13284                 offset += sizeof(v);
13285
13286                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13287                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13288                         break;
13289                 }
13290
13291                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13292                 vlen += sizeof(v);
13293         }
13294 }
13295
13296 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13297 {
13298         int vlen;
13299         u32 apedata;
13300         char *fwtype;
13301
13302         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13303                 return;
13304
13305         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13306         if (apedata != APE_SEG_SIG_MAGIC)
13307                 return;
13308
13309         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13310         if (!(apedata & APE_FW_STATUS_READY))
13311                 return;
13312
13313         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13314
13315         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13316                 tg3_flag_set(tp, APE_HAS_NCSI);
13317                 fwtype = "NCSI";
13318         } else {
13319                 fwtype = "DASH";
13320         }
13321
13322         vlen = strlen(tp->fw_ver);
13323
13324         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13325                  fwtype,
13326                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13327                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13328                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13329                  (apedata & APE_FW_VERSION_BLDMSK));
13330 }
13331
13332 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13333 {
13334         u32 val;
13335         bool vpd_vers = false;
13336
13337         if (tp->fw_ver[0] != 0)
13338                 vpd_vers = true;
13339
13340         if (tg3_flag(tp, NO_NVRAM)) {
13341                 strcat(tp->fw_ver, "sb");
13342                 return;
13343         }
13344
13345         if (tg3_nvram_read(tp, 0, &val))
13346                 return;
13347
13348         if (val == TG3_EEPROM_MAGIC)
13349                 tg3_read_bc_ver(tp);
13350         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13351                 tg3_read_sb_ver(tp, val);
13352         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13353                 tg3_read_hwsb_ver(tp);
13354         else
13355                 return;
13356
13357         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13358                 goto done;
13359
13360         tg3_read_mgmtfw_ver(tp);
13361
13362 done:
13363         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13364 }
13365
13366 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13367
13368 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13369 {
13370         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13371                 return TG3_RX_RET_MAX_SIZE_5717;
13372         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13373                 return TG3_RX_RET_MAX_SIZE_5700;
13374         else
13375                 return TG3_RX_RET_MAX_SIZE_5705;
13376 }
13377
13378 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13379         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13380         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13381         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13382         { },
13383 };
13384
13385 static int __devinit tg3_get_invariants(struct tg3 *tp)
13386 {
13387         u32 misc_ctrl_reg;
13388         u32 pci_state_reg, grc_misc_cfg;
13389         u32 val;
13390         u16 pci_cmd;
13391         int err;
13392
13393         /* Force memory write invalidate off.  If we leave it on,
13394          * then on 5700_BX chips we have to enable a workaround.
13395          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13396          * to match the cacheline size.  The Broadcom driver have this
13397          * workaround but turns MWI off all the times so never uses
13398          * it.  This seems to suggest that the workaround is insufficient.
13399          */
13400         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13401         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13402         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13403
13404         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13405          * has the register indirect write enable bit set before
13406          * we try to access any of the MMIO registers.  It is also
13407          * critical that the PCI-X hw workaround situation is decided
13408          * before that as well.
13409          */
13410         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13411                               &misc_ctrl_reg);
13412
13413         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13414                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13416                 u32 prod_id_asic_rev;
13417
13418                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13419                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13420                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13421                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13422                         pci_read_config_dword(tp->pdev,
13423                                               TG3PCI_GEN2_PRODID_ASICREV,
13424                                               &prod_id_asic_rev);
13425                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13426                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13427                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13428                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13429                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13430                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13431                         pci_read_config_dword(tp->pdev,
13432                                               TG3PCI_GEN15_PRODID_ASICREV,
13433                                               &prod_id_asic_rev);
13434                 else
13435                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13436                                               &prod_id_asic_rev);
13437
13438                 tp->pci_chip_rev_id = prod_id_asic_rev;
13439         }
13440
13441         /* Wrong chip ID in 5752 A0. This code can be removed later
13442          * as A0 is not in production.
13443          */
13444         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13445                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13446
13447         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13448          * we need to disable memory and use config. cycles
13449          * only to access all registers. The 5702/03 chips
13450          * can mistakenly decode the special cycles from the
13451          * ICH chipsets as memory write cycles, causing corruption
13452          * of register and memory space. Only certain ICH bridges
13453          * will drive special cycles with non-zero data during the
13454          * address phase which can fall within the 5703's address
13455          * range. This is not an ICH bug as the PCI spec allows
13456          * non-zero address during special cycles. However, only
13457          * these ICH bridges are known to drive non-zero addresses
13458          * during special cycles.
13459          *
13460          * Since special cycles do not cross PCI bridges, we only
13461          * enable this workaround if the 5703 is on the secondary
13462          * bus of these ICH bridges.
13463          */
13464         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13465             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13466                 static struct tg3_dev_id {
13467                         u32     vendor;
13468                         u32     device;
13469                         u32     rev;
13470                 } ich_chipsets[] = {
13471                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13472                           PCI_ANY_ID },
13473                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13474                           PCI_ANY_ID },
13475                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13476                           0xa },
13477                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13478                           PCI_ANY_ID },
13479                         { },
13480                 };
13481                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13482                 struct pci_dev *bridge = NULL;
13483
13484                 while (pci_id->vendor != 0) {
13485                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13486                                                 bridge);
13487                         if (!bridge) {
13488                                 pci_id++;
13489                                 continue;
13490                         }
13491                         if (pci_id->rev != PCI_ANY_ID) {
13492                                 if (bridge->revision > pci_id->rev)
13493                                         continue;
13494                         }
13495                         if (bridge->subordinate &&
13496                             (bridge->subordinate->number ==
13497                              tp->pdev->bus->number)) {
13498                                 tg3_flag_set(tp, ICH_WORKAROUND);
13499                                 pci_dev_put(bridge);
13500                                 break;
13501                         }
13502                 }
13503         }
13504
13505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13506                 static struct tg3_dev_id {
13507                         u32     vendor;
13508                         u32     device;
13509                 } bridge_chipsets[] = {
13510                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13511                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13512                         { },
13513                 };
13514                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13515                 struct pci_dev *bridge = NULL;
13516
13517                 while (pci_id->vendor != 0) {
13518                         bridge = pci_get_device(pci_id->vendor,
13519                                                 pci_id->device,
13520                                                 bridge);
13521                         if (!bridge) {
13522                                 pci_id++;
13523                                 continue;
13524                         }
13525                         if (bridge->subordinate &&
13526                             (bridge->subordinate->number <=
13527                              tp->pdev->bus->number) &&
13528                             (bridge->subordinate->subordinate >=
13529                              tp->pdev->bus->number)) {
13530                                 tg3_flag_set(tp, 5701_DMA_BUG);
13531                                 pci_dev_put(bridge);
13532                                 break;
13533                         }
13534                 }
13535         }
13536
13537         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13538          * DMA addresses > 40-bit. This bridge may have other additional
13539          * 57xx devices behind it in some 4-port NIC designs for example.
13540          * Any tg3 device found behind the bridge will also need the 40-bit
13541          * DMA workaround.
13542          */
13543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13545                 tg3_flag_set(tp, 5780_CLASS);
13546                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13547                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13548         } else {
13549                 struct pci_dev *bridge = NULL;
13550
13551                 do {
13552                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13553                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13554                                                 bridge);
13555                         if (bridge && bridge->subordinate &&
13556                             (bridge->subordinate->number <=
13557                              tp->pdev->bus->number) &&
13558                             (bridge->subordinate->subordinate >=
13559                              tp->pdev->bus->number)) {
13560                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13561                                 pci_dev_put(bridge);
13562                                 break;
13563                         }
13564                 } while (bridge);
13565         }
13566
13567         /* Initialize misc host control in PCI block. */
13568         tp->misc_host_ctrl |= (misc_ctrl_reg &
13569                                MISC_HOST_CTRL_CHIPREV);
13570         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13571                                tp->misc_host_ctrl);
13572
13573         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13576             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13577                 tp->pdev_peer = tg3_find_peer(tp);
13578
13579         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13581             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13582                 tg3_flag_set(tp, 5717_PLUS);
13583
13584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13585             tg3_flag(tp, 5717_PLUS))
13586                 tg3_flag_set(tp, 57765_PLUS);
13587
13588         /* Intentionally exclude ASIC_REV_5906 */
13589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13595             tg3_flag(tp, 57765_PLUS))
13596                 tg3_flag_set(tp, 5755_PLUS);
13597
13598         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13601             tg3_flag(tp, 5755_PLUS) ||
13602             tg3_flag(tp, 5780_CLASS))
13603                 tg3_flag_set(tp, 5750_PLUS);
13604
13605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13606             tg3_flag(tp, 5750_PLUS))
13607                 tg3_flag_set(tp, 5705_PLUS);
13608
13609         /* Determine TSO capabilities */
13610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13611                 ; /* Do nothing. HW bug. */
13612         else if (tg3_flag(tp, 57765_PLUS))
13613                 tg3_flag_set(tp, HW_TSO_3);
13614         else if (tg3_flag(tp, 5755_PLUS) ||
13615                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13616                 tg3_flag_set(tp, HW_TSO_2);
13617         else if (tg3_flag(tp, 5750_PLUS)) {
13618                 tg3_flag_set(tp, HW_TSO_1);
13619                 tg3_flag_set(tp, TSO_BUG);
13620                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13621                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13622                         tg3_flag_clear(tp, TSO_BUG);
13623         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13624                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13625                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13626                         tg3_flag_set(tp, TSO_BUG);
13627                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13628                         tp->fw_needed = FIRMWARE_TG3TSO5;
13629                 else
13630                         tp->fw_needed = FIRMWARE_TG3TSO;
13631         }
13632
13633         /* Selectively allow TSO based on operating conditions */
13634         if (tg3_flag(tp, HW_TSO_1) ||
13635             tg3_flag(tp, HW_TSO_2) ||
13636             tg3_flag(tp, HW_TSO_3) ||
13637             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13638                 tg3_flag_set(tp, TSO_CAPABLE);
13639         else {
13640                 tg3_flag_clear(tp, TSO_CAPABLE);
13641                 tg3_flag_clear(tp, TSO_BUG);
13642                 tp->fw_needed = NULL;
13643         }
13644
13645         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13646                 tp->fw_needed = FIRMWARE_TG3;
13647
13648         tp->irq_max = 1;
13649
13650         if (tg3_flag(tp, 5750_PLUS)) {
13651                 tg3_flag_set(tp, SUPPORT_MSI);
13652                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13653                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13654                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13655                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13656                      tp->pdev_peer == tp->pdev))
13657                         tg3_flag_clear(tp, SUPPORT_MSI);
13658
13659                 if (tg3_flag(tp, 5755_PLUS) ||
13660                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13661                         tg3_flag_set(tp, 1SHOT_MSI);
13662                 }
13663
13664                 if (tg3_flag(tp, 57765_PLUS)) {
13665                         tg3_flag_set(tp, SUPPORT_MSIX);
13666                         tp->irq_max = TG3_IRQ_MAX_VECS;
13667                 }
13668         }
13669
13670         /* All chips can get confused if TX buffers
13671          * straddle the 4GB address boundary.
13672          */
13673         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13674
13675         if (tg3_flag(tp, 5755_PLUS))
13676                 tg3_flag_set(tp, SHORT_DMA_BUG);
13677         else
13678                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13679
13680         if (tg3_flag(tp, 5717_PLUS))
13681                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13682
13683         if (tg3_flag(tp, 57765_PLUS) &&
13684             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13685                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13686
13687         if (!tg3_flag(tp, 5705_PLUS) ||
13688             tg3_flag(tp, 5780_CLASS) ||
13689             tg3_flag(tp, USE_JUMBO_BDFLAG))
13690                 tg3_flag_set(tp, JUMBO_CAPABLE);
13691
13692         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13693                               &pci_state_reg);
13694
13695         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13696         if (tp->pcie_cap != 0) {
13697                 u16 lnkctl;
13698
13699                 tg3_flag_set(tp, PCI_EXPRESS);
13700
13701                 tp->pcie_readrq = 4096;
13702                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13703                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13704                         tp->pcie_readrq = 2048;
13705
13706                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13707
13708                 pci_read_config_word(tp->pdev,
13709                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13710                                      &lnkctl);
13711                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13712                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13713                             ASIC_REV_5906) {
13714                                 tg3_flag_clear(tp, HW_TSO_2);
13715                                 tg3_flag_clear(tp, TSO_CAPABLE);
13716                         }
13717                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13718                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13719                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13720                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13721                                 tg3_flag_set(tp, CLKREQ_BUG);
13722                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13723                         tg3_flag_set(tp, L1PLLPD_EN);
13724                 }
13725         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13726                 tg3_flag_set(tp, PCI_EXPRESS);
13727         } else if (!tg3_flag(tp, 5705_PLUS) ||
13728                    tg3_flag(tp, 5780_CLASS)) {
13729                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13730                 if (!tp->pcix_cap) {
13731                         dev_err(&tp->pdev->dev,
13732                                 "Cannot find PCI-X capability, aborting\n");
13733                         return -EIO;
13734                 }
13735
13736                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13737                         tg3_flag_set(tp, PCIX_MODE);
13738         }
13739
13740         /* If we have an AMD 762 or VIA K8T800 chipset, write
13741          * reordering to the mailbox registers done by the host
13742          * controller can cause major troubles.  We read back from
13743          * every mailbox register write to force the writes to be
13744          * posted to the chip in order.
13745          */
13746         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13747             !tg3_flag(tp, PCI_EXPRESS))
13748                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13749
13750         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13751                              &tp->pci_cacheline_sz);
13752         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13753                              &tp->pci_lat_timer);
13754         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13755             tp->pci_lat_timer < 64) {
13756                 tp->pci_lat_timer = 64;
13757                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13758                                       tp->pci_lat_timer);
13759         }
13760
13761         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13762                 /* 5700 BX chips need to have their TX producer index
13763                  * mailboxes written twice to workaround a bug.
13764                  */
13765                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13766
13767                 /* If we are in PCI-X mode, enable register write workaround.
13768                  *
13769                  * The workaround is to use indirect register accesses
13770                  * for all chip writes not to mailbox registers.
13771                  */
13772                 if (tg3_flag(tp, PCIX_MODE)) {
13773                         u32 pm_reg;
13774
13775                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13776
13777                         /* The chip can have it's power management PCI config
13778                          * space registers clobbered due to this bug.
13779                          * So explicitly force the chip into D0 here.
13780                          */
13781                         pci_read_config_dword(tp->pdev,
13782                                               tp->pm_cap + PCI_PM_CTRL,
13783                                               &pm_reg);
13784                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13785                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13786                         pci_write_config_dword(tp->pdev,
13787                                                tp->pm_cap + PCI_PM_CTRL,
13788                                                pm_reg);
13789
13790                         /* Also, force SERR#/PERR# in PCI command. */
13791                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13792                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13793                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13794                 }
13795         }
13796
13797         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13798                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13799         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13800                 tg3_flag_set(tp, PCI_32BIT);
13801
13802         /* Chip-specific fixup from Broadcom driver */
13803         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13804             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13805                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13806                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13807         }
13808
13809         /* Default fast path register access methods */
13810         tp->read32 = tg3_read32;
13811         tp->write32 = tg3_write32;
13812         tp->read32_mbox = tg3_read32;
13813         tp->write32_mbox = tg3_write32;
13814         tp->write32_tx_mbox = tg3_write32;
13815         tp->write32_rx_mbox = tg3_write32;
13816
13817         /* Various workaround register access methods */
13818         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13819                 tp->write32 = tg3_write_indirect_reg32;
13820         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13821                  (tg3_flag(tp, PCI_EXPRESS) &&
13822                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13823                 /*
13824                  * Back to back register writes can cause problems on these
13825                  * chips, the workaround is to read back all reg writes
13826                  * except those to mailbox regs.
13827                  *
13828                  * See tg3_write_indirect_reg32().
13829                  */
13830                 tp->write32 = tg3_write_flush_reg32;
13831         }
13832
13833         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13834                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13835                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13836                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13837         }
13838
13839         if (tg3_flag(tp, ICH_WORKAROUND)) {
13840                 tp->read32 = tg3_read_indirect_reg32;
13841                 tp->write32 = tg3_write_indirect_reg32;
13842                 tp->read32_mbox = tg3_read_indirect_mbox;
13843                 tp->write32_mbox = tg3_write_indirect_mbox;
13844                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13845                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13846
13847                 iounmap(tp->regs);
13848                 tp->regs = NULL;
13849
13850                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13851                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13852                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13853         }
13854         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13855                 tp->read32_mbox = tg3_read32_mbox_5906;
13856                 tp->write32_mbox = tg3_write32_mbox_5906;
13857                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13858                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13859         }
13860
13861         if (tp->write32 == tg3_write_indirect_reg32 ||
13862             (tg3_flag(tp, PCIX_MODE) &&
13863              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13864               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13865                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13866
13867         /* Get eeprom hw config before calling tg3_set_power_state().
13868          * In particular, the TG3_FLAG_IS_NIC flag must be
13869          * determined before calling tg3_set_power_state() so that
13870          * we know whether or not to switch out of Vaux power.
13871          * When the flag is set, it means that GPIO1 is used for eeprom
13872          * write protect and also implies that it is a LOM where GPIOs
13873          * are not used to switch power.
13874          */
13875         tg3_get_eeprom_hw_cfg(tp);
13876
13877         if (tg3_flag(tp, ENABLE_APE)) {
13878                 /* Allow reads and writes to the
13879                  * APE register and memory space.
13880                  */
13881                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13882                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13883                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13884                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13885                                        pci_state_reg);
13886         }
13887
13888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13891             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13892             tg3_flag(tp, 57765_PLUS))
13893                 tg3_flag_set(tp, CPMU_PRESENT);
13894
13895         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13896          * GPIO1 driven high will bring 5700's external PHY out of reset.
13897          * It is also used as eeprom write protect on LOMs.
13898          */
13899         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13901             tg3_flag(tp, EEPROM_WRITE_PROT))
13902                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13903                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13904         /* Unused GPIO3 must be driven as output on 5752 because there
13905          * are no pull-up resistors on unused GPIO pins.
13906          */
13907         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13908                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13909
13910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13912             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13913                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13914
13915         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13916             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13917                 /* Turn off the debug UART. */
13918                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13919                 if (tg3_flag(tp, IS_NIC))
13920                         /* Keep VMain power. */
13921                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13922                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13923         }
13924
13925         /* Force the chip into D0. */
13926         err = tg3_power_up(tp);
13927         if (err) {
13928                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13929                 return err;
13930         }
13931
13932         /* Derive initial jumbo mode from MTU assigned in
13933          * ether_setup() via the alloc_etherdev() call
13934          */
13935         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13936                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13937
13938         /* Determine WakeOnLan speed to use. */
13939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13940             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13941             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13942             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13943                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13944         } else {
13945                 tg3_flag_set(tp, WOL_SPEED_100MB);
13946         }
13947
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13949                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13950
13951         /* A few boards don't want Ethernet@WireSpeed phy feature */
13952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13953             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13954              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13955              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13956             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13957             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13958                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13959
13960         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13961             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13962                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13963         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13964                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13965
13966         if (tg3_flag(tp, 5705_PLUS) &&
13967             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13968             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13969             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13970             !tg3_flag(tp, 57765_PLUS)) {
13971                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13972                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13974                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13975                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13976                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13977                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13978                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13979                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13980                 } else
13981                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13982         }
13983
13984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13985             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13986                 tp->phy_otp = tg3_read_otp_phycfg(tp);
13987                 if (tp->phy_otp == 0)
13988                         tp->phy_otp = TG3_OTP_DEFAULT;
13989         }
13990
13991         if (tg3_flag(tp, CPMU_PRESENT))
13992                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13993         else
13994                 tp->mi_mode = MAC_MI_MODE_BASE;
13995
13996         tp->coalesce_mode = 0;
13997         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13998             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13999                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14000
14001         /* Set these bits to enable statistics workaround. */
14002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14003             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14004             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14005                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14006                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14007         }
14008
14009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14011                 tg3_flag_set(tp, USE_PHYLIB);
14012
14013         err = tg3_mdio_init(tp);
14014         if (err)
14015                 return err;
14016
14017         /* Initialize data/descriptor byte/word swapping. */
14018         val = tr32(GRC_MODE);
14019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14020                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14021                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14022                         GRC_MODE_B2HRX_ENABLE |
14023                         GRC_MODE_HTX2B_ENABLE |
14024                         GRC_MODE_HOST_STACKUP);
14025         else
14026                 val &= GRC_MODE_HOST_STACKUP;
14027
14028         tw32(GRC_MODE, val | tp->grc_mode);
14029
14030         tg3_switch_clocks(tp);
14031
14032         /* Clear this out for sanity. */
14033         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14034
14035         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14036                               &pci_state_reg);
14037         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14038             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14039                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14040
14041                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14042                     chiprevid == CHIPREV_ID_5701_B0 ||
14043                     chiprevid == CHIPREV_ID_5701_B2 ||
14044                     chiprevid == CHIPREV_ID_5701_B5) {
14045                         void __iomem *sram_base;
14046
14047                         /* Write some dummy words into the SRAM status block
14048                          * area, see if it reads back correctly.  If the return
14049                          * value is bad, force enable the PCIX workaround.
14050                          */
14051                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14052
14053                         writel(0x00000000, sram_base);
14054                         writel(0x00000000, sram_base + 4);
14055                         writel(0xffffffff, sram_base + 4);
14056                         if (readl(sram_base) != 0x00000000)
14057                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14058                 }
14059         }
14060
14061         udelay(50);
14062         tg3_nvram_init(tp);
14063
14064         grc_misc_cfg = tr32(GRC_MISC_CFG);
14065         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14066
14067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14068             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14069              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14070                 tg3_flag_set(tp, IS_5788);
14071
14072         if (!tg3_flag(tp, IS_5788) &&
14073             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14074                 tg3_flag_set(tp, TAGGED_STATUS);
14075         if (tg3_flag(tp, TAGGED_STATUS)) {
14076                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14077                                       HOSTCC_MODE_CLRTICK_TXBD);
14078
14079                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14080                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14081                                        tp->misc_host_ctrl);
14082         }
14083
14084         /* Preserve the APE MAC_MODE bits */
14085         if (tg3_flag(tp, ENABLE_APE))
14086                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14087         else
14088                 tp->mac_mode = TG3_DEF_MAC_MODE;
14089
14090         /* these are limited to 10/100 only */
14091         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14092              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14093             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14094              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14095              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14096               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14097               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14098             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14099              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14100               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14101               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14102             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14103             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14104             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14105             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14106                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14107
14108         err = tg3_phy_probe(tp);
14109         if (err) {
14110                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14111                 /* ... but do not return immediately ... */
14112                 tg3_mdio_fini(tp);
14113         }
14114
14115         tg3_read_vpd(tp);
14116         tg3_read_fw_ver(tp);
14117
14118         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14119                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14120         } else {
14121                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14122                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14123                 else
14124                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14125         }
14126
14127         /* 5700 {AX,BX} chips have a broken status block link
14128          * change bit implementation, so we must use the
14129          * status register in those cases.
14130          */
14131         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14132                 tg3_flag_set(tp, USE_LINKCHG_REG);
14133         else
14134                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14135
14136         /* The led_ctrl is set during tg3_phy_probe, here we might
14137          * have to force the link status polling mechanism based
14138          * upon subsystem IDs.
14139          */
14140         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14141             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14142             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14143                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14144                 tg3_flag_set(tp, USE_LINKCHG_REG);
14145         }
14146
14147         /* For all SERDES we poll the MAC status register. */
14148         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14149                 tg3_flag_set(tp, POLL_SERDES);
14150         else
14151                 tg3_flag_clear(tp, POLL_SERDES);
14152
14153         tp->rx_offset = NET_IP_ALIGN;
14154         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14156             tg3_flag(tp, PCIX_MODE)) {
14157                 tp->rx_offset = 0;
14158 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14159                 tp->rx_copy_thresh = ~(u16)0;
14160 #endif
14161         }
14162
14163         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14164         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14165         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14166
14167         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14168
14169         /* Increment the rx prod index on the rx std ring by at most
14170          * 8 for these chips to workaround hw errata.
14171          */
14172         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14175                 tp->rx_std_max_post = 8;
14176
14177         if (tg3_flag(tp, ASPM_WORKAROUND))
14178                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14179                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14180
14181         return err;
14182 }
14183
14184 #ifdef CONFIG_SPARC
14185 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14186 {
14187         struct net_device *dev = tp->dev;
14188         struct pci_dev *pdev = tp->pdev;
14189         struct device_node *dp = pci_device_to_OF_node(pdev);
14190         const unsigned char *addr;
14191         int len;
14192
14193         addr = of_get_property(dp, "local-mac-address", &len);
14194         if (addr && len == 6) {
14195                 memcpy(dev->dev_addr, addr, 6);
14196                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14197                 return 0;
14198         }
14199         return -ENODEV;
14200 }
14201
14202 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14203 {
14204         struct net_device *dev = tp->dev;
14205
14206         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14207         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14208         return 0;
14209 }
14210 #endif
14211
14212 static int __devinit tg3_get_device_address(struct tg3 *tp)
14213 {
14214         struct net_device *dev = tp->dev;
14215         u32 hi, lo, mac_offset;
14216         int addr_ok = 0;
14217
14218 #ifdef CONFIG_SPARC
14219         if (!tg3_get_macaddr_sparc(tp))
14220                 return 0;
14221 #endif
14222
14223         mac_offset = 0x7c;
14224         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14225             tg3_flag(tp, 5780_CLASS)) {
14226                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14227                         mac_offset = 0xcc;
14228                 if (tg3_nvram_lock(tp))
14229                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14230                 else
14231                         tg3_nvram_unlock(tp);
14232         } else if (tg3_flag(tp, 5717_PLUS)) {
14233                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14234                         mac_offset = 0xcc;
14235                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14236                         mac_offset += 0x18c;
14237         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14238                 mac_offset = 0x10;
14239
14240         /* First try to get it from MAC address mailbox. */
14241         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14242         if ((hi >> 16) == 0x484b) {
14243                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14244                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14245
14246                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14247                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14248                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14249                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14250                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14251
14252                 /* Some old bootcode may report a 0 MAC address in SRAM */
14253                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14254         }
14255         if (!addr_ok) {
14256                 /* Next, try NVRAM. */
14257                 if (!tg3_flag(tp, NO_NVRAM) &&
14258                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14259                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14260                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14261                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14262                 }
14263                 /* Finally just fetch it out of the MAC control regs. */
14264                 else {
14265                         hi = tr32(MAC_ADDR_0_HIGH);
14266                         lo = tr32(MAC_ADDR_0_LOW);
14267
14268                         dev->dev_addr[5] = lo & 0xff;
14269                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14270                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14271                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14272                         dev->dev_addr[1] = hi & 0xff;
14273                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14274                 }
14275         }
14276
14277         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14278 #ifdef CONFIG_SPARC
14279                 if (!tg3_get_default_macaddr_sparc(tp))
14280                         return 0;
14281 #endif
14282                 return -EINVAL;
14283         }
14284         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14285         return 0;
14286 }
14287
14288 #define BOUNDARY_SINGLE_CACHELINE       1
14289 #define BOUNDARY_MULTI_CACHELINE        2
14290
14291 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14292 {
14293         int cacheline_size;
14294         u8 byte;
14295         int goal;
14296
14297         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14298         if (byte == 0)
14299                 cacheline_size = 1024;
14300         else
14301                 cacheline_size = (int) byte * 4;
14302
14303         /* On 5703 and later chips, the boundary bits have no
14304          * effect.
14305          */
14306         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14307             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14308             !tg3_flag(tp, PCI_EXPRESS))
14309                 goto out;
14310
14311 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14312         goal = BOUNDARY_MULTI_CACHELINE;
14313 #else
14314 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14315         goal = BOUNDARY_SINGLE_CACHELINE;
14316 #else
14317         goal = 0;
14318 #endif
14319 #endif
14320
14321         if (tg3_flag(tp, 57765_PLUS)) {
14322                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14323                 goto out;
14324         }
14325
14326         if (!goal)
14327                 goto out;
14328
14329         /* PCI controllers on most RISC systems tend to disconnect
14330          * when a device tries to burst across a cache-line boundary.
14331          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14332          *
14333          * Unfortunately, for PCI-E there are only limited
14334          * write-side controls for this, and thus for reads
14335          * we will still get the disconnects.  We'll also waste
14336          * these PCI cycles for both read and write for chips
14337          * other than 5700 and 5701 which do not implement the
14338          * boundary bits.
14339          */
14340         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14341                 switch (cacheline_size) {
14342                 case 16:
14343                 case 32:
14344                 case 64:
14345                 case 128:
14346                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14347                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14348                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14349                         } else {
14350                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14351                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14352                         }
14353                         break;
14354
14355                 case 256:
14356                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14357                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14358                         break;
14359
14360                 default:
14361                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14362                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14363                         break;
14364                 }
14365         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14366                 switch (cacheline_size) {
14367                 case 16:
14368                 case 32:
14369                 case 64:
14370                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14371                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14372                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14373                                 break;
14374                         }
14375                         /* fallthrough */
14376                 case 128:
14377                 default:
14378                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14379                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14380                         break;
14381                 }
14382         } else {
14383                 switch (cacheline_size) {
14384                 case 16:
14385                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14386                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14387                                         DMA_RWCTRL_WRITE_BNDRY_16);
14388                                 break;
14389                         }
14390                         /* fallthrough */
14391                 case 32:
14392                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14393                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14394                                         DMA_RWCTRL_WRITE_BNDRY_32);
14395                                 break;
14396                         }
14397                         /* fallthrough */
14398                 case 64:
14399                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14400                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14401                                         DMA_RWCTRL_WRITE_BNDRY_64);
14402                                 break;
14403                         }
14404                         /* fallthrough */
14405                 case 128:
14406                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14407                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14408                                         DMA_RWCTRL_WRITE_BNDRY_128);
14409                                 break;
14410                         }
14411                         /* fallthrough */
14412                 case 256:
14413                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14414                                 DMA_RWCTRL_WRITE_BNDRY_256);
14415                         break;
14416                 case 512:
14417                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14418                                 DMA_RWCTRL_WRITE_BNDRY_512);
14419                         break;
14420                 case 1024:
14421                 default:
14422                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14423                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14424                         break;
14425                 }
14426         }
14427
14428 out:
14429         return val;
14430 }
14431
14432 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14433 {
14434         struct tg3_internal_buffer_desc test_desc;
14435         u32 sram_dma_descs;
14436         int i, ret;
14437
14438         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14439
14440         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14441         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14442         tw32(RDMAC_STATUS, 0);
14443         tw32(WDMAC_STATUS, 0);
14444
14445         tw32(BUFMGR_MODE, 0);
14446         tw32(FTQ_RESET, 0);
14447
14448         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14449         test_desc.addr_lo = buf_dma & 0xffffffff;
14450         test_desc.nic_mbuf = 0x00002100;
14451         test_desc.len = size;
14452
14453         /*
14454          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14455          * the *second* time the tg3 driver was getting loaded after an
14456          * initial scan.
14457          *
14458          * Broadcom tells me:
14459          *   ...the DMA engine is connected to the GRC block and a DMA
14460          *   reset may affect the GRC block in some unpredictable way...
14461          *   The behavior of resets to individual blocks has not been tested.
14462          *
14463          * Broadcom noted the GRC reset will also reset all sub-components.
14464          */
14465         if (to_device) {
14466                 test_desc.cqid_sqid = (13 << 8) | 2;
14467
14468                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14469                 udelay(40);
14470         } else {
14471                 test_desc.cqid_sqid = (16 << 8) | 7;
14472
14473                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14474                 udelay(40);
14475         }
14476         test_desc.flags = 0x00000005;
14477
14478         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14479                 u32 val;
14480
14481                 val = *(((u32 *)&test_desc) + i);
14482                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14483                                        sram_dma_descs + (i * sizeof(u32)));
14484                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14485         }
14486         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14487
14488         if (to_device)
14489                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14490         else
14491                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14492
14493         ret = -ENODEV;
14494         for (i = 0; i < 40; i++) {
14495                 u32 val;
14496
14497                 if (to_device)
14498                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14499                 else
14500                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14501                 if ((val & 0xffff) == sram_dma_descs) {
14502                         ret = 0;
14503                         break;
14504                 }
14505
14506                 udelay(100);
14507         }
14508
14509         return ret;
14510 }
14511
14512 #define TEST_BUFFER_SIZE        0x2000
14513
14514 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14515         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14516         { },
14517 };
14518
14519 static int __devinit tg3_test_dma(struct tg3 *tp)
14520 {
14521         dma_addr_t buf_dma;
14522         u32 *buf, saved_dma_rwctrl;
14523         int ret = 0;
14524
14525         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14526                                  &buf_dma, GFP_KERNEL);
14527         if (!buf) {
14528                 ret = -ENOMEM;
14529                 goto out_nofree;
14530         }
14531
14532         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14533                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14534
14535         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14536
14537         if (tg3_flag(tp, 57765_PLUS))
14538                 goto out;
14539
14540         if (tg3_flag(tp, PCI_EXPRESS)) {
14541                 /* DMA read watermark not used on PCIE */
14542                 tp->dma_rwctrl |= 0x00180000;
14543         } else if (!tg3_flag(tp, PCIX_MODE)) {
14544                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14545                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14546                         tp->dma_rwctrl |= 0x003f0000;
14547                 else
14548                         tp->dma_rwctrl |= 0x003f000f;
14549         } else {
14550                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14551                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14552                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14553                         u32 read_water = 0x7;
14554
14555                         /* If the 5704 is behind the EPB bridge, we can
14556                          * do the less restrictive ONE_DMA workaround for
14557                          * better performance.
14558                          */
14559                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14560                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14561                                 tp->dma_rwctrl |= 0x8000;
14562                         else if (ccval == 0x6 || ccval == 0x7)
14563                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14564
14565                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14566                                 read_water = 4;
14567                         /* Set bit 23 to enable PCIX hw bug fix */
14568                         tp->dma_rwctrl |=
14569                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14570                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14571                                 (1 << 23);
14572                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14573                         /* 5780 always in PCIX mode */
14574                         tp->dma_rwctrl |= 0x00144000;
14575                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14576                         /* 5714 always in PCIX mode */
14577                         tp->dma_rwctrl |= 0x00148000;
14578                 } else {
14579                         tp->dma_rwctrl |= 0x001b000f;
14580                 }
14581         }
14582
14583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14584             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14585                 tp->dma_rwctrl &= 0xfffffff0;
14586
14587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14588             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14589                 /* Remove this if it causes problems for some boards. */
14590                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14591
14592                 /* On 5700/5701 chips, we need to set this bit.
14593                  * Otherwise the chip will issue cacheline transactions
14594                  * to streamable DMA memory with not all the byte
14595                  * enables turned on.  This is an error on several
14596                  * RISC PCI controllers, in particular sparc64.
14597                  *
14598                  * On 5703/5704 chips, this bit has been reassigned
14599                  * a different meaning.  In particular, it is used
14600                  * on those chips to enable a PCI-X workaround.
14601                  */
14602                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14603         }
14604
14605         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14606
14607 #if 0
14608         /* Unneeded, already done by tg3_get_invariants.  */
14609         tg3_switch_clocks(tp);
14610 #endif
14611
14612         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14613             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14614                 goto out;
14615
14616         /* It is best to perform DMA test with maximum write burst size
14617          * to expose the 5700/5701 write DMA bug.
14618          */
14619         saved_dma_rwctrl = tp->dma_rwctrl;
14620         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14621         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14622
14623         while (1) {
14624                 u32 *p = buf, i;
14625
14626                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14627                         p[i] = i;
14628
14629                 /* Send the buffer to the chip. */
14630                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14631                 if (ret) {
14632                         dev_err(&tp->pdev->dev,
14633                                 "%s: Buffer write failed. err = %d\n",
14634                                 __func__, ret);
14635                         break;
14636                 }
14637
14638 #if 0
14639                 /* validate data reached card RAM correctly. */
14640                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14641                         u32 val;
14642                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14643                         if (le32_to_cpu(val) != p[i]) {
14644                                 dev_err(&tp->pdev->dev,
14645                                         "%s: Buffer corrupted on device! "
14646                                         "(%d != %d)\n", __func__, val, i);
14647                                 /* ret = -ENODEV here? */
14648                         }
14649                         p[i] = 0;
14650                 }
14651 #endif
14652                 /* Now read it back. */
14653                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14654                 if (ret) {
14655                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14656                                 "err = %d\n", __func__, ret);
14657                         break;
14658                 }
14659
14660                 /* Verify it. */
14661                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14662                         if (p[i] == i)
14663                                 continue;
14664
14665                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14666                             DMA_RWCTRL_WRITE_BNDRY_16) {
14667                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14668                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14669                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14670                                 break;
14671                         } else {
14672                                 dev_err(&tp->pdev->dev,
14673                                         "%s: Buffer corrupted on read back! "
14674                                         "(%d != %d)\n", __func__, p[i], i);
14675                                 ret = -ENODEV;
14676                                 goto out;
14677                         }
14678                 }
14679
14680                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14681                         /* Success. */
14682                         ret = 0;
14683                         break;
14684                 }
14685         }
14686         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14687             DMA_RWCTRL_WRITE_BNDRY_16) {
14688                 /* DMA test passed without adjusting DMA boundary,
14689                  * now look for chipsets that are known to expose the
14690                  * DMA bug without failing the test.
14691                  */
14692                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14693                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14694                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14695                 } else {
14696                         /* Safe to use the calculated DMA boundary. */
14697                         tp->dma_rwctrl = saved_dma_rwctrl;
14698                 }
14699
14700                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14701         }
14702
14703 out:
14704         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14705 out_nofree:
14706         return ret;
14707 }
14708
14709 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14710 {
14711         if (tg3_flag(tp, 57765_PLUS)) {
14712                 tp->bufmgr_config.mbuf_read_dma_low_water =
14713                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14714                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14715                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14716                 tp->bufmgr_config.mbuf_high_water =
14717                         DEFAULT_MB_HIGH_WATER_57765;
14718
14719                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14720                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14721                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14722                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14723                 tp->bufmgr_config.mbuf_high_water_jumbo =
14724                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14725         } else if (tg3_flag(tp, 5705_PLUS)) {
14726                 tp->bufmgr_config.mbuf_read_dma_low_water =
14727                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14728                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14729                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14730                 tp->bufmgr_config.mbuf_high_water =
14731                         DEFAULT_MB_HIGH_WATER_5705;
14732                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14733                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14734                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14735                         tp->bufmgr_config.mbuf_high_water =
14736                                 DEFAULT_MB_HIGH_WATER_5906;
14737                 }
14738
14739                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14740                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14741                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14742                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14743                 tp->bufmgr_config.mbuf_high_water_jumbo =
14744                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14745         } else {
14746                 tp->bufmgr_config.mbuf_read_dma_low_water =
14747                         DEFAULT_MB_RDMA_LOW_WATER;
14748                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14749                         DEFAULT_MB_MACRX_LOW_WATER;
14750                 tp->bufmgr_config.mbuf_high_water =
14751                         DEFAULT_MB_HIGH_WATER;
14752
14753                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14754                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14755                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14756                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14757                 tp->bufmgr_config.mbuf_high_water_jumbo =
14758                         DEFAULT_MB_HIGH_WATER_JUMBO;
14759         }
14760
14761         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14762         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14763 }
14764
14765 static char * __devinit tg3_phy_string(struct tg3 *tp)
14766 {
14767         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14768         case TG3_PHY_ID_BCM5400:        return "5400";
14769         case TG3_PHY_ID_BCM5401:        return "5401";
14770         case TG3_PHY_ID_BCM5411:        return "5411";
14771         case TG3_PHY_ID_BCM5701:        return "5701";
14772         case TG3_PHY_ID_BCM5703:        return "5703";
14773         case TG3_PHY_ID_BCM5704:        return "5704";
14774         case TG3_PHY_ID_BCM5705:        return "5705";
14775         case TG3_PHY_ID_BCM5750:        return "5750";
14776         case TG3_PHY_ID_BCM5752:        return "5752";
14777         case TG3_PHY_ID_BCM5714:        return "5714";
14778         case TG3_PHY_ID_BCM5780:        return "5780";
14779         case TG3_PHY_ID_BCM5755:        return "5755";
14780         case TG3_PHY_ID_BCM5787:        return "5787";
14781         case TG3_PHY_ID_BCM5784:        return "5784";
14782         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14783         case TG3_PHY_ID_BCM5906:        return "5906";
14784         case TG3_PHY_ID_BCM5761:        return "5761";
14785         case TG3_PHY_ID_BCM5718C:       return "5718C";
14786         case TG3_PHY_ID_BCM5718S:       return "5718S";
14787         case TG3_PHY_ID_BCM57765:       return "57765";
14788         case TG3_PHY_ID_BCM5719C:       return "5719C";
14789         case TG3_PHY_ID_BCM5720C:       return "5720C";
14790         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14791         case 0:                 return "serdes";
14792         default:                return "unknown";
14793         }
14794 }
14795
14796 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14797 {
14798         if (tg3_flag(tp, PCI_EXPRESS)) {
14799                 strcpy(str, "PCI Express");
14800                 return str;
14801         } else if (tg3_flag(tp, PCIX_MODE)) {
14802                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14803
14804                 strcpy(str, "PCIX:");
14805
14806                 if ((clock_ctrl == 7) ||
14807                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14808                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14809                         strcat(str, "133MHz");
14810                 else if (clock_ctrl == 0)
14811                         strcat(str, "33MHz");
14812                 else if (clock_ctrl == 2)
14813                         strcat(str, "50MHz");
14814                 else if (clock_ctrl == 4)
14815                         strcat(str, "66MHz");
14816                 else if (clock_ctrl == 6)
14817                         strcat(str, "100MHz");
14818         } else {
14819                 strcpy(str, "PCI:");
14820                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14821                         strcat(str, "66MHz");
14822                 else
14823                         strcat(str, "33MHz");
14824         }
14825         if (tg3_flag(tp, PCI_32BIT))
14826                 strcat(str, ":32-bit");
14827         else
14828                 strcat(str, ":64-bit");
14829         return str;
14830 }
14831
14832 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14833 {
14834         struct pci_dev *peer;
14835         unsigned int func, devnr = tp->pdev->devfn & ~7;
14836
14837         for (func = 0; func < 8; func++) {
14838                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14839                 if (peer && peer != tp->pdev)
14840                         break;
14841                 pci_dev_put(peer);
14842         }
14843         /* 5704 can be configured in single-port mode, set peer to
14844          * tp->pdev in that case.
14845          */
14846         if (!peer) {
14847                 peer = tp->pdev;
14848                 return peer;
14849         }
14850
14851         /*
14852          * We don't need to keep the refcount elevated; there's no way
14853          * to remove one half of this device without removing the other
14854          */
14855         pci_dev_put(peer);
14856
14857         return peer;
14858 }
14859
14860 static void __devinit tg3_init_coal(struct tg3 *tp)
14861 {
14862         struct ethtool_coalesce *ec = &tp->coal;
14863
14864         memset(ec, 0, sizeof(*ec));
14865         ec->cmd = ETHTOOL_GCOALESCE;
14866         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14867         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14868         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14869         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14870         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14871         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14872         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14873         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14874         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14875
14876         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14877                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14878                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14879                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14880                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14881                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14882         }
14883
14884         if (tg3_flag(tp, 5705_PLUS)) {
14885                 ec->rx_coalesce_usecs_irq = 0;
14886                 ec->tx_coalesce_usecs_irq = 0;
14887                 ec->stats_block_coalesce_usecs = 0;
14888         }
14889 }
14890
14891 static const struct net_device_ops tg3_netdev_ops = {
14892         .ndo_open               = tg3_open,
14893         .ndo_stop               = tg3_close,
14894         .ndo_start_xmit         = tg3_start_xmit,
14895         .ndo_get_stats64        = tg3_get_stats64,
14896         .ndo_validate_addr      = eth_validate_addr,
14897         .ndo_set_multicast_list = tg3_set_rx_mode,
14898         .ndo_set_mac_address    = tg3_set_mac_addr,
14899         .ndo_do_ioctl           = tg3_ioctl,
14900         .ndo_tx_timeout         = tg3_tx_timeout,
14901         .ndo_change_mtu         = tg3_change_mtu,
14902         .ndo_fix_features       = tg3_fix_features,
14903         .ndo_set_features       = tg3_set_features,
14904 #ifdef CONFIG_NET_POLL_CONTROLLER
14905         .ndo_poll_controller    = tg3_poll_controller,
14906 #endif
14907 };
14908
14909 static int __devinit tg3_init_one(struct pci_dev *pdev,
14910                                   const struct pci_device_id *ent)
14911 {
14912         struct net_device *dev;
14913         struct tg3 *tp;
14914         int i, err, pm_cap;
14915         u32 sndmbx, rcvmbx, intmbx;
14916         char str[40];
14917         u64 dma_mask, persist_dma_mask;
14918         u32 features = 0;
14919
14920         printk_once(KERN_INFO "%s\n", version);
14921
14922         err = pci_enable_device(pdev);
14923         if (err) {
14924                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14925                 return err;
14926         }
14927
14928         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14929         if (err) {
14930                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14931                 goto err_out_disable_pdev;
14932         }
14933
14934         pci_set_master(pdev);
14935
14936         /* Find power-management capability. */
14937         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14938         if (pm_cap == 0) {
14939                 dev_err(&pdev->dev,
14940                         "Cannot find Power Management capability, aborting\n");
14941                 err = -EIO;
14942                 goto err_out_free_res;
14943         }
14944
14945         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14946         if (!dev) {
14947                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14948                 err = -ENOMEM;
14949                 goto err_out_free_res;
14950         }
14951
14952         SET_NETDEV_DEV(dev, &pdev->dev);
14953
14954         tp = netdev_priv(dev);
14955         tp->pdev = pdev;
14956         tp->dev = dev;
14957         tp->pm_cap = pm_cap;
14958         tp->rx_mode = TG3_DEF_RX_MODE;
14959         tp->tx_mode = TG3_DEF_TX_MODE;
14960
14961         if (tg3_debug > 0)
14962                 tp->msg_enable = tg3_debug;
14963         else
14964                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14965
14966         /* The word/byte swap controls here control register access byte
14967          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
14968          * setting below.
14969          */
14970         tp->misc_host_ctrl =
14971                 MISC_HOST_CTRL_MASK_PCI_INT |
14972                 MISC_HOST_CTRL_WORD_SWAP |
14973                 MISC_HOST_CTRL_INDIR_ACCESS |
14974                 MISC_HOST_CTRL_PCISTATE_RW;
14975
14976         /* The NONFRM (non-frame) byte/word swap controls take effect
14977          * on descriptor entries, anything which isn't packet data.
14978          *
14979          * The StrongARM chips on the board (one for tx, one for rx)
14980          * are running in big-endian mode.
14981          */
14982         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14983                         GRC_MODE_WSWAP_NONFRM_DATA);
14984 #ifdef __BIG_ENDIAN
14985         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14986 #endif
14987         spin_lock_init(&tp->lock);
14988         spin_lock_init(&tp->indirect_lock);
14989         INIT_WORK(&tp->reset_task, tg3_reset_task);
14990
14991         tp->regs = pci_ioremap_bar(pdev, BAR_0);
14992         if (!tp->regs) {
14993                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14994                 err = -ENOMEM;
14995                 goto err_out_free_dev;
14996         }
14997
14998         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14999         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15000
15001         dev->ethtool_ops = &tg3_ethtool_ops;
15002         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15003         dev->netdev_ops = &tg3_netdev_ops;
15004         dev->irq = pdev->irq;
15005
15006         err = tg3_get_invariants(tp);
15007         if (err) {
15008                 dev_err(&pdev->dev,
15009                         "Problem fetching invariants of chip, aborting\n");
15010                 goto err_out_iounmap;
15011         }
15012
15013         /* The EPB bridge inside 5714, 5715, and 5780 and any
15014          * device behind the EPB cannot support DMA addresses > 40-bit.
15015          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15016          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15017          * do DMA address check in tg3_start_xmit().
15018          */
15019         if (tg3_flag(tp, IS_5788))
15020                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15021         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15022                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15023 #ifdef CONFIG_HIGHMEM
15024                 dma_mask = DMA_BIT_MASK(64);
15025 #endif
15026         } else
15027                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15028
15029         /* Configure DMA attributes. */
15030         if (dma_mask > DMA_BIT_MASK(32)) {
15031                 err = pci_set_dma_mask(pdev, dma_mask);
15032                 if (!err) {
15033                         features |= NETIF_F_HIGHDMA;
15034                         err = pci_set_consistent_dma_mask(pdev,
15035                                                           persist_dma_mask);
15036                         if (err < 0) {
15037                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15038                                         "DMA for consistent allocations\n");
15039                                 goto err_out_iounmap;
15040                         }
15041                 }
15042         }
15043         if (err || dma_mask == DMA_BIT_MASK(32)) {
15044                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15045                 if (err) {
15046                         dev_err(&pdev->dev,
15047                                 "No usable DMA configuration, aborting\n");
15048                         goto err_out_iounmap;
15049                 }
15050         }
15051
15052         tg3_init_bufmgr_config(tp);
15053
15054         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15055
15056         /* 5700 B0 chips do not support checksumming correctly due
15057          * to hardware bugs.
15058          */
15059         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15060                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15061
15062                 if (tg3_flag(tp, 5755_PLUS))
15063                         features |= NETIF_F_IPV6_CSUM;
15064         }
15065
15066         /* TSO is on by default on chips that support hardware TSO.
15067          * Firmware TSO on older chips gives lower performance, so it
15068          * is off by default, but can be enabled using ethtool.
15069          */
15070         if ((tg3_flag(tp, HW_TSO_1) ||
15071              tg3_flag(tp, HW_TSO_2) ||
15072              tg3_flag(tp, HW_TSO_3)) &&
15073             (features & NETIF_F_IP_CSUM))
15074                 features |= NETIF_F_TSO;
15075         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15076                 if (features & NETIF_F_IPV6_CSUM)
15077                         features |= NETIF_F_TSO6;
15078                 if (tg3_flag(tp, HW_TSO_3) ||
15079                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15080                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15081                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15082                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15083                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15084                         features |= NETIF_F_TSO_ECN;
15085         }
15086
15087         dev->features |= features;
15088         dev->vlan_features |= features;
15089
15090         /*
15091          * Add loopback capability only for a subset of devices that support
15092          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15093          * loopback for the remaining devices.
15094          */
15095         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15096             !tg3_flag(tp, CPMU_PRESENT))
15097                 /* Add the loopback capability */
15098                 features |= NETIF_F_LOOPBACK;
15099
15100         dev->hw_features |= features;
15101
15102         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15103             !tg3_flag(tp, TSO_CAPABLE) &&
15104             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15105                 tg3_flag_set(tp, MAX_RXPEND_64);
15106                 tp->rx_pending = 63;
15107         }
15108
15109         err = tg3_get_device_address(tp);
15110         if (err) {
15111                 dev_err(&pdev->dev,
15112                         "Could not obtain valid ethernet address, aborting\n");
15113                 goto err_out_iounmap;
15114         }
15115
15116         if (tg3_flag(tp, ENABLE_APE)) {
15117                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15118                 if (!tp->aperegs) {
15119                         dev_err(&pdev->dev,
15120                                 "Cannot map APE registers, aborting\n");
15121                         err = -ENOMEM;
15122                         goto err_out_iounmap;
15123                 }
15124
15125                 tg3_ape_lock_init(tp);
15126
15127                 if (tg3_flag(tp, ENABLE_ASF))
15128                         tg3_read_dash_ver(tp);
15129         }
15130
15131         /*
15132          * Reset chip in case UNDI or EFI driver did not shutdown
15133          * DMA self test will enable WDMAC and we'll see (spurious)
15134          * pending DMA on the PCI bus at that point.
15135          */
15136         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15137             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15138                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15139                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15140         }
15141
15142         err = tg3_test_dma(tp);
15143         if (err) {
15144                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15145                 goto err_out_apeunmap;
15146         }
15147
15148         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15149         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15150         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15151         for (i = 0; i < tp->irq_max; i++) {
15152                 struct tg3_napi *tnapi = &tp->napi[i];
15153
15154                 tnapi->tp = tp;
15155                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15156
15157                 tnapi->int_mbox = intmbx;
15158                 if (i < 4)
15159                         intmbx += 0x8;
15160                 else
15161                         intmbx += 0x4;
15162
15163                 tnapi->consmbox = rcvmbx;
15164                 tnapi->prodmbox = sndmbx;
15165
15166                 if (i)
15167                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15168                 else
15169                         tnapi->coal_now = HOSTCC_MODE_NOW;
15170
15171                 if (!tg3_flag(tp, SUPPORT_MSIX))
15172                         break;
15173
15174                 /*
15175                  * If we support MSIX, we'll be using RSS.  If we're using
15176                  * RSS, the first vector only handles link interrupts and the
15177                  * remaining vectors handle rx and tx interrupts.  Reuse the
15178                  * mailbox values for the next iteration.  The values we setup
15179                  * above are still useful for the single vectored mode.
15180                  */
15181                 if (!i)
15182                         continue;
15183
15184                 rcvmbx += 0x8;
15185
15186                 if (sndmbx & 0x4)
15187                         sndmbx -= 0x4;
15188                 else
15189                         sndmbx += 0xc;
15190         }
15191
15192         tg3_init_coal(tp);
15193
15194         pci_set_drvdata(pdev, dev);
15195
15196         err = register_netdev(dev);
15197         if (err) {
15198                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15199                 goto err_out_apeunmap;
15200         }
15201
15202         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15203                     tp->board_part_number,
15204                     tp->pci_chip_rev_id,
15205                     tg3_bus_string(tp, str),
15206                     dev->dev_addr);
15207
15208         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15209                 struct phy_device *phydev;
15210                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15211                 netdev_info(dev,
15212                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15213                             phydev->drv->name, dev_name(&phydev->dev));
15214         } else {
15215                 char *ethtype;
15216
15217                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15218                         ethtype = "10/100Base-TX";
15219                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15220                         ethtype = "1000Base-SX";
15221                 else
15222                         ethtype = "10/100/1000Base-T";
15223
15224                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15225                             "(WireSpeed[%d], EEE[%d])\n",
15226                             tg3_phy_string(tp), ethtype,
15227                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15228                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15229         }
15230
15231         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15232                     (dev->features & NETIF_F_RXCSUM) != 0,
15233                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15234                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15235                     tg3_flag(tp, ENABLE_ASF) != 0,
15236                     tg3_flag(tp, TSO_CAPABLE) != 0);
15237         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15238                     tp->dma_rwctrl,
15239                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15240                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15241
15242         pci_save_state(pdev);
15243
15244         return 0;
15245
15246 err_out_apeunmap:
15247         if (tp->aperegs) {
15248                 iounmap(tp->aperegs);
15249                 tp->aperegs = NULL;
15250         }
15251
15252 err_out_iounmap:
15253         if (tp->regs) {
15254                 iounmap(tp->regs);
15255                 tp->regs = NULL;
15256         }
15257
15258 err_out_free_dev:
15259         free_netdev(dev);
15260
15261 err_out_free_res:
15262         pci_release_regions(pdev);
15263
15264 err_out_disable_pdev:
15265         pci_disable_device(pdev);
15266         pci_set_drvdata(pdev, NULL);
15267         return err;
15268 }
15269
15270 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15271 {
15272         struct net_device *dev = pci_get_drvdata(pdev);
15273
15274         if (dev) {
15275                 struct tg3 *tp = netdev_priv(dev);
15276
15277                 if (tp->fw)
15278                         release_firmware(tp->fw);
15279
15280                 cancel_work_sync(&tp->reset_task);
15281
15282                 if (!tg3_flag(tp, USE_PHYLIB)) {
15283                         tg3_phy_fini(tp);
15284                         tg3_mdio_fini(tp);
15285                 }
15286
15287                 unregister_netdev(dev);
15288                 if (tp->aperegs) {
15289                         iounmap(tp->aperegs);
15290                         tp->aperegs = NULL;
15291                 }
15292                 if (tp->regs) {
15293                         iounmap(tp->regs);
15294                         tp->regs = NULL;
15295                 }
15296                 free_netdev(dev);
15297                 pci_release_regions(pdev);
15298                 pci_disable_device(pdev);
15299                 pci_set_drvdata(pdev, NULL);
15300         }
15301 }
15302
15303 #ifdef CONFIG_PM_SLEEP
15304 static int tg3_suspend(struct device *device)
15305 {
15306         struct pci_dev *pdev = to_pci_dev(device);
15307         struct net_device *dev = pci_get_drvdata(pdev);
15308         struct tg3 *tp = netdev_priv(dev);
15309         int err;
15310
15311         if (!netif_running(dev))
15312                 return 0;
15313
15314         flush_work_sync(&tp->reset_task);
15315         tg3_phy_stop(tp);
15316         tg3_netif_stop(tp);
15317
15318         del_timer_sync(&tp->timer);
15319
15320         tg3_full_lock(tp, 1);
15321         tg3_disable_ints(tp);
15322         tg3_full_unlock(tp);
15323
15324         netif_device_detach(dev);
15325
15326         tg3_full_lock(tp, 0);
15327         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15328         tg3_flag_clear(tp, INIT_COMPLETE);
15329         tg3_full_unlock(tp);
15330
15331         err = tg3_power_down_prepare(tp);
15332         if (err) {
15333                 int err2;
15334
15335                 tg3_full_lock(tp, 0);
15336
15337                 tg3_flag_set(tp, INIT_COMPLETE);
15338                 err2 = tg3_restart_hw(tp, 1);
15339                 if (err2)
15340                         goto out;
15341
15342                 tp->timer.expires = jiffies + tp->timer_offset;
15343                 add_timer(&tp->timer);
15344
15345                 netif_device_attach(dev);
15346                 tg3_netif_start(tp);
15347
15348 out:
15349                 tg3_full_unlock(tp);
15350
15351                 if (!err2)
15352                         tg3_phy_start(tp);
15353         }
15354
15355         return err;
15356 }
15357
15358 static int tg3_resume(struct device *device)
15359 {
15360         struct pci_dev *pdev = to_pci_dev(device);
15361         struct net_device *dev = pci_get_drvdata(pdev);
15362         struct tg3 *tp = netdev_priv(dev);
15363         int err;
15364
15365         if (!netif_running(dev))
15366                 return 0;
15367
15368         netif_device_attach(dev);
15369
15370         tg3_full_lock(tp, 0);
15371
15372         tg3_flag_set(tp, INIT_COMPLETE);
15373         err = tg3_restart_hw(tp, 1);
15374         if (err)
15375                 goto out;
15376
15377         tp->timer.expires = jiffies + tp->timer_offset;
15378         add_timer(&tp->timer);
15379
15380         tg3_netif_start(tp);
15381
15382 out:
15383         tg3_full_unlock(tp);
15384
15385         if (!err)
15386                 tg3_phy_start(tp);
15387
15388         return err;
15389 }
15390
15391 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15392 #define TG3_PM_OPS (&tg3_pm_ops)
15393
15394 #else
15395
15396 #define TG3_PM_OPS NULL
15397
15398 #endif /* CONFIG_PM_SLEEP */
15399
15400 /**
15401  * tg3_io_error_detected - called when PCI error is detected
15402  * @pdev: Pointer to PCI device
15403  * @state: The current pci connection state
15404  *
15405  * This function is called after a PCI bus error affecting
15406  * this device has been detected.
15407  */
15408 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15409                                               pci_channel_state_t state)
15410 {
15411         struct net_device *netdev = pci_get_drvdata(pdev);
15412         struct tg3 *tp = netdev_priv(netdev);
15413         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15414
15415         netdev_info(netdev, "PCI I/O error detected\n");
15416
15417         rtnl_lock();
15418
15419         if (!netif_running(netdev))
15420                 goto done;
15421
15422         tg3_phy_stop(tp);
15423
15424         tg3_netif_stop(tp);
15425
15426         del_timer_sync(&tp->timer);
15427         tg3_flag_clear(tp, RESTART_TIMER);
15428
15429         /* Want to make sure that the reset task doesn't run */
15430         cancel_work_sync(&tp->reset_task);
15431         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15432         tg3_flag_clear(tp, RESTART_TIMER);
15433
15434         netif_device_detach(netdev);
15435
15436         /* Clean up software state, even if MMIO is blocked */
15437         tg3_full_lock(tp, 0);
15438         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15439         tg3_full_unlock(tp);
15440
15441 done:
15442         if (state == pci_channel_io_perm_failure)
15443                 err = PCI_ERS_RESULT_DISCONNECT;
15444         else
15445                 pci_disable_device(pdev);
15446
15447         rtnl_unlock();
15448
15449         return err;
15450 }
15451
15452 /**
15453  * tg3_io_slot_reset - called after the pci bus has been reset.
15454  * @pdev: Pointer to PCI device
15455  *
15456  * Restart the card from scratch, as if from a cold-boot.
15457  * At this point, the card has exprienced a hard reset,
15458  * followed by fixups by BIOS, and has its config space
15459  * set up identically to what it was at cold boot.
15460  */
15461 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15462 {
15463         struct net_device *netdev = pci_get_drvdata(pdev);
15464         struct tg3 *tp = netdev_priv(netdev);
15465         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15466         int err;
15467
15468         rtnl_lock();
15469
15470         if (pci_enable_device(pdev)) {
15471                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15472                 goto done;
15473         }
15474
15475         pci_set_master(pdev);
15476         pci_restore_state(pdev);
15477         pci_save_state(pdev);
15478
15479         if (!netif_running(netdev)) {
15480                 rc = PCI_ERS_RESULT_RECOVERED;
15481                 goto done;
15482         }
15483
15484         err = tg3_power_up(tp);
15485         if (err) {
15486                 netdev_err(netdev, "Failed to restore register access.\n");
15487                 goto done;
15488         }
15489
15490         rc = PCI_ERS_RESULT_RECOVERED;
15491
15492 done:
15493         rtnl_unlock();
15494
15495         return rc;
15496 }
15497
15498 /**
15499  * tg3_io_resume - called when traffic can start flowing again.
15500  * @pdev: Pointer to PCI device
15501  *
15502  * This callback is called when the error recovery driver tells
15503  * us that its OK to resume normal operation.
15504  */
15505 static void tg3_io_resume(struct pci_dev *pdev)
15506 {
15507         struct net_device *netdev = pci_get_drvdata(pdev);
15508         struct tg3 *tp = netdev_priv(netdev);
15509         int err;
15510
15511         rtnl_lock();
15512
15513         if (!netif_running(netdev))
15514                 goto done;
15515
15516         tg3_full_lock(tp, 0);
15517         tg3_flag_set(tp, INIT_COMPLETE);
15518         err = tg3_restart_hw(tp, 1);
15519         tg3_full_unlock(tp);
15520         if (err) {
15521                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15522                 goto done;
15523         }
15524
15525         netif_device_attach(netdev);
15526
15527         tp->timer.expires = jiffies + tp->timer_offset;
15528         add_timer(&tp->timer);
15529
15530         tg3_netif_start(tp);
15531
15532         tg3_phy_start(tp);
15533
15534 done:
15535         rtnl_unlock();
15536 }
15537
15538 static struct pci_error_handlers tg3_err_handler = {
15539         .error_detected = tg3_io_error_detected,
15540         .slot_reset     = tg3_io_slot_reset,
15541         .resume         = tg3_io_resume
15542 };
15543
15544 static struct pci_driver tg3_driver = {
15545         .name           = DRV_MODULE_NAME,
15546         .id_table       = tg3_pci_tbl,
15547         .probe          = tg3_init_one,
15548         .remove         = __devexit_p(tg3_remove_one),
15549         .err_handler    = &tg3_err_handler,
15550         .driver.pm      = TG3_PM_OPS,
15551 };
15552
15553 static int __init tg3_init(void)
15554 {
15555         return pci_register_driver(&tg3_driver);
15556 }
15557
15558 static void __exit tg3_cleanup(void)
15559 {
15560         pci_unregister_driver(&tg3_driver);
15561 }
15562
15563 module_init(tg3_init);
15564 module_exit(tg3_cleanup);