/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
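
/* Illustration (not driver code): a call such as tg3_flag(tp, TAGGED_STATUS)
 * expands to _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), so callers
 * name a flag by its short suffix while still going through the type-checked
 * bit helpers above.
 */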

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     125
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "September 26, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
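
/* Worked example (illustrative only): TG3_TX_RING_SIZE is a power of two,
 * so the mask in NEXT_TX() is exactly the modulo described in the comment
 * above, e.g. (511 + 1) & 511 == 0 == (511 + 1) % 512, but computed with a
 * single AND instead of a hardware divide.
 */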

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
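
/* Worked example (illustrative only): with the default tx_pending of
 * TG3_DEF_TX_RING_PENDING == 511, the queue is woken once 511 / 4 == 127
 * descriptors are free again.
 */
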
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
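
/* Usage sketch (illustrative only, using the tw32_wait_f() wrapper defined
 * below): honor the GPIO power-switch settle time whether or not the write
 * is posted:
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 */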

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
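                /* Fall through for non-5761 chips. */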
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
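                /* Fall through for non-5761 chips. */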
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
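
/* Typical pairing (a sketch, not a fixed API contract): callers enable the
 * SMDSP clock, perform their DSP accesses, then disable it again:
 *
 *      if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *              tg3_phydsp_write(tp, reg, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */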

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
1464         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1465                 tg3_bmcr_reset(tp);
1466
1467         i = mdiobus_register(tp->mdio_bus);
1468         if (i) {
1469                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1470                 mdiobus_free(tp->mdio_bus);
1471                 return i;
1472         }
1473
1474         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1475
1476         if (!phydev || !phydev->drv) {
1477                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1478                 mdiobus_unregister(tp->mdio_bus);
1479                 mdiobus_free(tp->mdio_bus);
1480                 return -ENODEV;
1481         }
1482
1483         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1484         case PHY_ID_BCM57780:
1485                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1486                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1487                 break;
1488         case PHY_ID_BCM50610:
1489         case PHY_ID_BCM50610M:
1490                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1491                                      PHY_BRCM_RX_REFCLK_UNUSED |
1492                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1493                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1494                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1495                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1496                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1497                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1498                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1499                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1500                 /* fallthru */
1501         case PHY_ID_RTL8211C:
1502                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1503                 break;
1504         case PHY_ID_RTL8201E:
1505         case PHY_ID_BCMAC131:
1506                 phydev->interface = PHY_INTERFACE_MODE_MII;
1507                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1508                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1509                 break;
1510         }
1511
1512         tg3_flag_set(tp, MDIOBUS_INITED);
1513
1514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1515                 tg3_mdio_config_5785(tp);
1516
1517         return 0;
1518 }
1519
1520 static void tg3_mdio_fini(struct tg3 *tp)
1521 {
1522         if (tg3_flag(tp, MDIOBUS_INITED)) {
1523                 tg3_flag_clear(tp, MDIOBUS_INITED);
1524                 mdiobus_unregister(tp->mdio_bus);
1525                 mdiobus_free(tp->mdio_bus);
1526         }
1527 }
1528
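/* Ring the RX CPU doorbell to tell the firmware a new driver event is
 * pending, and timestamp it so tg3_wait_for_event_ack() can bound its
 * wait.
 */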
1529 /* tp->lock is held. */
1530 static inline void tg3_generate_fw_event(struct tg3 *tp)
1531 {
1532         u32 val;
1533
1534         val = tr32(GRC_RX_CPU_EVENT);
1535         val |= GRC_RX_CPU_DRIVER_EVENT;
1536         tw32_f(GRC_RX_CPU_EVENT, val);
1537
1538         tp->last_event_jiffies = jiffies;
1539 }
1540
1541 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1542
1543 /* tp->lock is held. */
1544 static void tg3_wait_for_event_ack(struct tg3 *tp)
1545 {
1546         int i;
1547         unsigned int delay_cnt;
1548         long time_remain;
1549
1550         /* If enough time has passed, no wait is necessary. */
1551         time_remain = (long)(tp->last_event_jiffies + 1 +
1552                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1553                       (long)jiffies;
1554         if (time_remain < 0)
1555                 return;
1556
1557         /* Check if we can shorten the wait time. */
1558         delay_cnt = jiffies_to_usecs(time_remain);
1559         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1560                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
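        /* Poll in 8 usec steps, rounding up so we always poll at least
         * once.  E.g. 2500 usec remaining -> (2500 >> 3) + 1 = 313
         * polls of 8 usec each.
         */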
1561         delay_cnt = (delay_cnt >> 3) + 1;
1562
1563         for (i = 0; i < delay_cnt; i++) {
1564                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1565                         break;
1566                 udelay(8);
1567         }
1568 }
1569
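/* Pack <BMCR,BMSR>, <ADVERTISE,LPA>, <CTRL1000,STAT1000> and the PHY
 * address register into the four words the firmware expects in the
 * link-update mailbox.
 */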
1570 /* tp->lock is held. */
1571 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1572 {
1573         u32 reg, val;
1574
1575         val = 0;
1576         if (!tg3_readphy(tp, MII_BMCR, &reg))
1577                 val = reg << 16;
1578         if (!tg3_readphy(tp, MII_BMSR, &reg))
1579                 val |= (reg & 0xffff);
1580         *data++ = val;
1581
1582         val = 0;
1583         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1584                 val = reg << 16;
1585         if (!tg3_readphy(tp, MII_LPA, &reg))
1586                 val |= (reg & 0xffff);
1587         *data++ = val;
1588
1589         val = 0;
1590         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1591                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1592                         val = reg << 16;
1593                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1594                         val |= (reg & 0xffff);
1595         }
1596         *data++ = val;
1597
1598         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1599                 val = reg << 16;
1600         else
1601                 val = 0;
1602         *data++ = val;
1603 }
1604
1605 /* tp->lock is held. */
1606 static void tg3_ump_link_report(struct tg3 *tp)
1607 {
1608         u32 data[4];
1609
1610         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1611                 return;
1612
1613         tg3_phy_gather_ump_data(tp, data);
1614
1615         tg3_wait_for_event_ack(tp);
1616
1617         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1618         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1623
1624         tg3_generate_fw_event(tp);
1625 }
1626
1627 /* tp->lock is held. */
1628 static void tg3_stop_fw(struct tg3 *tp)
1629 {
1630         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1631                 /* Wait for RX cpu to ACK the previous event. */
1632                 tg3_wait_for_event_ack(tp);
1633
1634                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1635
1636                 tg3_generate_fw_event(tp);
1637
1638                 /* Wait for RX cpu to ACK this event. */
1639                 tg3_wait_for_event_ack(tp);
1640         }
1641 }
1642
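/* Advertise an impending reset to the management firmware by writing
 * the matching DRV_STATE_* value into the driver-state mailbox and,
 * where applicable, notifying the APE.
 */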
1643 /* tp->lock is held. */
1644 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1645 {
1646         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1647                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1648
1649         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1650                 switch (kind) {
1651                 case RESET_KIND_INIT:
1652                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1653                                       DRV_STATE_START);
1654                         break;
1655
1656                 case RESET_KIND_SHUTDOWN:
1657                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1658                                       DRV_STATE_UNLOAD);
1659                         break;
1660
1661                 case RESET_KIND_SUSPEND:
1662                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1663                                       DRV_STATE_SUSPEND);
1664                         break;
1665
1666                 default:
1667                         break;
1668                 }
1669         }
1670
1671         if (kind == RESET_KIND_INIT ||
1672             kind == RESET_KIND_SUSPEND)
1673                 tg3_ape_driver_state_change(tp, kind);
1674 }
1675
1676 /* tp->lock is held. */
1677 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1678 {
1679         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1680                 switch (kind) {
1681                 case RESET_KIND_INIT:
1682                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1683                                       DRV_STATE_START_DONE);
1684                         break;
1685
1686                 case RESET_KIND_SHUTDOWN:
1687                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1688                                       DRV_STATE_UNLOAD_DONE);
1689                         break;
1690
1691                 default:
1692                         break;
1693                 }
1694         }
1695
1696         if (kind == RESET_KIND_SHUTDOWN)
1697                 tg3_ape_driver_state_change(tp, kind);
1698 }
1699
1700 /* tp->lock is held. */
1701 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1702 {
1703         if (tg3_flag(tp, ENABLE_ASF)) {
1704                 switch (kind) {
1705                 case RESET_KIND_INIT:
1706                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1707                                       DRV_STATE_START);
1708                         break;
1709
1710                 case RESET_KIND_SHUTDOWN:
1711                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1712                                       DRV_STATE_UNLOAD);
1713                         break;
1714
1715                 case RESET_KIND_SUSPEND:
1716                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1717                                       DRV_STATE_SUSPEND);
1718                         break;
1719
1720                 default:
1721                         break;
1722                 }
1723         }
1724 }
1725
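/* Wait for the bootcode to finish after a reset.  The 5906 signals
 * completion in VCPU_STATUS; other chips echo the one's complement of
 * the magic value back through the firmware mailbox.
 */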
1726 static int tg3_poll_fw(struct tg3 *tp)
1727 {
1728         int i;
1729         u32 val;
1730
1731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1732                 /* Wait up to 20ms for init done. */
1733                 for (i = 0; i < 200; i++) {
1734                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1735                                 return 0;
1736                         udelay(100);
1737                 }
1738                 return -ENODEV;
1739         }
1740
1741         /* Wait for firmware initialization to complete. */
1742         for (i = 0; i < 100000; i++) {
1743                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1744                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1745                         break;
1746                 udelay(10);
1747         }
1748
1749         /* Chip might not be fitted with firmware.  Some Sun onboard
1750          * parts are configured like that.  So don't signal the timeout
1751          * of the above loop as an error, but do report the lack of
1752          * running firmware once.
1753          */
1754         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1755                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1756
1757                 netdev_info(tp->dev, "No firmware running\n");
1758         }
1759
1760         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1761                 /* The 57765 A0 needs a little more time to settle
1762                  * after firmware initialization completes.
1763                  */
1764                 mdelay(10);
1765         }
1766
1767         return 0;
1768 }
1769
1770 static void tg3_link_report(struct tg3 *tp)
1771 {
1772         if (!netif_carrier_ok(tp->dev)) {
1773                 netif_info(tp, link, tp->dev, "Link is down\n");
1774                 tg3_ump_link_report(tp);
1775         } else if (netif_msg_link(tp)) {
1776                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1777                             (tp->link_config.active_speed == SPEED_1000 ?
1778                              1000 :
1779                              (tp->link_config.active_speed == SPEED_100 ?
1780                               100 : 10)),
1781                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1782                              "full" : "half"));
1783
1784                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1785                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1786                             "on" : "off",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1788                             "on" : "off");
1789
1790                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1791                         netdev_info(tp->dev, "EEE is %s\n",
1792                                     tp->setlpicnt ? "enabled" : "disabled");
1793
1794                 tg3_ump_link_report(tp);
1795         }
1796 }
1797
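/* Map FLOW_CTRL_TX/RX to the 1000base-X pause advertisement bits:
 * TX|RX -> PAUSE, TX only -> ASYM, RX only -> PAUSE|ASYM, none -> 0.
 */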
1798 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1799 {
1800         u16 miireg;
1801
1802         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1803                 miireg = ADVERTISE_1000XPAUSE;
1804         else if (flow_ctrl & FLOW_CTRL_TX)
1805                 miireg = ADVERTISE_1000XPSE_ASYM;
1806         else if (flow_ctrl & FLOW_CTRL_RX)
1807                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1808         else
1809                 miireg = 0;
1810
1811         return miireg;
1812 }
1813
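/* Resolve the negotiated pause mode from the local and remote
 * 1000base-X advertisements, per the IEEE 802.3 pause resolution
 * rules.
 */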
1814 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1815 {
1816         u8 cap = 0;
1817
1818         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1819                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1820         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1821                 if (lcladv & ADVERTISE_1000XPAUSE)
1822                         cap = FLOW_CTRL_RX;
1823                 if (rmtadv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_TX;
1825         }
1826
1827         return cap;
1828 }
1829
1830 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1831 {
1832         u8 autoneg;
1833         u8 flowctrl = 0;
1834         u32 old_rx_mode = tp->rx_mode;
1835         u32 old_tx_mode = tp->tx_mode;
1836
1837         if (tg3_flag(tp, USE_PHYLIB))
1838                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1839         else
1840                 autoneg = tp->link_config.autoneg;
1841
1842         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1843                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1844                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1845                 else
1846                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1847         } else
1848                 flowctrl = tp->link_config.flowctrl;
1849
1850         tp->link_config.active_flowctrl = flowctrl;
1851
1852         if (flowctrl & FLOW_CTRL_RX)
1853                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1854         else
1855                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1856
1857         if (old_rx_mode != tp->rx_mode)
1858                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1859
1860         if (flowctrl & FLOW_CTRL_TX)
1861                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1862         else
1863                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1864
1865         if (old_tx_mode != tp->tx_mode)
1866                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1867 }
1868
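/* phylib link-change callback.  Mirrors the PHY's speed, duplex and
 * pause state into the MAC mode registers and reports link changes.
 */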
1869 static void tg3_adjust_link(struct net_device *dev)
1870 {
1871         u8 oldflowctrl, linkmesg = 0;
1872         u32 mac_mode, lcl_adv, rmt_adv;
1873         struct tg3 *tp = netdev_priv(dev);
1874         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1875
1876         spin_lock_bh(&tp->lock);
1877
1878         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1879                                     MAC_MODE_HALF_DUPLEX);
1880
1881         oldflowctrl = tp->link_config.active_flowctrl;
1882
1883         if (phydev->link) {
1884                 lcl_adv = 0;
1885                 rmt_adv = 0;
1886
1887                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1888                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1889                 else if (phydev->speed == SPEED_1000 ||
1890                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1891                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1892                 else
1893                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1894
1895                 if (phydev->duplex == DUPLEX_HALF)
1896                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1897                 else {
1898                         lcl_adv = mii_advertise_flowctrl(
1899                                   tp->link_config.flowctrl);
1900
1901                         if (phydev->pause)
1902                                 rmt_adv = LPA_PAUSE_CAP;
1903                         if (phydev->asym_pause)
1904                                 rmt_adv |= LPA_PAUSE_ASYM;
1905                 }
1906
1907                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1908         } else
1909                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1910
1911         if (mac_mode != tp->mac_mode) {
1912                 tp->mac_mode = mac_mode;
1913                 tw32_f(MAC_MODE, tp->mac_mode);
1914                 udelay(40);
1915         }
1916
1917         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1918                 if (phydev->speed == SPEED_10)
1919                         tw32(MAC_MI_STAT,
1920                              MAC_MI_STAT_10MBPS_MODE |
1921                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1922                 else
1923                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924         }
1925
1926         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1927                 tw32(MAC_TX_LENGTHS,
1928                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1929                       (6 << TX_LENGTHS_IPG_SHIFT) |
1930                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1931         else
1932                 tw32(MAC_TX_LENGTHS,
1933                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1934                       (6 << TX_LENGTHS_IPG_SHIFT) |
1935                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1936
1937         if (phydev->link != tp->old_link ||
1938             phydev->speed != tp->link_config.active_speed ||
1939             phydev->duplex != tp->link_config.active_duplex ||
1940             oldflowctrl != tp->link_config.active_flowctrl)
1941                 linkmesg = 1;
1942
1943         tp->old_link = phydev->link;
1944         tp->link_config.active_speed = phydev->speed;
1945         tp->link_config.active_duplex = phydev->duplex;
1946
1947         spin_unlock_bh(&tp->lock);
1948
1949         if (linkmesg)
1950                 tg3_link_report(tp);
1951 }
1952
1953 static int tg3_phy_init(struct tg3 *tp)
1954 {
1955         struct phy_device *phydev;
1956
1957         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1958                 return 0;
1959
1960         /* Bring the PHY back to a known state. */
1961         tg3_bmcr_reset(tp);
1962
1963         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1964
1965         /* Attach the MAC to the PHY. */
1966         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1967                              phydev->dev_flags, phydev->interface);
1968         if (IS_ERR(phydev)) {
1969                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1970                 return PTR_ERR(phydev);
1971         }
1972
1973         /* Mask with MAC supported features. */
1974         switch (phydev->interface) {
1975         case PHY_INTERFACE_MODE_GMII:
1976         case PHY_INTERFACE_MODE_RGMII:
1977                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1978                         phydev->supported &= (PHY_GBIT_FEATURES |
1979                                               SUPPORTED_Pause |
1980                                               SUPPORTED_Asym_Pause);
1981                         break;
1982                 }
1983                 /* fallthru */
1984         case PHY_INTERFACE_MODE_MII:
1985                 phydev->supported &= (PHY_BASIC_FEATURES |
1986                                       SUPPORTED_Pause |
1987                                       SUPPORTED_Asym_Pause);
1988                 break;
1989         default:
1990                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1991                 return -EINVAL;
1992         }
1993
1994         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1995
1996         phydev->advertising = phydev->supported;
1997
1998         return 0;
1999 }
2000
2001 static void tg3_phy_start(struct tg3 *tp)
2002 {
2003         struct phy_device *phydev;
2004
2005         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2006                 return;
2007
2008         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2009
2010         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2011                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2012                 phydev->speed = tp->link_config.speed;
2013                 phydev->duplex = tp->link_config.duplex;
2014                 phydev->autoneg = tp->link_config.autoneg;
2015                 phydev->advertising = tp->link_config.advertising;
2016         }
2017
2018         phy_start(phydev);
2019
2020         phy_start_aneg(phydev);
2021 }
2022
2023 static void tg3_phy_stop(struct tg3 *tp)
2024 {
2025         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2026                 return;
2027
2028         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2029 }
2030
2031 static void tg3_phy_fini(struct tg3 *tp)
2032 {
2033         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2034                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2035                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2036         }
2037 }
2038
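/* Enable external loopback via the AUXCTL shadow register.  The 5401
 * gets a blind write since it cannot do read-modify-write.
 */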
2039 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2040 {
2041         int err;
2042         u32 val;
2043
2044         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2045                 return 0;
2046
2047         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2048                 /* Cannot do read-modify-write on 5401 */
2049                 err = tg3_phy_auxctl_write(tp,
2050                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2051                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2052                                            0x4c20);
2053                 goto done;
2054         }
2055
2056         err = tg3_phy_auxctl_read(tp,
2057                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2058         if (err)
2059                 return err;
2060
2061         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2062         err = tg3_phy_auxctl_write(tp,
2063                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2064
2065 done:
2066         return err;
2067 }
2068
2069 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2070 {
2071         u32 phytest;
2072
2073         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2074                 u32 phy;
2075
2076                 tg3_writephy(tp, MII_TG3_FET_TEST,
2077                              phytest | MII_TG3_FET_SHADOW_EN);
2078                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2079                         if (enable)
2080                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2081                         else
2082                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2084                 }
2085                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2086         }
2087 }
2088
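/* Enable or disable PHY auto power-down (APD), programming the SCR5
 * and APD shadow registers accordingly on chips that support it.
 */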
2089 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2090 {
2091         u32 reg;
2092
2093         if (!tg3_flag(tp, 5705_PLUS) ||
2094             (tg3_flag(tp, 5717_PLUS) &&
2095              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2096                 return;
2097
2098         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2099                 tg3_phy_fet_toggle_apd(tp, enable);
2100                 return;
2101         }
2102
2103         reg = MII_TG3_MISC_SHDW_WREN |
2104               MII_TG3_MISC_SHDW_SCR5_SEL |
2105               MII_TG3_MISC_SHDW_SCR5_LPED |
2106               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2107               MII_TG3_MISC_SHDW_SCR5_SDTL |
2108               MII_TG3_MISC_SHDW_SCR5_C125OE;
2109         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2110                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2111
2112         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2113
2115         reg = MII_TG3_MISC_SHDW_WREN |
2116               MII_TG3_MISC_SHDW_APD_SEL |
2117               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2118         if (enable)
2119                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2120
2121         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2122 }
2123
2124 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2125 {
2126         u32 phy;
2127
2128         if (!tg3_flag(tp, 5705_PLUS) ||
2129             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2130                 return;
2131
2132         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2133                 u32 ephy;
2134
2135                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2136                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2137
2138                         tg3_writephy(tp, MII_TG3_FET_TEST,
2139                                      ephy | MII_TG3_FET_SHADOW_EN);
2140                         if (!tg3_readphy(tp, reg, &phy)) {
2141                                 if (enable)
2142                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2143                                 else
2144                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 tg3_writephy(tp, reg, phy);
2146                         }
2147                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2148                 }
2149         } else {
2150                 int ret;
2151
2152                 ret = tg3_phy_auxctl_read(tp,
2153                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2154                 if (!ret) {
2155                         if (enable)
2156                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2157                         else
2158                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         tg3_phy_auxctl_write(tp,
2160                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2161                 }
2162         }
2163 }
2164
2165 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2166 {
2167         int ret;
2168         u32 val;
2169
2170         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2171                 return;
2172
2173         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2174         if (!ret)
2175                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2176                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2177 }
2178
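/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * values cached in tp->phy_otp.
 */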
2179 static void tg3_phy_apply_otp(struct tg3 *tp)
2180 {
2181         u32 otp, phy;
2182
2183         if (!tp->phy_otp)
2184                 return;
2185
2186         otp = tp->phy_otp;
2187
2188         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2189                 return;
2190
2191         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2192         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2193         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2194
2195         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2196               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2197         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2198
2199         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2200         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2201         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2202
2203         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2204         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2205
2206         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2207         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2208
2209         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2210               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2211         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2212
2213         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2214 }
2215
2216 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2217 {
2218         u32 val;
2219
2220         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2221                 return;
2222
2223         tp->setlpicnt = 0;
2224
2225         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2226             current_link_up == 1 &&
2227             tp->link_config.active_duplex == DUPLEX_FULL &&
2228             (tp->link_config.active_speed == SPEED_100 ||
2229              tp->link_config.active_speed == SPEED_1000)) {
2230                 u32 eeectl;
2231
2232                 if (tp->link_config.active_speed == SPEED_1000)
2233                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2234                 else
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2236
2237                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2238
2239                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2240                                   TG3_CL45_D7_EEERES_STAT, &val);
2241
2242                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2243                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2244                         tp->setlpicnt = 2;
2245         }
2246
2247         if (!tp->setlpicnt) {
2248                 if (current_link_up == 1 &&
2249                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2250                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2251                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2252                 }
2253
2254                 val = tr32(TG3_CPMU_EEE_MODE);
2255                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2256         }
2257 }
2258
2259 static void tg3_phy_eee_enable(struct tg3 *tp)
2260 {
2261         u32 val;
2262
2263         if (tp->link_config.active_speed == SPEED_1000 &&
2264             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2265              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2266              tg3_flag(tp, 57765_CLASS)) &&
2267             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2268                 val = MII_TG3_DSP_TAP26_ALNOKO |
2269                       MII_TG3_DSP_TAP26_RMRXSTO;
2270                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2271                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2272         }
2273
2274         val = tr32(TG3_CPMU_EEE_MODE);
2275         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2276 }
2277
2278 static int tg3_wait_macro_done(struct tg3 *tp)
2279 {
2280         int limit = 100;
2281
2282         while (limit--) {
2283                 u32 tmp32;
2284
2285                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2286                         if ((tmp32 & 0x1000) == 0)
2287                                 break;
2288                 }
2289         }
2290         if (limit < 0)
2291                 return -EBUSY;
2292
2293         return 0;
2294 }
2295
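/* Write a known test pattern through each of the four DSP channels and
 * read it back.  Any macro-engine timeout or miscompare asks the
 * caller to reset the PHY and retry.
 */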
2296 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2297 {
2298         static const u32 test_pat[4][6] = {
2299         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2300         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2301         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2302         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2303         };
2304         int chan;
2305
2306         for (chan = 0; chan < 4; chan++) {
2307                 int i;
2308
2309                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2310                              (chan * 0x2000) | 0x0200);
2311                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2312
2313                 for (i = 0; i < 6; i++)
2314                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2315                                      test_pat[chan][i]);
2316
2317                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2318                 if (tg3_wait_macro_done(tp)) {
2319                         *resetp = 1;
2320                         return -EBUSY;
2321                 }
2322
2323                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2324                              (chan * 0x2000) | 0x0200);
2325                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2326                 if (tg3_wait_macro_done(tp)) {
2327                         *resetp = 1;
2328                         return -EBUSY;
2329                 }
2330
2331                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2332                 if (tg3_wait_macro_done(tp)) {
2333                         *resetp = 1;
2334                         return -EBUSY;
2335                 }
2336
2337                 for (i = 0; i < 6; i += 2) {
2338                         u32 low, high;
2339
2340                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2341                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2342                             tg3_wait_macro_done(tp)) {
2343                                 *resetp = 1;
2344                                 return -EBUSY;
2345                         }
2346                         low &= 0x7fff;
2347                         high &= 0x000f;
2348                         if (low != test_pat[chan][i] ||
2349                             high != test_pat[chan][i+1]) {
2350                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2351                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2352                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2353
2354                                 return -EBUSY;
2355                         }
2356                 }
2357         }
2358
2359         return 0;
2360 }
2361
2362 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2363 {
2364         int chan;
2365
2366         for (chan = 0; chan < 4; chan++) {
2367                 int i;
2368
2369                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2370                              (chan * 0x2000) | 0x0200);
2371                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2372                 for (i = 0; i < 6; i++)
2373                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2374                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2375                 if (tg3_wait_macro_done(tp))
2376                         return -EBUSY;
2377         }
2378
2379         return 0;
2380 }
2381
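/* 5703/5704/5705 PHY reset workaround: force 1000-full master mode,
 * then retry the reset until the DSP test pattern verifies cleanly.
 */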
2382 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2383 {
2384         u32 reg32, phy9_orig;
2385         int retries, do_phy_reset, err;
2386
2387         retries = 10;
2388         do_phy_reset = 1;
2389         do {
2390                 if (do_phy_reset) {
2391                         err = tg3_bmcr_reset(tp);
2392                         if (err)
2393                                 return err;
2394                         do_phy_reset = 0;
2395                 }
2396
2397                 /* Disable transmitter and interrupt.  */
2398                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2399                         continue;
2400
2401                 reg32 |= 0x3000;
2402                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2403
2404                 /* Set full-duplex, 1000 Mbps.  */
2405                 tg3_writephy(tp, MII_BMCR,
2406                              BMCR_FULLDPLX | BMCR_SPEED1000);
2407
2408                 /* Set to master mode.  */
2409                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2410                         continue;
2411
2412                 tg3_writephy(tp, MII_CTRL1000,
2413                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2414
2415                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2416                 if (err)
2417                         return err;
2418
2419                 /* Block the PHY control access.  */
2420                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2421
2422                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2423                 if (!err)
2424                         break;
2425         } while (--retries);
2426
2427         err = tg3_phy_reset_chanpat(tp);
2428         if (err)
2429                 return err;
2430
2431         tg3_phydsp_write(tp, 0x8005, 0x0000);
2432
2433         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2434         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2435
2436         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2437
2438         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2439
2440         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2441                 reg32 &= ~0x3000;
2442                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2443         } else if (!err)
2444                 err = -EBUSY;
2445
2446         return err;
2447 }
2448
2449 /* Reset the tigon3 PHY and reapply the chip-specific workarounds
2450  * needed to bring it back to an operational state.
2451  */
2452 static int tg3_phy_reset(struct tg3 *tp)
2453 {
2454         u32 val, cpmuctrl;
2455         int err;
2456
2457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2458                 val = tr32(GRC_MISC_CFG);
2459                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2460                 udelay(40);
2461         }
2462         err  = tg3_readphy(tp, MII_BMSR, &val);
2463         err |= tg3_readphy(tp, MII_BMSR, &val);
2464         if (err != 0)
2465                 return -EBUSY;
2466
2467         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2468                 netif_carrier_off(tp->dev);
2469                 tg3_link_report(tp);
2470         }
2471
2472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2475                 err = tg3_phy_reset_5703_4_5(tp);
2476                 if (err)
2477                         return err;
2478                 goto out;
2479         }
2480
2481         cpmuctrl = 0;
2482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2483             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2484                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2485                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2486                         tw32(TG3_CPMU_CTRL,
2487                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2488         }
2489
2490         err = tg3_bmcr_reset(tp);
2491         if (err)
2492                 return err;
2493
2494         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2495                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2496                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2497
2498                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2499         }
2500
2501         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2502             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2503                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2504                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2505                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2506                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2507                         udelay(40);
2508                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2509                 }
2510         }
2511
2512         if (tg3_flag(tp, 5717_PLUS) &&
2513             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2514                 return 0;
2515
2516         tg3_phy_apply_otp(tp);
2517
2518         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2519                 tg3_phy_toggle_apd(tp, true);
2520         else
2521                 tg3_phy_toggle_apd(tp, false);
2522
2523 out:
2524         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2525             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2526                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2527                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2528                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2529         }
2530
2531         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2532                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2533                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2534         }
2535
2536         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2537                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2538                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2539                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2540                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2541                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2542                 }
2543         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2544                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2545                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2546                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2547                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2548                                 tg3_writephy(tp, MII_TG3_TEST1,
2549                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2550                         } else
2551                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2552
2553                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2554                 }
2555         }
2556
2557         /* Set the extended packet length bit (bit 14) on all chips
2558          * that support jumbo frames.  */
2559         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2560                 /* Cannot do read-modify-write on 5401 */
2561                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2562         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2563                 /* Set bit 14 with read-modify-write to preserve other bits */
2564                 err = tg3_phy_auxctl_read(tp,
2565                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2566                 if (!err)
2567                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2568                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2569         }
2570
2571         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2572          * jumbo frames transmission.
2573          */
2574         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2575                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2576                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2577                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2578         }
2579
2580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2581                 /* adjust output voltage */
2582                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2583         }
2584
2585         tg3_phy_toggle_automdix(tp, 1);
2586         tg3_phy_set_wirespeed(tp);
2587         return 0;
2588 }
2589
2590 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2591 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2592 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2593                                           TG3_GPIO_MSG_NEED_VAUX)
2594 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2595         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2596          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2597          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 12))
2599
2600 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2601         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2602          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2603          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 12))
2605
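/* Each PCI function owns a 4-bit status nibble in the shared GPIO
 * message word (APE GPIO_MSG on 5717/5719, CPMU_DRV_STATUS elsewhere);
 * e.g. function 2's bits sit at shift TG3_APE_GPIO_MSG_SHIFT + 8.
 * Returns the combined status of all functions.
 */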
2606 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2607 {
2608         u32 status, shift;
2609
2610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2611             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2612                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2613         else
2614                 status = tr32(TG3_CPMU_DRV_STATUS);
2615
2616         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2617         status &= ~(TG3_GPIO_MSG_MASK << shift);
2618         status |= (newstat << shift);
2619
2620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2621             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2622                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2623         else
2624                 tw32(TG3_CPMU_DRV_STATUS, status);
2625
2626         return status >> TG3_APE_GPIO_MSG_SHIFT;
2627 }
2628
2629 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2630 {
2631         if (!tg3_flag(tp, IS_NIC))
2632                 return 0;
2633
2634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2637                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2638                         return -EIO;
2639
2640                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2641
2642                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2643                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2644
2645                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2646         } else {
2647                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2648                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2649         }
2650
2651         return 0;
2652 }
2653
2654 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2655 {
2656         u32 grc_local_ctrl;
2657
2658         if (!tg3_flag(tp, IS_NIC) ||
2659             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2660             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2661                 return;
2662
2663         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2664
2665         tw32_wait_f(GRC_LOCAL_CTRL,
2666                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2667                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2668
2669         tw32_wait_f(GRC_LOCAL_CTRL,
2670                     grc_local_ctrl,
2671                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2672
2673         tw32_wait_f(GRC_LOCAL_CTRL,
2674                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2675                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2676 }
2677
2678 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2679 {
2680         if (!tg3_flag(tp, IS_NIC))
2681                 return;
2682
2683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2685                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2686                             (GRC_LCLCTRL_GPIO_OE0 |
2687                              GRC_LCLCTRL_GPIO_OE1 |
2688                              GRC_LCLCTRL_GPIO_OE2 |
2689                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2690                              GRC_LCLCTRL_GPIO_OUTPUT1),
2691                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2692         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2693                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2694                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2695                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2696                                      GRC_LCLCTRL_GPIO_OE1 |
2697                                      GRC_LCLCTRL_GPIO_OE2 |
2698                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2699                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2700                                      tp->grc_local_ctrl;
2701                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2702                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2703
2704                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2705                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2706                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2707
2708                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2709                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2710                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2711         } else {
2712                 u32 no_gpio2;
2713                 u32 grc_local_ctrl = 0;
2714
2715                 /* Workaround to prevent excessive current draw. */
2716                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2718                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2719                                     grc_local_ctrl,
2720                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2721                 }
2722
2723                 /* On 5753 and variants, GPIO2 cannot be used. */
2724                 no_gpio2 = tp->nic_sram_data_cfg &
2725                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2726
2727                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2728                                   GRC_LCLCTRL_GPIO_OE1 |
2729                                   GRC_LCLCTRL_GPIO_OE2 |
2730                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2731                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2732                 if (no_gpio2) {
2733                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2734                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2735                 }
2736                 tw32_wait_f(GRC_LOCAL_CTRL,
2737                             tp->grc_local_ctrl | grc_local_ctrl,
2738                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2739
2740                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2741
2742                 tw32_wait_f(GRC_LOCAL_CTRL,
2743                             tp->grc_local_ctrl | grc_local_ctrl,
2744                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2745
2746                 if (!no_gpio2) {
2747                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2748                         tw32_wait_f(GRC_LOCAL_CTRL,
2749                                     tp->grc_local_ctrl | grc_local_ctrl,
2750                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2751                 }
2752         }
2753 }
2754
2755 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2756 {
2757         u32 msg = 0;
2758
2759         /* Serialize power state transitions */
2760         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761                 return;
2762
2763         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2764                 msg = TG3_GPIO_MSG_NEED_VAUX;
2765
2766         msg = tg3_set_function_status(tp, msg);
2767
2768         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2769                 goto done;
2770
2771         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2772                 tg3_pwrsrc_switch_to_vaux(tp);
2773         else
2774                 tg3_pwrsrc_die_with_vmain(tp);
2775
2776 done:
2777         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2778 }
2779
2780 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2781 {
2782         bool need_vaux = false;
2783
2784         /* The 57765 class repurposes these GPIOs; skip the aux power logic. */
2785         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2786                 return;
2787
2788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2791                 tg3_frob_aux_power_5717(tp, include_wol ?
2792                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2793                 return;
2794         }
2795
2796         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2797                 struct net_device *dev_peer;
2798
2799                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2800
2801                 /* remove_one() may have been run on the peer. */
2802                 if (dev_peer) {
2803                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2804
2805                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2806                                 return;
2807
2808                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2809                             tg3_flag(tp_peer, ENABLE_ASF))
2810                                 need_vaux = true;
2811                 }
2812         }
2813
2814         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2815             tg3_flag(tp, ENABLE_ASF))
2816                 need_vaux = true;
2817
2818         if (need_vaux)
2819                 tg3_pwrsrc_switch_to_vaux(tp);
2820         else
2821                 tg3_pwrsrc_die_with_vmain(tp);
2822 }
2823
2824 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2825 {
2826         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2827                 return 1;
2828         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2829                 if (speed != SPEED_10)
2830                         return 1;
2831         } else if (speed == SPEED_10)
2832                 return 1;
2833
2834         return 0;
2835 }
2836
2837 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2838 {
2839         u32 val;
2840
2841         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2842                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2843                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2844                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2845
2846                         sg_dig_ctrl |=
2847                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2848                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2849                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2850                 }
2851                 return;
2852         }
2853
2854         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2855                 tg3_bmcr_reset(tp);
2856                 val = tr32(GRC_MISC_CFG);
2857                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2858                 udelay(40);
2859                 return;
2860         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2861                 u32 phytest;
2862                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2863                         u32 phy;
2864
2865                         tg3_writephy(tp, MII_ADVERTISE, 0);
2866                         tg3_writephy(tp, MII_BMCR,
2867                                      BMCR_ANENABLE | BMCR_ANRESTART);
2868
2869                         tg3_writephy(tp, MII_TG3_FET_TEST,
2870                                      phytest | MII_TG3_FET_SHADOW_EN);
2871                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2872                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2873                                 tg3_writephy(tp,
2874                                              MII_TG3_FET_SHDW_AUXMODE4,
2875                                              phy);
2876                         }
2877                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2878                 }
2879                 return;
2880         } else if (do_low_power) {
2881                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2882                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2883
2884                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2885                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2886                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2887                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2888         }
2889
2890         /* The PHY should not be powered down on some chips because
2891          * of bugs.
2892          */
2893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2894             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2895             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2896              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2898              !tp->pci_fn))
2899                 return;
2900
2901         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2902             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2903                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2904                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2905                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2906                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2907         }
2908
2909         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2910 }
2911
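/* Acquire the NVRAM software arbitration semaphore: request it via
 * SWARB_REQ_SET1 and poll for SWARB_GNT1 (up to 8000 x 20 usec, about
 * 160 ms).  The lock nests via nvram_lock_cnt, so recursive callers
 * only touch the hardware on the first acquisition.
 */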
2912 /* tp->lock is held. */
2913 static int tg3_nvram_lock(struct tg3 *tp)
2914 {
2915         if (tg3_flag(tp, NVRAM)) {
2916                 int i;
2917
2918                 if (tp->nvram_lock_cnt == 0) {
2919                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2920                         for (i = 0; i < 8000; i++) {
2921                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2922                                         break;
2923                                 udelay(20);
2924                         }
2925                         if (i == 8000) {
2926                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2927                                 return -ENODEV;
2928                         }
2929                 }
2930                 tp->nvram_lock_cnt++;
2931         }
2932         return 0;
2933 }
2934
2935 /* tp->lock is held. */
2936 static void tg3_nvram_unlock(struct tg3 *tp)
2937 {
2938         if (tg3_flag(tp, NVRAM)) {
2939                 if (tp->nvram_lock_cnt > 0)
2940                         tp->nvram_lock_cnt--;
2941                 if (tp->nvram_lock_cnt == 0)
2942                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2943         }
2944 }
2945
2946 /* tp->lock is held. */
2947 static void tg3_enable_nvram_access(struct tg3 *tp)
2948 {
2949         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2950                 u32 nvaccess = tr32(NVRAM_ACCESS);
2951
2952                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2953         }
2954 }
2955
2956 /* tp->lock is held. */
2957 static void tg3_disable_nvram_access(struct tg3 *tp)
2958 {
2959         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2960                 u32 nvaccess = tr32(NVRAM_ACCESS);
2961
2962                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2963         }
2964 }
2965
2966 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2967                                         u32 offset, u32 *val)
2968 {
2969         u32 tmp;
2970         int i;
2971
2972         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2973                 return -EINVAL;
2974
2975         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2976                                         EEPROM_ADDR_DEVID_MASK |
2977                                         EEPROM_ADDR_READ);
2978         tw32(GRC_EEPROM_ADDR,
2979              tmp |
2980              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2981              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2982               EEPROM_ADDR_ADDR_MASK) |
2983              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2984
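        /* Wait up to ~1 second (1000 x 1 ms) for the EEPROM read to
         * complete.
         */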
2985         for (i = 0; i < 1000; i++) {
2986                 tmp = tr32(GRC_EEPROM_ADDR);
2987
2988                 if (tmp & EEPROM_ADDR_COMPLETE)
2989                         break;
2990                 msleep(1);
2991         }
2992         if (!(tmp & EEPROM_ADDR_COMPLETE))
2993                 return -EBUSY;
2994
2995         tmp = tr32(GRC_EEPROM_DATA);
2996
2997         /*
2998          * The data will always be opposite the native endian
2999          * format.  Perform a blind byteswap to compensate.
3000          */
3001         *val = swab32(tmp);
3002
3003         return 0;
3004 }
3005
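/* Maximum number of 10 usec polls for NVRAM_CMD_DONE, i.e. a worst-case
 * wait of roughly 100 ms in tg3_nvram_exec_cmd() below.
 */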
3006 #define NVRAM_CMD_TIMEOUT 10000
3007
3008 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3009 {
3010         int i;
3011
3012         tw32(NVRAM_CMD, nvram_cmd);
3013         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3014                 udelay(10);
3015                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3016                         udelay(10);
3017                         break;
3018                 }
3019         }
3020
3021         if (i == NVRAM_CMD_TIMEOUT)
3022                 return -EBUSY;
3023
3024         return 0;
3025 }
3026
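/* Atmel AT45DB0x1B-style flashes are page-addressed rather than
 * flat-addressed: the page index sits in the bits at and above
 * ATMEL_AT45DB0X1B_PAGE_POS, the byte offset within the page below it.
 * Convert a linear NVRAM offset into that physical form.
 */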
3027 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3028 {
3029         if (tg3_flag(tp, NVRAM) &&
3030             tg3_flag(tp, NVRAM_BUFFERED) &&
3031             tg3_flag(tp, FLASH) &&
3032             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3033             (tp->nvram_jedecnum == JEDEC_ATMEL))
3034
3035                 addr = ((addr / tp->nvram_pagesize) <<
3036                         ATMEL_AT45DB0X1B_PAGE_POS) +
3037                        (addr % tp->nvram_pagesize);
3038
3039         return addr;
3040 }
3041
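/* Inverse of tg3_nvram_phys_addr(): fold a page-addressed offset back
 * into a linear one.
 */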
3042 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3043 {
3044         if (tg3_flag(tp, NVRAM) &&
3045             tg3_flag(tp, NVRAM_BUFFERED) &&
3046             tg3_flag(tp, FLASH) &&
3047             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3048             (tp->nvram_jedecnum == JEDEC_ATMEL))
3049
3050                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3051                         tp->nvram_pagesize) +
3052                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3053
3054         return addr;
3055 }
3056
3057 /* NOTE: Data read from NVRAM is byteswapped according to
3058  * the byteswapping settings for all other register accesses.
3059  * tg3 devices are BE devices, so on a BE machine, the data
3060  * returned will be exactly as it is seen in NVRAM.  On a LE
3061  * machine, the 32-bit value will be byteswapped.
3062  */
3063 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3064 {
3065         int ret;
3066
3067         if (!tg3_flag(tp, NVRAM))
3068                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3069
3070         offset = tg3_nvram_phys_addr(tp, offset);
3071
3072         if (offset > NVRAM_ADDR_MSK)
3073                 return -EINVAL;
3074
3075         ret = tg3_nvram_lock(tp);
3076         if (ret)
3077                 return ret;
3078
3079         tg3_enable_nvram_access(tp);
3080
3081         tw32(NVRAM_ADDR, offset);
3082         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3083                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3084
3085         if (ret == 0)
3086                 *val = tr32(NVRAM_RDDATA);
3087
3088         tg3_disable_nvram_access(tp);
3089
3090         tg3_nvram_unlock(tp);
3091
3092         return ret;
3093 }
3094
3095 /* Ensures NVRAM data is in bytestream format. */
3096 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3097 {
3098         u32 v;
3099         int res = tg3_nvram_read(tp, offset, &v);
3100         if (!res)
3101                 *val = cpu_to_be32(v);
3102         return res;
3103 }
3104
3105 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3106                                     u32 offset, u32 len, u8 *buf)
3107 {
3108         int i, j, rc = 0;
3109         u32 val;
3110
3111         for (i = 0; i < len; i += 4) {
3112                 u32 addr;
3113                 __be32 data;
3114
3115                 addr = offset + i;
3116
3117                 memcpy(&data, buf + i, 4);
3118
3119                 /*
3120                  * The SEEPROM interface expects the data to always be opposite
3121                  * the native endian format.  We accomplish this by reversing
3122                  * all the operations that would have been performed on the
3123                  * data from a call to tg3_nvram_read_be32().
3124                  */
3125                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3126
3127                 val = tr32(GRC_EEPROM_ADDR);
3128                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3129
3130                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3131                         EEPROM_ADDR_READ);
3132                 tw32(GRC_EEPROM_ADDR, val |
3133                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3134                         (addr & EEPROM_ADDR_ADDR_MASK) |
3135                         EEPROM_ADDR_START |
3136                         EEPROM_ADDR_WRITE);
3137
3138                 for (j = 0; j < 1000; j++) {
3139                         val = tr32(GRC_EEPROM_ADDR);
3140
3141                         if (val & EEPROM_ADDR_COMPLETE)
3142                                 break;
3143                         msleep(1);
3144                 }
3145                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3146                         rc = -EBUSY;
3147                         break;
3148                 }
3149         }
3150
3151         return rc;
3152 }
3153
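/* Unbuffered flash parts can only be erased a full page at a time, so
 * this is a read-modify-write: read the enclosing page into a scratch
 * buffer, merge in the caller's data, erase the page, then rewrite it
 * word by word.
 */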
3154 /* offset and length are dword aligned */
3155 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3156                 u8 *buf)
3157 {
3158         int ret = 0;
3159         u32 pagesize = tp->nvram_pagesize;
3160         u32 pagemask = pagesize - 1;
3161         u32 nvram_cmd;
3162         u8 *tmp;
3163
3164         tmp = kmalloc(pagesize, GFP_KERNEL);
3165         if (tmp == NULL)
3166                 return -ENOMEM;
3167
3168         while (len) {
3169                 int j;
3170                 u32 phy_addr, page_off, size;
3171
3172                 phy_addr = offset & ~pagemask;
3173
3174                 for (j = 0; j < pagesize; j += 4) {
3175                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3176                                                   (__be32 *) (tmp + j));
3177                         if (ret)
3178                                 break;
3179                 }
3180                 if (ret)
3181                         break;
3182
3183                 page_off = offset & pagemask;
3184                 size = pagesize;
3185                 if (len < size)
3186                         size = len;
3187
3188                 len -= size;
3189
3190                 memcpy(tmp + page_off, buf, size);
3191
3192                 offset = offset + (pagesize - page_off);
3193
3194                 tg3_enable_nvram_access(tp);
3195
3196                 /*
3197                  * Before we can erase the flash page, we need
3198                  * to issue a special "write enable" command.
3199                  */
3200                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3201
3202                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3203                         break;
3204
3205                 /* Erase the target page */
3206                 tw32(NVRAM_ADDR, phy_addr);
3207
3208                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3209                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3210
3211                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3212                         break;
3213
3214                 /* Issue another write enable to start the write. */
3215                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3216
3217                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3218                         break;
3219
3220                 for (j = 0; j < pagesize; j += 4) {
3221                         __be32 data;
3222
3223                         data = *((__be32 *) (tmp + j));
3224
3225                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3226
3227                         tw32(NVRAM_ADDR, phy_addr + j);
3228
3229                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3230                                 NVRAM_CMD_WR;
3231
3232                         if (j == 0)
3233                                 nvram_cmd |= NVRAM_CMD_FIRST;
3234                         else if (j == (pagesize - 4))
3235                                 nvram_cmd |= NVRAM_CMD_LAST;
3236
3237                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3238                         if (ret)
3239                                 break;
3240                 }
3241                 if (ret)
3242                         break;
3243         }
3244
3245         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3246         tg3_nvram_exec_cmd(tp, nvram_cmd);
3247
3248         kfree(tmp);
3249
3250         return ret;
3251 }
3252
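/* Buffered flash parts accept writes one dword at a time; the
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST bits are set at page boundaries and at
 * the start and end of the transfer.
 */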
3253 /* offset and length are dword aligned */
3254 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3255                 u8 *buf)
3256 {
3257         int i, ret = 0;
3258
3259         for (i = 0; i < len; i += 4, offset += 4) {
3260                 u32 page_off, phy_addr, nvram_cmd;
3261                 __be32 data;
3262
3263                 memcpy(&data, buf + i, 4);
3264                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3265
3266                 page_off = offset % tp->nvram_pagesize;
3267
3268                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3269
3270                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3271
3272                 if (page_off == 0 || i == 0)
3273                         nvram_cmd |= NVRAM_CMD_FIRST;
3274                 if (page_off == (tp->nvram_pagesize - 4))
3275                         nvram_cmd |= NVRAM_CMD_LAST;
3276
3277                 if (i == (len - 4))
3278                         nvram_cmd |= NVRAM_CMD_LAST;
3279
3280                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3281                     !tg3_flag(tp, FLASH) ||
3282                     !tg3_flag(tp, 57765_PLUS))
3283                         tw32(NVRAM_ADDR, phy_addr);
3284
3285                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3286                     !tg3_flag(tp, 5755_PLUS) &&
3287                     (tp->nvram_jedecnum == JEDEC_ST) &&
3288                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3289                         u32 cmd;
3290
3291                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292                         ret = tg3_nvram_exec_cmd(tp, cmd);
3293                         if (ret)
3294                                 break;
3295                 }
3296                 if (!tg3_flag(tp, FLASH)) {
3297                         /* We always do complete word writes to eeprom. */
3298                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3299                 }
3300
3301                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3302                 if (ret)
3303                         break;
3304         }
3305         return ret;
3306 }
3307
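/* Top-level NVRAM write: deassert the write-protect GPIO if needed,
 * enable write access, dispatch to the EEPROM, buffered-flash, or
 * unbuffered-flash routine, and restore protection on the way out.
 */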
3308 /* offset and length are dword aligned */
3309 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3310 {
3311         int ret;
3312
3313         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3314                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3315                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3316                 udelay(40);
3317         }
3318
3319         if (!tg3_flag(tp, NVRAM)) {
3320                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3321         } else {
3322                 u32 grc_mode;
3323
3324                 ret = tg3_nvram_lock(tp);
3325                 if (ret)
3326                         return ret;
3327
3328                 tg3_enable_nvram_access(tp);
3329                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3330                         tw32(NVRAM_WRITE1, 0x406);
3331
3332                 grc_mode = tr32(GRC_MODE);
3333                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3334
3335                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3336                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3337                                 buf);
3338                 } else {
3339                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3340                                 buf);
3341                 }
3342
3343                 grc_mode = tr32(GRC_MODE);
3344                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3345
3346                 tg3_disable_nvram_access(tp);
3347                 tg3_nvram_unlock(tp);
3348         }
3349
3350         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3351                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3352                 udelay(40);
3353         }
3354
3355         return ret;
3356 }
3357
3358 #define RX_CPU_SCRATCH_BASE     0x30000
3359 #define RX_CPU_SCRATCH_SIZE     0x04000
3360 #define TX_CPU_SCRATCH_BASE     0x34000
3361 #define TX_CPU_SCRATCH_SIZE     0x04000
3362
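/* Halt the RX or TX on-chip CPU.  5906 parts halt the VCPU through the
 * extended control register; everything else asserts CPU_MODE_HALT and
 * retries up to 10000 times for the CPU to report the halted state.
 */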
3363 /* tp->lock is held. */
3364 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3365 {
3366         int i;
3367
3368         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3369
3370         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3371                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3372
3373                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3374                 return 0;
3375         }
3376         if (offset == RX_CPU_BASE) {
3377                 for (i = 0; i < 10000; i++) {
3378                         tw32(offset + CPU_STATE, 0xffffffff);
3379                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3380                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3381                                 break;
3382                 }
3383
3384                 tw32(offset + CPU_STATE, 0xffffffff);
3385                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3386                 udelay(10);
3387         } else {
3388                 for (i = 0; i < 10000; i++) {
3389                         tw32(offset + CPU_STATE, 0xffffffff);
3390                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3391                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3392                                 break;
3393                 }
3394         }
3395
3396         if (i >= 10000) {
3397                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3398                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3399                 return -ENODEV;
3400         }
3401
3402         /* Clear firmware's nvram arbitration. */
3403         if (tg3_flag(tp, NVRAM))
3404                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3405         return 0;
3406 }
3407
3408 struct fw_info {
3409         unsigned int fw_base;
3410         unsigned int fw_len;
3411         const __be32 *fw_data;
3412 };
3413
3414 /* tp->lock is held. */
3415 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3416                                  u32 cpu_scratch_base, int cpu_scratch_size,
3417                                  struct fw_info *info)
3418 {
3419         int err, lock_err, i;
3420         void (*write_op)(struct tg3 *, u32, u32);
3421
3422         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3423                 netdev_err(tp->dev,
3424                            "%s: trying to load TX cpu firmware on a 5705 or later chip\n",
3425                            __func__);
3426                 return -EINVAL;
3427         }
3428
3429         if (tg3_flag(tp, 5705_PLUS))
3430                 write_op = tg3_write_mem;
3431         else
3432                 write_op = tg3_write_indirect_reg32;
3433
3434         /* It is possible that bootcode is still loading at this point.
3435          * Get the nvram lock before halting the cpu.
3436          */
3437         lock_err = tg3_nvram_lock(tp);
3438         err = tg3_halt_cpu(tp, cpu_base);
3439         if (!lock_err)
3440                 tg3_nvram_unlock(tp);
3441         if (err)
3442                 goto out;
3443
3444         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3445                 write_op(tp, cpu_scratch_base + i, 0);
3446         tw32(cpu_base + CPU_STATE, 0xffffffff);
3447         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
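        /* Copy the firmware image into CPU scratch memory; the low 16
         * bits of fw_base give the load offset within the scratch window.
         */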
3448         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3449                 write_op(tp, (cpu_scratch_base +
3450                               (info->fw_base & 0xffff) +
3451                               (i * sizeof(u32))),
3452                               be32_to_cpu(info->fw_data[i]));
3453
3454         err = 0;
3455
3456 out:
3457         return err;
3458 }
3459
3460 /* tp->lock is held. */
3461 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3462 {
3463         struct fw_info info;
3464         const __be32 *fw_data;
3465         int err, i;
3466
3467         fw_data = (void *)tp->fw->data;
3468
3469         /* The firmware blob starts with version numbers, followed by
3470            the start address and length.  We use the complete length:
3471            length = end_address_of_bss - start_address_of_text.  The
3472            remainder is the blob itself, loaded contiguously from the
3473            start address. */
3474
3475         info.fw_base = be32_to_cpu(fw_data[1]);
3476         info.fw_len = tp->fw->size - 12;
3477         info.fw_data = &fw_data[3];
3478
3479         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3480                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3481                                     &info);
3482         if (err)
3483                 return err;
3484
3485         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3486                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3487                                     &info);
3488         if (err)
3489                 return err;
3490
3491         /* Now start up only the RX cpu. */
3492         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3493         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3494
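        /* Verify the PC latched the entry point, re-halting the CPU and
         * rewriting the PC up to 5 times if necessary.
         */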
3495         for (i = 0; i < 5; i++) {
3496                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3497                         break;
3498                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3499                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3500                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3501                 udelay(1000);
3502         }
3503         if (i >= 5) {
3504                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3505                            "should be %08x\n", __func__,
3506                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3507                 return -ENODEV;
3508         }
3509         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3510         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3511
3512         return 0;
3513 }
3514
3515 /* tp->lock is held. */
3516 static int tg3_load_tso_firmware(struct tg3 *tp)
3517 {
3518         struct fw_info info;
3519         const __be32 *fw_data;
3520         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3521         int err, i;
3522
3523         if (tg3_flag(tp, HW_TSO_1) ||
3524             tg3_flag(tp, HW_TSO_2) ||
3525             tg3_flag(tp, HW_TSO_3))
3526                 return 0;
3527
3528         fw_data = (void *)tp->fw->data;
3529
3530         /* The firmware blob starts with version numbers, followed by
3531            the start address and length.  We use the complete length:
3532            length = end_address_of_bss - start_address_of_text.  The
3533            remainder is the blob itself, loaded contiguously from the
3534            start address. */
3535
3536         info.fw_base = be32_to_cpu(fw_data[1]);
3537         cpu_scratch_size = tp->fw_len;
3538         info.fw_len = tp->fw->size - 12;
3539         info.fw_data = &fw_data[3];
3540
3541         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3542                 cpu_base = RX_CPU_BASE;
3543                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3544         } else {
3545                 cpu_base = TX_CPU_BASE;
3546                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3547                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3548         }
3549
3550         err = tg3_load_firmware_cpu(tp, cpu_base,
3551                                     cpu_scratch_base, cpu_scratch_size,
3552                                     &info);
3553         if (err)
3554                 return err;
3555
3556         /* Now start up the cpu. */
3557         tw32(cpu_base + CPU_STATE, 0xffffffff);
3558         tw32_f(cpu_base + CPU_PC, info.fw_base);
3559
3560         for (i = 0; i < 5; i++) {
3561                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3562                         break;
3563                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3564                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3565                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3566                 udelay(1000);
3567         }
3568         if (i >= 5) {
3569                 netdev_err(tp->dev,
3570                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3571                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3572                 return -ENODEV;
3573         }
3574         tw32(cpu_base + CPU_STATE, 0xffffffff);
3575         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3576         return 0;
3577 }
3578
3579
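/* Program the station address into all four MAC_ADDR_{0..3} register
 * pairs (plus the twelve extended slots on 5703/5704), then seed the
 * transmit backoff generator from the byte sum of the address.
 */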
3580 /* tp->lock is held. */
3581 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3582 {
3583         u32 addr_high, addr_low;
3584         int i;
3585
3586         addr_high = ((tp->dev->dev_addr[0] << 8) |
3587                      tp->dev->dev_addr[1]);
3588         addr_low = ((tp->dev->dev_addr[2] << 24) |
3589                     (tp->dev->dev_addr[3] << 16) |
3590                     (tp->dev->dev_addr[4] <<  8) |
3591                     (tp->dev->dev_addr[5] <<  0));
3592         for (i = 0; i < 4; i++) {
3593                 if (i == 1 && skip_mac_1)
3594                         continue;
3595                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3596                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3597         }
3598
3599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3601                 for (i = 0; i < 12; i++) {
3602                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3603                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3604                 }
3605         }
3606
3607         addr_high = (tp->dev->dev_addr[0] +
3608                      tp->dev->dev_addr[1] +
3609                      tp->dev->dev_addr[2] +
3610                      tp->dev->dev_addr[3] +
3611                      tp->dev->dev_addr[4] +
3612                      tp->dev->dev_addr[5]) &
3613                 TX_BACKOFF_SEED_MASK;
3614         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3615 }
3616
3617 static void tg3_enable_register_access(struct tg3 *tp)
3618 {
3619         /*
3620          * Make sure register accesses (indirect or otherwise) will function
3621          * correctly.
3622          */
3623         pci_write_config_dword(tp->pdev,
3624                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3625 }
3626
3627 static int tg3_power_up(struct tg3 *tp)
3628 {
3629         int err;
3630
3631         tg3_enable_register_access(tp);
3632
3633         err = pci_set_power_state(tp->pdev, PCI_D0);
3634         if (!err) {
3635                 /* Switch out of Vaux if it is a NIC */
3636                 tg3_pwrsrc_switch_to_vmain(tp);
3637         } else {
3638                 netdev_err(tp->dev, "Transition to D0 failed\n");
3639         }
3640
3641         return err;
3642 }
3643
3644 static int tg3_setup_phy(struct tg3 *, int);
3645
3646 static int tg3_power_down_prepare(struct tg3 *tp)
3647 {
3648         u32 misc_host_ctrl;
3649         bool device_should_wake, do_low_power;
3650
3651         tg3_enable_register_access(tp);
3652
3653         /* Restore the CLKREQ setting. */
3654         if (tg3_flag(tp, CLKREQ_BUG))
3655                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3656                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3657
3658         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3659         tw32(TG3PCI_MISC_HOST_CTRL,
3660              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3661
3662         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3663                              tg3_flag(tp, WOL_ENABLE);
3664
3665         if (tg3_flag(tp, USE_PHYLIB)) {
3666                 do_low_power = false;
3667                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3668                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3669                         struct phy_device *phydev;
3670                         u32 phyid, advertising;
3671
3672                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3673
3674                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3675
3676                         tp->link_config.speed = phydev->speed;
3677                         tp->link_config.duplex = phydev->duplex;
3678                         tp->link_config.autoneg = phydev->autoneg;
3679                         tp->link_config.advertising = phydev->advertising;
3680
3681                         advertising = ADVERTISED_TP |
3682                                       ADVERTISED_Pause |
3683                                       ADVERTISED_Autoneg |
3684                                       ADVERTISED_10baseT_Half;
3685
3686                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3687                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3688                                         advertising |=
3689                                                 ADVERTISED_100baseT_Half |
3690                                                 ADVERTISED_100baseT_Full |
3691                                                 ADVERTISED_10baseT_Full;
3692                                 else
3693                                         advertising |= ADVERTISED_10baseT_Full;
3694                         }
3695
3696                         phydev->advertising = advertising;
3697
3698                         phy_start_aneg(phydev);
3699
3700                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3701                         if (phyid != PHY_ID_BCMAC131) {
3702                                 phyid &= PHY_BCM_OUI_MASK;
3703                                 if (phyid == PHY_BCM_OUI_1 ||
3704                                     phyid == PHY_BCM_OUI_2 ||
3705                                     phyid == PHY_BCM_OUI_3)
3706                                         do_low_power = true;
3707                         }
3708                 }
3709         } else {
3710                 do_low_power = true;
3711
3712                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3713                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3714
3715                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3716                         tg3_setup_phy(tp, 0);
3717         }
3718
3719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3720                 u32 val;
3721
3722                 val = tr32(GRC_VCPU_EXT_CTRL);
3723                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3724         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3725                 int i;
3726                 u32 val;
3727
3728                 for (i = 0; i < 200; i++) {
3729                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3730                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3731                                 break;
3732                         msleep(1);
3733                 }
3734         }
3735         if (tg3_flag(tp, WOL_CAP))
3736                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3737                                                      WOL_DRV_STATE_SHUTDOWN |
3738                                                      WOL_DRV_WOL |
3739                                                      WOL_SET_MAGIC_PKT);
3740
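        /* If WoL is armed, leave the MAC minimally alive: pick the
         * MII/GMII (or TBI) port mode, enable magic-packet detection, and
         * re-enable the receiver so wakeup frames can still be seen.
         */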
3741         if (device_should_wake) {
3742                 u32 mac_mode;
3743
3744                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3745                         if (do_low_power &&
3746                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3747                                 tg3_phy_auxctl_write(tp,
3748                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3749                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3750                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3751                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3752                                 udelay(40);
3753                         }
3754
3755                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3756                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3757                         else
3758                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3759
3760                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3761                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3762                             ASIC_REV_5700) {
3763                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3764                                              SPEED_100 : SPEED_10;
3765                                 if (tg3_5700_link_polarity(tp, speed))
3766                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3767                                 else
3768                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3769                         }
3770                 } else {
3771                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3772                 }
3773
3774                 if (!tg3_flag(tp, 5750_PLUS))
3775                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3776
3777                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3778                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3779                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3780                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3781
3782                 if (tg3_flag(tp, ENABLE_APE))
3783                         mac_mode |= MAC_MODE_APE_TX_EN |
3784                                     MAC_MODE_APE_RX_EN |
3785                                     MAC_MODE_TDE_ENABLE;
3786
3787                 tw32_f(MAC_MODE, mac_mode);
3788                 udelay(100);
3789
3790                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3791                 udelay(10);
3792         }
3793
3794         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3795             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3796              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3797                 u32 base_val;
3798
3799                 base_val = tp->pci_clock_ctrl;
3800                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3801                              CLOCK_CTRL_TXCLK_DISABLE);
3802
3803                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3804                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3805         } else if (tg3_flag(tp, 5780_CLASS) ||
3806                    tg3_flag(tp, CPMU_PRESENT) ||
3807                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3808                 /* do nothing */
3809         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3810                 u32 newbits1, newbits2;
3811
3812                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3813                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3814                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3815                                     CLOCK_CTRL_TXCLK_DISABLE |
3816                                     CLOCK_CTRL_ALTCLK);
3817                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3818                 } else if (tg3_flag(tp, 5705_PLUS)) {
3819                         newbits1 = CLOCK_CTRL_625_CORE;
3820                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3821                 } else {
3822                         newbits1 = CLOCK_CTRL_ALTCLK;
3823                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3824                 }
3825
3826                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3827                             40);
3828
3829                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3830                             40);
3831
3832                 if (!tg3_flag(tp, 5705_PLUS)) {
3833                         u32 newbits3;
3834
3835                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3836                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3837                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3838                                             CLOCK_CTRL_TXCLK_DISABLE |
3839                                             CLOCK_CTRL_44MHZ_CORE);
3840                         } else {
3841                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3842                         }
3843
3844                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3845                                     tp->pci_clock_ctrl | newbits3, 40);
3846                 }
3847         }
3848
3849         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3850                 tg3_power_down_phy(tp, do_low_power);
3851
3852         tg3_frob_aux_power(tp, true);
3853
3854         /* Workaround for unstable PLL clock */
3855         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3856             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3857                 u32 val = tr32(0x7d00);
3858
3859                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3860                 tw32(0x7d00, val);
3861                 if (!tg3_flag(tp, ENABLE_ASF)) {
3862                         int err;
3863
3864                         err = tg3_nvram_lock(tp);
3865                         tg3_halt_cpu(tp, RX_CPU_BASE);
3866                         if (!err)
3867                                 tg3_nvram_unlock(tp);
3868                 }
3869         }
3870
3871         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3872
3873         return 0;
3874 }
3875
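/* Final power-down: run the prepare step, arm PCI wakeup from D3 when
 * WoL is enabled, then drop the device into D3hot.
 */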
3876 static void tg3_power_down(struct tg3 *tp)
3877 {
3878         tg3_power_down_prepare(tp);
3879
3880         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3881         pci_set_power_state(tp->pdev, PCI_D3hot);
3882 }
3883
3884 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3885 {
3886         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3887         case MII_TG3_AUX_STAT_10HALF:
3888                 *speed = SPEED_10;
3889                 *duplex = DUPLEX_HALF;
3890                 break;
3891
3892         case MII_TG3_AUX_STAT_10FULL:
3893                 *speed = SPEED_10;
3894                 *duplex = DUPLEX_FULL;
3895                 break;
3896
3897         case MII_TG3_AUX_STAT_100HALF:
3898                 *speed = SPEED_100;
3899                 *duplex = DUPLEX_HALF;
3900                 break;
3901
3902         case MII_TG3_AUX_STAT_100FULL:
3903                 *speed = SPEED_100;
3904                 *duplex = DUPLEX_FULL;
3905                 break;
3906
3907         case MII_TG3_AUX_STAT_1000HALF:
3908                 *speed = SPEED_1000;
3909                 *duplex = DUPLEX_HALF;
3910                 break;
3911
3912         case MII_TG3_AUX_STAT_1000FULL:
3913                 *speed = SPEED_1000;
3914                 *duplex = DUPLEX_FULL;
3915                 break;
3916
3917         default:
3918                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3919                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3920                                  SPEED_10;
3921                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3922                                   DUPLEX_HALF;
3923                         break;
3924                 }
3925                 *speed = SPEED_UNKNOWN;
3926                 *duplex = DUPLEX_UNKNOWN;
3927                 break;
3928         }
3929 }
3930
3931 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3932 {
3933         int err = 0;
3934         u32 val, new_adv;
3935
3936         new_adv = ADVERTISE_CSMA;
3937         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3938         new_adv |= mii_advertise_flowctrl(flowctrl);
3939
3940         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3941         if (err)
3942                 goto done;
3943
3944         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3945                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3946
3947                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3948                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3949                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3950
3951                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3952                 if (err)
3953                         goto done;
3954         }
3955
3956         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3957                 goto done;
3958
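        /* Reconfigure the EEE advertisement: LPI is disabled while the
         * advertisement register is rewritten over the clause 45 interface.
         */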
3959         tw32(TG3_CPMU_EEE_MODE,
3960              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3961
3962         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3963         if (!err) {
3964                 u32 err2;
3965
3966                 val = 0;
3967                 /* Advertise 100-BaseTX EEE ability */
3968                 if (advertise & ADVERTISED_100baseT_Full)
3969                         val |= MDIO_AN_EEE_ADV_100TX;
3970                 /* Advertise 1000-BaseT EEE ability */
3971                 if (advertise & ADVERTISED_1000baseT_Full)
3972                         val |= MDIO_AN_EEE_ADV_1000T;
3973                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3974                 if (err)
3975                         val = 0;
3976
3977                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3978                 case ASIC_REV_5717:
3979                 case ASIC_REV_57765:
3980                 case ASIC_REV_57766:
3981                 case ASIC_REV_5719:
3982                         /* If we advertised any EEE modes above... */
3983                         if (val)
3984                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3985                                       MII_TG3_DSP_TAP26_RMRXSTO |
3986                                       MII_TG3_DSP_TAP26_OPCSINPT;
3987                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3988                         /* Fall through */
3989                 case ASIC_REV_5720:
3990                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3991                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3992                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3993                 }
3994
3995                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3996                 if (!err)
3997                         err = err2;
3998         }
3999
4000 done:
4001         return err;
4002 }
4003
4004 static void tg3_phy_copper_begin(struct tg3 *tp)
4005 {
4006         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4007             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4008                 u32 adv, fc;
4009
4010                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4011                         adv = ADVERTISED_10baseT_Half |
4012                               ADVERTISED_10baseT_Full;
4013                         if (tg3_flag(tp, WOL_SPEED_100MB))
4014                                 adv |= ADVERTISED_100baseT_Half |
4015                                        ADVERTISED_100baseT_Full;
4016
4017                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4018                 } else {
4019                         adv = tp->link_config.advertising;
4020                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4021                                 adv &= ~(ADVERTISED_1000baseT_Half |
4022                                          ADVERTISED_1000baseT_Full);
4023
4024                         fc = tp->link_config.flowctrl;
4025                 }
4026
4027                 tg3_phy_autoneg_cfg(tp, adv, fc);
4028
4029                 tg3_writephy(tp, MII_BMCR,
4030                              BMCR_ANENABLE | BMCR_ANRESTART);
4031         } else {
4032                 int i;
4033                 u32 bmcr, orig_bmcr;
4034
4035                 tp->link_config.active_speed = tp->link_config.speed;
4036                 tp->link_config.active_duplex = tp->link_config.duplex;
4037
4038                 bmcr = 0;
4039                 switch (tp->link_config.speed) {
4040                 default:
4041                 case SPEED_10:
4042                         break;
4043
4044                 case SPEED_100:
4045                         bmcr |= BMCR_SPEED100;
4046                         break;
4047
4048                 case SPEED_1000:
4049                         bmcr |= BMCR_SPEED1000;
4050                         break;
4051                 }
4052
4053                 if (tp->link_config.duplex == DUPLEX_FULL)
4054                         bmcr |= BMCR_FULLDPLX;
4055
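                /* Forcing a new speed/duplex: put the PHY in loopback to
                 * drop the link, wait up to 15 ms (1500 x 10 usec) for
                 * link-down, then write the forced BMCR value.
                 */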
4056                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4057                     (bmcr != orig_bmcr)) {
4058                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4059                         for (i = 0; i < 1500; i++) {
4060                                 u32 tmp;
4061
4062                                 udelay(10);
4063                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4064                                     tg3_readphy(tp, MII_BMSR, &tmp))
4065                                         continue;
4066                                 if (!(tmp & BMSR_LSTATUS)) {
4067                                         udelay(40);
4068                                         break;
4069                                 }
4070                         }
4071                         tg3_writephy(tp, MII_BMCR, bmcr);
4072                         udelay(40);
4073                 }
4074         }
4075 }
4076
4077 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4078 {
4079         int err;
4080
4081         /* Turn off tap power management. */
4082         /* Set Extended packet length bit */
4083         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4084
4085         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4086         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4087         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4088         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4089         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4090
4091         udelay(40);
4092
4093         return err;
4094 }
4095
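/* Check whether the advertisement registers currently programmed into
 * the PHY already match link_config, letting the caller avoid a
 * needless autoneg restart.
 */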
4096 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4097 {
4098         u32 advmsk, tgtadv, advertising;
4099
4100         advertising = tp->link_config.advertising;
4101         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4102
4103         advmsk = ADVERTISE_ALL;
4104         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4105                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4106                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4107         }
4108
4109         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4110                 return false;
4111
4112         if ((*lcladv & advmsk) != tgtadv)
4113                 return false;
4114
4115         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4116                 u32 tg3_ctrl;
4117
4118                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4119
4120                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4121                         return false;
4122
4123                 if (tgtadv &&
4124                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4125                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4126                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4127                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4128                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4129                 } else {
4130                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4131                 }
4132
4133                 if (tg3_ctrl != tgtadv)
4134                         return false;
4135         }
4136
4137         return true;
4138 }
4139
4140 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4141 {
4142         u32 lpeth = 0;
4143
4144         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4145                 u32 val;
4146
4147                 if (tg3_readphy(tp, MII_STAT1000, &val))
4148                         return false;
4149
4150                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4151         }
4152
4153         if (tg3_readphy(tp, MII_LPA, rmtadv))
4154                 return false;
4155
4156         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4157         tp->link_config.rmt_adv = lpeth;
4158
4159         return true;
4160 }
4161
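/* Bring up or re-evaluate the copper link: reset the PHY if required,
 * poll BMSR for link, derive speed/duplex from the aux status register,
 * and program MAC_MODE to match the negotiated result.
 */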
4162 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4163 {
4164         int current_link_up;
4165         u32 bmsr, val;
4166         u32 lcl_adv, rmt_adv;
4167         u16 current_speed;
4168         u8 current_duplex;
4169         int i, err;
4170
4171         tw32(MAC_EVENT, 0);
4172
4173         tw32_f(MAC_STATUS,
4174              (MAC_STATUS_SYNC_CHANGED |
4175               MAC_STATUS_CFG_CHANGED |
4176               MAC_STATUS_MI_COMPLETION |
4177               MAC_STATUS_LNKSTATE_CHANGED));
4178         udelay(40);
4179
4180         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4181                 tw32_f(MAC_MI_MODE,
4182                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4183                 udelay(80);
4184         }
4185
4186         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4187
4188         /* Some third-party PHYs need to be reset on link going
4189          * down.
4190          */
4191         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4192              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4193              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4194             netif_carrier_ok(tp->dev)) {
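                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */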
4195                 tg3_readphy(tp, MII_BMSR, &bmsr);
4196                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4197                     !(bmsr & BMSR_LSTATUS))
4198                         force_reset = 1;
4199         }
4200         if (force_reset)
4201                 tg3_phy_reset(tp);
4202
4203         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4204                 tg3_readphy(tp, MII_BMSR, &bmsr);
4205                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4206                     !tg3_flag(tp, INIT_COMPLETE))
4207                         bmsr = 0;
4208
4209                 if (!(bmsr & BMSR_LSTATUS)) {
4210                         err = tg3_init_5401phy_dsp(tp);
4211                         if (err)
4212                                 return err;
4213
4214                         tg3_readphy(tp, MII_BMSR, &bmsr);
4215                         for (i = 0; i < 1000; i++) {
4216                                 udelay(10);
4217                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4218                                     (bmsr & BMSR_LSTATUS)) {
4219                                         udelay(40);
4220                                         break;
4221                                 }
4222                         }
4223
4224                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4225                             TG3_PHY_REV_BCM5401_B0 &&
4226                             !(bmsr & BMSR_LSTATUS) &&
4227                             tp->link_config.active_speed == SPEED_1000) {
4228                                 err = tg3_phy_reset(tp);
4229                                 if (!err)
4230                                         err = tg3_init_5401phy_dsp(tp);
4231                                 if (err)
4232                                         return err;
4233                         }
4234                 }
4235         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4236                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4237                 /* 5701 {A0,B0} CRC bug workaround */
4238                 tg3_writephy(tp, 0x15, 0x0a75);
4239                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4240                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4241                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4242         }
4243
4244         /* Clear pending interrupts... */
4245         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4246         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4247
4248         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4249                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4250         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4251                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4252
4253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4255                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4256                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4257                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4258                 else
4259                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4260         }
4261
4262         current_link_up = 0;
4263         current_speed = SPEED_UNKNOWN;
4264         current_duplex = DUPLEX_UNKNOWN;
4265         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4266         tp->link_config.rmt_adv = 0;
4267
4268         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4269                 err = tg3_phy_auxctl_read(tp,
4270                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4271                                           &val);
4272                 if (!err && !(val & (1 << 10))) {
4273                         tg3_phy_auxctl_write(tp,
4274                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4275                                              val | (1 << 10));
4276                         goto relink;
4277                 }
4278         }
4279
4280         bmsr = 0;
4281         for (i = 0; i < 100; i++) {
4282                 tg3_readphy(tp, MII_BMSR, &bmsr);
4283                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4284                     (bmsr & BMSR_LSTATUS))
4285                         break;
4286                 udelay(40);
4287         }
4288
4289         if (bmsr & BMSR_LSTATUS) {
4290                 u32 aux_stat, bmcr;
4291
4292                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4293                 for (i = 0; i < 2000; i++) {
4294                         udelay(10);
4295                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4296                             aux_stat)
4297                                 break;
4298                 }
4299
4300                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4301                                              &current_speed,
4302                                              &current_duplex);
4303
4304                 bmcr = 0;
4305                 for (i = 0; i < 200; i++) {
4306                         tg3_readphy(tp, MII_BMCR, &bmcr);
4307                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4308                                 continue;
4309                         if (bmcr && bmcr != 0x7fff)
4310                                 break;
4311                         udelay(10);
4312                 }
4313
4314                 lcl_adv = 0;
4315                 rmt_adv = 0;
4316
4317                 tp->link_config.active_speed = current_speed;
4318                 tp->link_config.active_duplex = current_duplex;
4319
4320                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4321                         if ((bmcr & BMCR_ANENABLE) &&
4322                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4323                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4324                                 current_link_up = 1;
4325                 } else {
4326                         if (!(bmcr & BMCR_ANENABLE) &&
4327                             tp->link_config.speed == current_speed &&
4328                             tp->link_config.duplex == current_duplex &&
4329                             tp->link_config.flowctrl ==
4330                             tp->link_config.active_flowctrl) {
4331                                 current_link_up = 1;
4332                         }
4333                 }
4334
4335                 if (current_link_up == 1 &&
4336                     tp->link_config.active_duplex == DUPLEX_FULL) {
4337                         u32 reg, bit;
4338
4339                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4340                                 reg = MII_TG3_FET_GEN_STAT;
4341                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4342                         } else {
4343                                 reg = MII_TG3_EXT_STAT;
4344                                 bit = MII_TG3_EXT_STAT_MDIX;
4345                         }
4346
4347                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4348                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4349
4350                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4351                 }
4352         }
4353
4354 relink:
4355         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4356                 tg3_phy_copper_begin(tp);
4357
4358                 tg3_readphy(tp, MII_BMSR, &bmsr);
4359                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4360                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4361                         current_link_up = 1;
4362         }
4363
4364         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4365         if (current_link_up == 1) {
4366                 if (tp->link_config.active_speed == SPEED_100 ||
4367                     tp->link_config.active_speed == SPEED_10)
4368                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4369                 else
4370                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4371         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4372                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4373         else
4374                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4375
4376         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4377         if (tp->link_config.active_duplex == DUPLEX_HALF)
4378                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4379
4380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4381                 if (current_link_up == 1 &&
4382                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4383                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4384                 else
4385                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4386         }
4387
4388         /* Without this setting the Netgear GA302T PHY does not
4389          * send or receive packets; the root cause is unknown.
4390          */
4391         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4392             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4393                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4394                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4395                 udelay(80);
4396         }
4397
4398         tw32_f(MAC_MODE, tp->mac_mode);
4399         udelay(40);
4400
4401         tg3_phy_eee_adjust(tp, current_link_up);
4402
4403         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4404                 /* Polled via timer. */
4405                 tw32_f(MAC_EVENT, 0);
4406         } else {
4407                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4408         }
4409         udelay(40);
4410
4411         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4412             current_link_up == 1 &&
4413             tp->link_config.active_speed == SPEED_1000 &&
4414             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4415                 udelay(120);
4416                 tw32_f(MAC_STATUS,
4417                      (MAC_STATUS_SYNC_CHANGED |
4418                       MAC_STATUS_CFG_CHANGED));
4419                 udelay(40);
4420                 tg3_write_mem(tp,
4421                               NIC_SRAM_FIRMWARE_MBOX,
4422                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4423         }
4424
4425         /* Prevent send BD corruption: keep CLKREQ disabled at 10/100 Mbps. */
4426         if (tg3_flag(tp, CLKREQ_BUG)) {
4427                 if (tp->link_config.active_speed == SPEED_100 ||
4428                     tp->link_config.active_speed == SPEED_10)
4429                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4430                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4431                 else
4432                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4433                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4434         }
4435
4436         if (current_link_up != netif_carrier_ok(tp->dev)) {
4437                 if (current_link_up)
4438                         netif_carrier_on(tp->dev);
4439                 else
4440                         netif_carrier_off(tp->dev);
4441                 tg3_link_report(tp);
4442         }
4443
4444         return 0;
4445 }
4446
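/* State for the software fiber autoneg machine below.  The MR_* flag
 * names echo the management-register variables defined by the IEEE
 * 802.3 clause 37 auto-negotiation specification.
 */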
4447 struct tg3_fiber_aneginfo {
4448         int state;
4449 #define ANEG_STATE_UNKNOWN              0
4450 #define ANEG_STATE_AN_ENABLE            1
4451 #define ANEG_STATE_RESTART_INIT         2
4452 #define ANEG_STATE_RESTART              3
4453 #define ANEG_STATE_DISABLE_LINK_OK      4
4454 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4455 #define ANEG_STATE_ABILITY_DETECT       6
4456 #define ANEG_STATE_ACK_DETECT_INIT      7
4457 #define ANEG_STATE_ACK_DETECT           8
4458 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4459 #define ANEG_STATE_COMPLETE_ACK         10
4460 #define ANEG_STATE_IDLE_DETECT_INIT     11
4461 #define ANEG_STATE_IDLE_DETECT          12
4462 #define ANEG_STATE_LINK_OK              13
4463 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4464 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4465
4466         u32 flags;
4467 #define MR_AN_ENABLE            0x00000001
4468 #define MR_RESTART_AN           0x00000002
4469 #define MR_AN_COMPLETE          0x00000004
4470 #define MR_PAGE_RX              0x00000008
4471 #define MR_NP_LOADED            0x00000010
4472 #define MR_TOGGLE_TX            0x00000020
4473 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4474 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4475 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4476 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4477 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4478 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4479 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4480 #define MR_TOGGLE_RX            0x00002000
4481 #define MR_NP_RX                0x00004000
4482
4483 #define MR_LINK_OK              0x80000000
4484
4485         unsigned long link_time, cur_time;
4486
4487         u32 ability_match_cfg;
4488         int ability_match_count;
4489
4490         char ability_match, idle_match, ack_match;
4491
4492         u32 txconfig, rxconfig;
4493 #define ANEG_CFG_NP             0x00000080
4494 #define ANEG_CFG_ACK            0x00000040
4495 #define ANEG_CFG_RF2            0x00000020
4496 #define ANEG_CFG_RF1            0x00000010
4497 #define ANEG_CFG_PS2            0x00000001
4498 #define ANEG_CFG_PS1            0x00008000
4499 #define ANEG_CFG_HD             0x00004000
4500 #define ANEG_CFG_FD             0x00002000
4501 #define ANEG_CFG_INVAL          0x00001f06
4502
4503 };
4504 #define ANEG_OK         0
4505 #define ANEG_DONE       1
4506 #define ANEG_TIMER_ENAB 2
4507 #define ANEG_FAILED     -1
4508
4509 #define ANEG_STATE_SETTLE_TIME  10000
4510
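/* Software auto-negotiation state machine, modelled on the IEEE 802.3
 * clause 37 (1000BASE-X) arbitration process.  fiber_autoneg() below
 * steps it once per tick until it reports ANEG_DONE or ANEG_FAILED.
 */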
4511 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4512                                    struct tg3_fiber_aneginfo *ap)
4513 {
4514         u16 flowctrl;
4515         unsigned long delta;
4516         u32 rx_cfg_reg;
4517         int ret;
4518
4519         if (ap->state == ANEG_STATE_UNKNOWN) {
4520                 ap->rxconfig = 0;
4521                 ap->link_time = 0;
4522                 ap->cur_time = 0;
4523                 ap->ability_match_cfg = 0;
4524                 ap->ability_match_count = 0;
4525                 ap->ability_match = 0;
4526                 ap->idle_match = 0;
4527                 ap->ack_match = 0;
4528         }
4529         ap->cur_time++;
4530
4531         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4532                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4533
4534                 if (rx_cfg_reg != ap->ability_match_cfg) {
4535                         ap->ability_match_cfg = rx_cfg_reg;
4536                         ap->ability_match = 0;
4537                         ap->ability_match_count = 0;
4538                 } else {
4539                         if (++ap->ability_match_count > 1) {
4540                                 ap->ability_match = 1;
4541                                 ap->ability_match_cfg = rx_cfg_reg;
4542                         }
4543                 }
4544                 if (rx_cfg_reg & ANEG_CFG_ACK)
4545                         ap->ack_match = 1;
4546                 else
4547                         ap->ack_match = 0;
4548
4549                 ap->idle_match = 0;
4550         } else {
4551                 ap->idle_match = 1;
4552                 ap->ability_match_cfg = 0;
4553                 ap->ability_match_count = 0;
4554                 ap->ability_match = 0;
4555                 ap->ack_match = 0;
4556
4557                 rx_cfg_reg = 0;
4558         }
4559
4560         ap->rxconfig = rx_cfg_reg;
4561         ret = ANEG_OK;
4562
4563         switch (ap->state) {
4564         case ANEG_STATE_UNKNOWN:
4565                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4566                         ap->state = ANEG_STATE_AN_ENABLE;
4567
4568                 /* fallthru */
4569         case ANEG_STATE_AN_ENABLE:
4570                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4571                 if (ap->flags & MR_AN_ENABLE) {
4572                         ap->link_time = 0;
4573                         ap->cur_time = 0;
4574                         ap->ability_match_cfg = 0;
4575                         ap->ability_match_count = 0;
4576                         ap->ability_match = 0;
4577                         ap->idle_match = 0;
4578                         ap->ack_match = 0;
4579
4580                         ap->state = ANEG_STATE_RESTART_INIT;
4581                 } else {
4582                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4583                 }
4584                 break;
4585
4586         case ANEG_STATE_RESTART_INIT:
4587                 ap->link_time = ap->cur_time;
4588                 ap->flags &= ~(MR_NP_LOADED);
4589                 ap->txconfig = 0;
4590                 tw32(MAC_TX_AUTO_NEG, 0);
4591                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4592                 tw32_f(MAC_MODE, tp->mac_mode);
4593                 udelay(40);
4594
4595                 ret = ANEG_TIMER_ENAB;
4596                 ap->state = ANEG_STATE_RESTART;
4597
4598                 /* fallthru */
4599         case ANEG_STATE_RESTART:
4600                 delta = ap->cur_time - ap->link_time;
4601                 if (delta > ANEG_STATE_SETTLE_TIME)
4602                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4603                 else
4604                         ret = ANEG_TIMER_ENAB;
4605                 break;
4606
4607         case ANEG_STATE_DISABLE_LINK_OK:
4608                 ret = ANEG_DONE;
4609                 break;
4610
4611         case ANEG_STATE_ABILITY_DETECT_INIT:
4612                 ap->flags &= ~(MR_TOGGLE_TX);
4613                 ap->txconfig = ANEG_CFG_FD;
4614                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4615                 if (flowctrl & ADVERTISE_1000XPAUSE)
4616                         ap->txconfig |= ANEG_CFG_PS1;
4617                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4618                         ap->txconfig |= ANEG_CFG_PS2;
4619                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4620                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4621                 tw32_f(MAC_MODE, tp->mac_mode);
4622                 udelay(40);
4623
4624                 ap->state = ANEG_STATE_ABILITY_DETECT;
4625                 break;
4626
4627         case ANEG_STATE_ABILITY_DETECT:
4628                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4629                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4630                 break;
4631
4632         case ANEG_STATE_ACK_DETECT_INIT:
4633                 ap->txconfig |= ANEG_CFG_ACK;
4634                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4635                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4636                 tw32_f(MAC_MODE, tp->mac_mode);
4637                 udelay(40);
4638
4639                 ap->state = ANEG_STATE_ACK_DETECT;
4640
4641                 /* fallthru */
4642         case ANEG_STATE_ACK_DETECT:
4643                 if (ap->ack_match != 0) {
4644                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4645                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4646                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4647                         } else {
4648                                 ap->state = ANEG_STATE_AN_ENABLE;
4649                         }
4650                 } else if (ap->ability_match != 0 &&
4651                            ap->rxconfig == 0) {
4652                         ap->state = ANEG_STATE_AN_ENABLE;
4653                 }
4654                 break;
4655
4656         case ANEG_STATE_COMPLETE_ACK_INIT:
4657                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4658                         ret = ANEG_FAILED;
4659                         break;
4660                 }
4661                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4662                                MR_LP_ADV_HALF_DUPLEX |
4663                                MR_LP_ADV_SYM_PAUSE |
4664                                MR_LP_ADV_ASYM_PAUSE |
4665                                MR_LP_ADV_REMOTE_FAULT1 |
4666                                MR_LP_ADV_REMOTE_FAULT2 |
4667                                MR_LP_ADV_NEXT_PAGE |
4668                                MR_TOGGLE_RX |
4669                                MR_NP_RX);
4670                 if (ap->rxconfig & ANEG_CFG_FD)
4671                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4672                 if (ap->rxconfig & ANEG_CFG_HD)
4673                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4674                 if (ap->rxconfig & ANEG_CFG_PS1)
4675                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4676                 if (ap->rxconfig & ANEG_CFG_PS2)
4677                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4678                 if (ap->rxconfig & ANEG_CFG_RF1)
4679                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4680                 if (ap->rxconfig & ANEG_CFG_RF2)
4681                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4682                 if (ap->rxconfig & ANEG_CFG_NP)
4683                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4684
4685                 ap->link_time = ap->cur_time;
4686
4687                 ap->flags ^= (MR_TOGGLE_TX);
4688                 if (ap->rxconfig & 0x0008)
4689                         ap->flags |= MR_TOGGLE_RX;
4690                 if (ap->rxconfig & ANEG_CFG_NP)
4691                         ap->flags |= MR_NP_RX;
4692                 ap->flags |= MR_PAGE_RX;
4693
4694                 ap->state = ANEG_STATE_COMPLETE_ACK;
4695                 ret = ANEG_TIMER_ENAB;
4696                 break;
4697
4698         case ANEG_STATE_COMPLETE_ACK:
4699                 if (ap->ability_match != 0 &&
4700                     ap->rxconfig == 0) {
4701                         ap->state = ANEG_STATE_AN_ENABLE;
4702                         break;
4703                 }
4704                 delta = ap->cur_time - ap->link_time;
4705                 if (delta > ANEG_STATE_SETTLE_TIME) {
4706                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4707                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4708                         } else {
4709                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4710                                     !(ap->flags & MR_NP_RX)) {
4711                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4712                                 } else {
4713                                         ret = ANEG_FAILED;
4714                                 }
4715                         }
4716                 }
4717                 break;
4718
4719         case ANEG_STATE_IDLE_DETECT_INIT:
4720                 ap->link_time = ap->cur_time;
4721                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4722                 tw32_f(MAC_MODE, tp->mac_mode);
4723                 udelay(40);
4724
4725                 ap->state = ANEG_STATE_IDLE_DETECT;
4726                 ret = ANEG_TIMER_ENAB;
4727                 break;
4728
4729         case ANEG_STATE_IDLE_DETECT:
4730                 if (ap->ability_match != 0 &&
4731                     ap->rxconfig == 0) {
4732                         ap->state = ANEG_STATE_AN_ENABLE;
4733                         break;
4734                 }
4735                 delta = ap->cur_time - ap->link_time;
4736                 if (delta > ANEG_STATE_SETTLE_TIME) {
4737                         /* XXX As in the vendor driver, just assume link OK here. */
4738                         ap->state = ANEG_STATE_LINK_OK;
4739                 }
4740                 break;
4741
4742         case ANEG_STATE_LINK_OK:
4743                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4744                 ret = ANEG_DONE;
4745                 break;
4746
4747         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4748                 /* Next-page exchange not implemented. */
4749                 break;
4750
4751         case ANEG_STATE_NEXT_PAGE_WAIT:
4752                 /* Next-page exchange not implemented. */
4753                 break;
4754
4755         default:
4756                 ret = ANEG_FAILED;
4757                 break;
4758         }
4759
4760         return ret;
4761 }
4762
4763 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4764 {
4765         int res = 0;
4766         struct tg3_fiber_aneginfo aninfo;
4767         int status = ANEG_FAILED;
4768         unsigned int tick;
4769         u32 tmp;
4770
4771         tw32_f(MAC_TX_AUTO_NEG, 0);
4772
4773         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4774         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4775         udelay(40);
4776
4777         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4778         udelay(40);
4779
4780         memset(&aninfo, 0, sizeof(aninfo));
4781         aninfo.flags |= MR_AN_ENABLE;
4782         aninfo.state = ANEG_STATE_UNKNOWN;
4783         aninfo.cur_time = 0;
4784         tick = 0;
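        /* Step the state machine roughly once per microsecond, for at
         * most ~195 ms, until autoneg completes or fails.
         */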
4785         while (++tick < 195000) {
4786                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4787                 if (status == ANEG_DONE || status == ANEG_FAILED)
4788                         break;
4789
4790                 udelay(1);
4791         }
4792
4793         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4794         tw32_f(MAC_MODE, tp->mac_mode);
4795         udelay(40);
4796
4797         *txflags = aninfo.txconfig;
4798         *rxflags = aninfo.flags;
4799
4800         if (status == ANEG_DONE &&
4801             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4802                              MR_LP_ADV_FULL_DUPLEX)))
4803                 res = 1;
4804
4805         return res;
4806 }
4807
4808 static void tg3_init_bcm8002(struct tg3 *tp)
4809 {
4810         u32 mac_status = tr32(MAC_STATUS);
4811         int i;
4812
4813         /* Reset when initializing for the first time or when we have a link. */
4814         if (tg3_flag(tp, INIT_COMPLETE) &&
4815             !(mac_status & MAC_STATUS_PCS_SYNCED))
4816                 return;
4817
4818         /* Set PLL lock range. */
4819         tg3_writephy(tp, 0x16, 0x8007);
4820
4821         /* SW reset */
4822         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4823
4824         /* Wait for reset to complete. */
4825         /* XXX busy-wait; should use schedule_timeout() instead */
4826         for (i = 0; i < 500; i++)
4827                 udelay(10);
4828
4829         /* Config mode; select PMA/Ch 1 regs. */
4830         tg3_writephy(tp, 0x10, 0x8411);
4831
4832         /* Enable auto-lock and comdet, select txclk for tx. */
4833         tg3_writephy(tp, 0x11, 0x0a10);
4834
4835         tg3_writephy(tp, 0x18, 0x00a0);
4836         tg3_writephy(tp, 0x16, 0x41ff);
4837
4838         /* Assert and deassert POR. */
4839         tg3_writephy(tp, 0x13, 0x0400);
4840         udelay(40);
4841         tg3_writephy(tp, 0x13, 0x0000);
4842
4843         tg3_writephy(tp, 0x11, 0x0a50);
4844         udelay(40);
4845         tg3_writephy(tp, 0x11, 0x0a10);
4846
4847         /* Wait for signal to stabilize */
4848         /* XXX busy-wait; should use schedule_timeout() instead */
4849         for (i = 0; i < 15000; i++)
4850                 udelay(10);
4851
4852         /* Deselect the channel register so we can read the PHYID
4853          * later.
4854          */
4855         tg3_writephy(tp, 0x10, 0x8011);
4856 }
4857
4858 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4859 {
4860         u16 flowctrl;
4861         u32 sg_dig_ctrl, sg_dig_status;
4862         u32 serdes_cfg, expected_sg_dig_ctrl;
4863         int workaround, port_a;
4864         int current_link_up;
4865
4866         serdes_cfg = 0;
4867         expected_sg_dig_ctrl = 0;
4868         workaround = 0;
4869         port_a = 1;
4870         current_link_up = 0;
4871
4872         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4873             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4874                 workaround = 1;
4875                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4876                         port_a = 0;
4877
4878                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4879                 /* preserve bits 20-23 for voltage regulator */
4880                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4881         }
4882
4883         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4884
4885         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4886                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
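                        /* The serdes config values below are magic
                         * numbers; they differ per port and their bit
                         * meanings are not documented here.
                         */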
4887                         if (workaround) {
4888                                 u32 val = serdes_cfg;
4889
4890                                 if (port_a)
4891                                         val |= 0xc010000;
4892                                 else
4893                                         val |= 0x4010000;
4894                                 tw32_f(MAC_SERDES_CFG, val);
4895                         }
4896
4897                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4898                 }
4899                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4900                         tg3_setup_flow_control(tp, 0, 0);
4901                         current_link_up = 1;
4902                 }
4903                 goto out;
4904         }
4905
4906         /* Want auto-negotiation.  */
4907         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4908
4909         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4910         if (flowctrl & ADVERTISE_1000XPAUSE)
4911                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4912         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4913                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4914
4915         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4916                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4917                     tp->serdes_counter &&
4918                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4919                                     MAC_STATUS_RCVD_CFG)) ==
4920                      MAC_STATUS_PCS_SYNCED)) {
4921                         tp->serdes_counter--;
4922                         current_link_up = 1;
4923                         goto out;
4924                 }
4925 restart_autoneg:
4926                 if (workaround)
4927                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4928                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4929                 udelay(5);
4930                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4931
4932                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4933                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4934         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4935                                  MAC_STATUS_SIGNAL_DET)) {
4936                 sg_dig_status = tr32(SG_DIG_STATUS);
4937                 mac_status = tr32(MAC_STATUS);
4938
4939                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4940                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4941                         u32 local_adv = 0, remote_adv = 0;
4942
4943                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4944                                 local_adv |= ADVERTISE_1000XPAUSE;
4945                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4946                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4947
4948                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4949                                 remote_adv |= LPA_1000XPAUSE;
4950                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4951                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4952
4953                         tp->link_config.rmt_adv =
4954                                            mii_adv_to_ethtool_adv_x(remote_adv);
4955
4956                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4957                         current_link_up = 1;
4958                         tp->serdes_counter = 0;
4959                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4960                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4961                         if (tp->serdes_counter)
4962                                 tp->serdes_counter--;
4963                         else {
4964                                 if (workaround) {
4965                                         u32 val = serdes_cfg;
4966
4967                                         if (port_a)
4968                                                 val |= 0xc010000;
4969                                         else
4970                                                 val |= 0x4010000;
4971
4972                                         tw32_f(MAC_SERDES_CFG, val);
4973                                 }
4974
4975                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4976                                 udelay(40);
4977
4978                                 /* Link parallel detection: link is up
4979                                  * only if we have PCS_SYNC and are not
4980                                  * receiving config code words. */
4981                                 mac_status = tr32(MAC_STATUS);
4982                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4983                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4984                                         tg3_setup_flow_control(tp, 0, 0);
4985                                         current_link_up = 1;
4986                                         tp->phy_flags |=
4987                                                 TG3_PHYFLG_PARALLEL_DETECT;
4988                                         tp->serdes_counter =
4989                                                 SERDES_PARALLEL_DET_TIMEOUT;
4990                                 } else
4991                                         goto restart_autoneg;
4992                         }
4993                 }
4994         } else {
4995                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4996                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4997         }
4998
4999 out:
5000         return current_link_up;
5001 }
5002
5003 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5004 {
5005         int current_link_up = 0;
5006
5007         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5008                 goto out;
5009
5010         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5011                 u32 txflags, rxflags;
5012                 int i;
5013
5014                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5015                         u32 local_adv = 0, remote_adv = 0;
5016
5017                         if (txflags & ANEG_CFG_PS1)
5018                                 local_adv |= ADVERTISE_1000XPAUSE;
5019                         if (txflags & ANEG_CFG_PS2)
5020                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5021
5022                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5023                                 remote_adv |= LPA_1000XPAUSE;
5024                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5025                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5026
5027                         tp->link_config.rmt_adv =
5028                                            mii_adv_to_ethtool_adv_x(remote_adv);
5029
5030                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5031
5032                         current_link_up = 1;
5033                 }
5034                 for (i = 0; i < 30; i++) {
5035                         udelay(20);
5036                         tw32_f(MAC_STATUS,
5037                                (MAC_STATUS_SYNC_CHANGED |
5038                                 MAC_STATUS_CFG_CHANGED));
5039                         udelay(40);
5040                         if ((tr32(MAC_STATUS) &
5041                              (MAC_STATUS_SYNC_CHANGED |
5042                               MAC_STATUS_CFG_CHANGED)) == 0)
5043                                 break;
5044                 }
5045
5046                 mac_status = tr32(MAC_STATUS);
5047                 if (current_link_up == 0 &&
5048                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5049                     !(mac_status & MAC_STATUS_RCVD_CFG))
5050                         current_link_up = 1;
5051         } else {
5052                 tg3_setup_flow_control(tp, 0, 0);
5053
5054                 /* Forcing 1000FD link up. */
5055                 current_link_up = 1;
5056
5057                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5058                 udelay(40);
5059
5060                 tw32_f(MAC_MODE, tp->mac_mode);
5061                 udelay(40);
5062         }
5063
5064 out:
5065         return current_link_up;
5066 }
5067
5068 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5069 {
5070         u32 orig_pause_cfg;
5071         u16 orig_active_speed;
5072         u8 orig_active_duplex;
5073         u32 mac_status;
5074         int current_link_up;
5075         int i;
5076
5077         orig_pause_cfg = tp->link_config.active_flowctrl;
5078         orig_active_speed = tp->link_config.active_speed;
5079         orig_active_duplex = tp->link_config.active_duplex;
5080
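        /* Fast path: with software autoneg, if the link already looks
         * healthy (PCS synced, signal detected, no config changes),
         * just ack the status bits and keep the current setup.
         */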
5081         if (!tg3_flag(tp, HW_AUTONEG) &&
5082             netif_carrier_ok(tp->dev) &&
5083             tg3_flag(tp, INIT_COMPLETE)) {
5084                 mac_status = tr32(MAC_STATUS);
5085                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5086                                MAC_STATUS_SIGNAL_DET |
5087                                MAC_STATUS_CFG_CHANGED |
5088                                MAC_STATUS_RCVD_CFG);
5089                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5090                                    MAC_STATUS_SIGNAL_DET)) {
5091                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5092                                             MAC_STATUS_CFG_CHANGED));
5093                         return 0;
5094                 }
5095         }
5096
5097         tw32_f(MAC_TX_AUTO_NEG, 0);
5098
5099         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5100         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5101         tw32_f(MAC_MODE, tp->mac_mode);
5102         udelay(40);
5103
5104         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5105                 tg3_init_bcm8002(tp);
5106
5107         /* Enable link change events even when polling the serdes. */
5108         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5109         udelay(40);
5110
5111         current_link_up = 0;
5112         tp->link_config.rmt_adv = 0;
5113         mac_status = tr32(MAC_STATUS);
5114
5115         if (tg3_flag(tp, HW_AUTONEG))
5116                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5117         else
5118                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5119
5120         tp->napi[0].hw_status->status =
5121                 (SD_STATUS_UPDATED |
5122                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5123
5124         for (i = 0; i < 100; i++) {
5125                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5126                                     MAC_STATUS_CFG_CHANGED));
5127                 udelay(5);
5128                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5129                                          MAC_STATUS_CFG_CHANGED |
5130                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5131                         break;
5132         }
5133
5134         mac_status = tr32(MAC_STATUS);
5135         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5136                 current_link_up = 0;
5137                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5138                     tp->serdes_counter == 0) {
5139                         tw32_f(MAC_MODE, (tp->mac_mode |
5140                                           MAC_MODE_SEND_CONFIGS));
5141                         udelay(1);
5142                         tw32_f(MAC_MODE, tp->mac_mode);
5143                 }
5144         }
5145
5146         if (current_link_up == 1) {
5147                 tp->link_config.active_speed = SPEED_1000;
5148                 tp->link_config.active_duplex = DUPLEX_FULL;
5149                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5150                                     LED_CTRL_LNKLED_OVERRIDE |
5151                                     LED_CTRL_1000MBPS_ON));
5152         } else {
5153                 tp->link_config.active_speed = SPEED_UNKNOWN;
5154                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5155                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5156                                     LED_CTRL_LNKLED_OVERRIDE |
5157                                     LED_CTRL_TRAFFIC_OVERRIDE));
5158         }
5159
5160         if (current_link_up != netif_carrier_ok(tp->dev)) {
5161                 if (current_link_up)
5162                         netif_carrier_on(tp->dev);
5163                 else
5164                         netif_carrier_off(tp->dev);
5165                 tg3_link_report(tp);
5166         } else {
5167                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5168                 if (orig_pause_cfg != now_pause_cfg ||
5169                     orig_active_speed != tp->link_config.active_speed ||
5170                     orig_active_duplex != tp->link_config.active_duplex)
5171                         tg3_link_report(tp);
5172         }
5173
5174         return 0;
5175 }
5176
5177 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5178 {
5179         int current_link_up, err = 0;
5180         u32 bmsr, bmcr;
5181         u16 current_speed;
5182         u8 current_duplex;
5183         u32 local_adv, remote_adv;
5184
5185         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5186         tw32_f(MAC_MODE, tp->mac_mode);
5187         udelay(40);
5188
5189         tw32(MAC_EVENT, 0);
5190
5191         tw32_f(MAC_STATUS,
5192              (MAC_STATUS_SYNC_CHANGED |
5193               MAC_STATUS_CFG_CHANGED |
5194               MAC_STATUS_MI_COMPLETION |
5195               MAC_STATUS_LNKSTATE_CHANGED));
5196         udelay(40);
5197
5198         if (force_reset)
5199                 tg3_phy_reset(tp);
5200
5201         current_link_up = 0;
5202         current_speed = SPEED_UNKNOWN;
5203         current_duplex = DUPLEX_UNKNOWN;
5204         tp->link_config.rmt_adv = 0;
5205
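        /* BMSR latches link-down events; the second read returns the
         * current link state.
         */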
5206         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5207         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5209                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5210                         bmsr |= BMSR_LSTATUS;
5211                 else
5212                         bmsr &= ~BMSR_LSTATUS;
5213         }
5214
5215         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5216
5217         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5218             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5219                 /* do nothing, just check for link up at the end */
5220         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5221                 u32 adv, newadv;
5222
5223                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5224                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5225                                  ADVERTISE_1000XPAUSE |
5226                                  ADVERTISE_1000XPSE_ASYM |
5227                                  ADVERTISE_SLCT);
5228
5229                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5230                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5231
5232                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5233                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5234                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5235                         tg3_writephy(tp, MII_BMCR, bmcr);
5236
5237                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5238                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5239                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5240
5241                         return err;
5242                 }
5243         } else {
5244                 u32 new_bmcr;
5245
5246                 bmcr &= ~BMCR_SPEED1000;
5247                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5248
5249                 if (tp->link_config.duplex == DUPLEX_FULL)
5250                         new_bmcr |= BMCR_FULLDPLX;
5251
5252                 if (new_bmcr != bmcr) {
5253                         /* BMCR_SPEED1000 is a reserved bit that needs
5254                          * to be set on write.
5255                          */
5256                         new_bmcr |= BMCR_SPEED1000;
5257
5258                         /* Force a linkdown */
5259                         if (netif_carrier_ok(tp->dev)) {
5260                                 u32 adv;
5261
5262                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5263                                 adv &= ~(ADVERTISE_1000XFULL |
5264                                          ADVERTISE_1000XHALF |
5265                                          ADVERTISE_SLCT);
5266                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5267                                 tg3_writephy(tp, MII_BMCR, bmcr |
5268                                                            BMCR_ANRESTART |
5269                                                            BMCR_ANENABLE);
5270                                 udelay(10);
5271                                 netif_carrier_off(tp->dev);
5272                         }
5273                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5274                         bmcr = new_bmcr;
5275                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5276                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5277                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5278                             ASIC_REV_5714) {
5279                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5280                                         bmsr |= BMSR_LSTATUS;
5281                                 else
5282                                         bmsr &= ~BMSR_LSTATUS;
5283                         }
5284                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5285                 }
5286         }
5287
5288         if (bmsr & BMSR_LSTATUS) {
5289                 current_speed = SPEED_1000;
5290                 current_link_up = 1;
5291                 if (bmcr & BMCR_FULLDPLX)
5292                         current_duplex = DUPLEX_FULL;
5293                 else
5294                         current_duplex = DUPLEX_HALF;
5295
5296                 local_adv = 0;
5297                 remote_adv = 0;
5298
5299                 if (bmcr & BMCR_ANENABLE) {
5300                         u32 common;
5301
5302                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5303                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5304                         common = local_adv & remote_adv;
5305                         if (common & (ADVERTISE_1000XHALF |
5306                                       ADVERTISE_1000XFULL)) {
5307                                 if (common & ADVERTISE_1000XFULL)
5308                                         current_duplex = DUPLEX_FULL;
5309                                 else
5310                                         current_duplex = DUPLEX_HALF;
5311
5312                                 tp->link_config.rmt_adv =
5313                                            mii_adv_to_ethtool_adv_x(remote_adv);
5314                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5315                                 /* Link is up via parallel detect */
5316                         } else {
5317                                 current_link_up = 0;
5318                         }
5319                 }
5320         }
5321
5322         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5323                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5324
5325         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5326         if (tp->link_config.active_duplex == DUPLEX_HALF)
5327                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5328
5329         tw32_f(MAC_MODE, tp->mac_mode);
5330         udelay(40);
5331
5332         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5333
5334         tp->link_config.active_speed = current_speed;
5335         tp->link_config.active_duplex = current_duplex;
5336
5337         if (current_link_up != netif_carrier_ok(tp->dev)) {
5338                 if (current_link_up)
5339                         netif_carrier_on(tp->dev);
5340                 else {
5341                         netif_carrier_off(tp->dev);
5342                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5343                 }
5344                 tg3_link_report(tp);
5345         }
5346         return err;
5347 }
5348
5349 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5350 {
5351         if (tp->serdes_counter) {
5352                 /* Give autoneg time to complete. */
5353                 tp->serdes_counter--;
5354                 return;
5355         }
5356
5357         if (!netif_carrier_ok(tp->dev) &&
5358             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5359                 u32 bmcr;
5360
5361                 tg3_readphy(tp, MII_BMCR, &bmcr);
5362                 if (bmcr & BMCR_ANENABLE) {
5363                         u32 phy1, phy2;
5364
5365                         /* Select shadow register 0x1f */
5366                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5367                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5368
5369                         /* Select expansion interrupt status register */
5370                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5371                                          MII_TG3_DSP_EXP1_INT_STAT);
5372                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5373                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5374
5375                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5376                                 /* We have signal detect and not receiving
5377                                  * config code words, link is up by parallel
5378                                  * detection.
5379                                  */
5380
5381                                 bmcr &= ~BMCR_ANENABLE;
5382                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5383                                 tg3_writephy(tp, MII_BMCR, bmcr);
5384                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5385                         }
5386                 }
5387         } else if (netif_carrier_ok(tp->dev) &&
5388                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5389                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5390                 u32 phy2;
5391
5392                 /* Select expansion interrupt status register */
5393                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5394                                  MII_TG3_DSP_EXP1_INT_STAT);
5395                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5396                 if (phy2 & 0x20) {
5397                         u32 bmcr;
5398
5399                         /* Config code words received, turn on autoneg. */
5400                         tg3_readphy(tp, MII_BMCR, &bmcr);
5401                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5402
5403                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5404
5405                 }
5406         }
5407 }
5408
5409 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5410 {
5411         u32 val;
5412         int err;
5413
5414         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5415                 err = tg3_setup_fiber_phy(tp, force_reset);
5416         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5417                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5418         else
5419                 err = tg3_setup_copper_phy(tp, force_reset);
5420
5421         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5422                 u32 scale;
5423
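                /* Scale the GRC timer prescaler to the MAC clock
                 * frequency currently reported by the CPMU.
                 */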
5424                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5425                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5426                         scale = 65;
5427                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5428                         scale = 6;
5429                 else
5430                         scale = 12;
5431
5432                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5433                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5434                 tw32(GRC_MISC_CFG, val);
5435         }
5436
5437         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5438               (6 << TX_LENGTHS_IPG_SHIFT);
5439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5440                 val |= tr32(MAC_TX_LENGTHS) &
5441                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5442                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5443
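        /* The unusual 1000 Mbps half-duplex case gets an extended slot
         * time (0xff); all other modes use the standard value (32).
         */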
5444         if (tp->link_config.active_speed == SPEED_1000 &&
5445             tp->link_config.active_duplex == DUPLEX_HALF)
5446                 tw32(MAC_TX_LENGTHS, val |
5447                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5448         else
5449                 tw32(MAC_TX_LENGTHS, val |
5450                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5451
5452         if (!tg3_flag(tp, 5705_PLUS)) {
5453                 if (netif_carrier_ok(tp->dev)) {
5454                         tw32(HOSTCC_STAT_COAL_TICKS,
5455                              tp->coal.stats_block_coalesce_usecs);
5456                 } else {
5457                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5458                 }
5459         }
5460
5461         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5462                 val = tr32(PCIE_PWR_MGMT_THRESH);
5463                 if (!netif_carrier_ok(tp->dev))
5464                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5465                               tp->pwrmgmt_thresh;
5466                 else
5467                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5468                 tw32(PCIE_PWR_MGMT_THRESH, val);
5469         }
5470
5471         return err;
5472 }
5473
5474 static inline int tg3_irq_sync(struct tg3 *tp)
5475 {
5476         return tp->irq_sync;
5477 }
5478
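/* Copy 'len' bytes of registers, starting at register offset 'off',
 * into the dump buffer at the same byte offset so the dump mirrors
 * the device's register address map.
 */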
5479 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5480 {
5481         int i;
5482
5483         dst = (u32 *)((u8 *)dst + off);
5484         for (i = 0; i < len; i += sizeof(u32))
5485                 *dst++ = tr32(off + i);
5486 }
5487
5488 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5489 {
5490         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5491         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5492         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5493         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5494         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5495         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5496         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5497         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5498         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5499         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5500         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5501         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5502         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5503         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5504         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5505         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5506         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5507         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5508         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5509
5510         if (tg3_flag(tp, SUPPORT_MSIX))
5511                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5512
5513         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5514         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5515         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5516         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5517         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5518         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5519         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5520         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5521
5522         if (!tg3_flag(tp, 5705_PLUS)) {
5523                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5524                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5525                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5526         }
5527
5528         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5529         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5530         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5531         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5532         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5533
5534         if (tg3_flag(tp, NVRAM))
5535                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5536 }
5537
5538 static void tg3_dump_state(struct tg3 *tp)
5539 {
5540         int i;
5541         u32 *regs;
5542
5543         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5544         if (!regs) {
5545                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5546                 return;
5547         }
5548
5549         if (tg3_flag(tp, PCI_EXPRESS)) {
5550                 /* Read up to but not including private PCI registers */
5551                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5552                         regs[i / sizeof(u32)] = tr32(i);
5553         } else
5554                 tg3_dump_legacy_regs(tp, regs);
5555
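
        /* Print four registers per line, skipping rows that are all
         * zero to keep the log readable.
         */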
5556         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5557                 if (!regs[i + 0] && !regs[i + 1] &&
5558                     !regs[i + 2] && !regs[i + 3])
5559                         continue;
5560
5561                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5562                            i * 4,
5563                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5564         }
5565
5566         kfree(regs);
5567
5568         for (i = 0; i < tp->irq_cnt; i++) {
5569                 struct tg3_napi *tnapi = &tp->napi[i];
5570
5571                 /* SW status block */
5572                 netdev_err(tp->dev,
5573                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5574                            i,
5575                            tnapi->hw_status->status,
5576                            tnapi->hw_status->status_tag,
5577                            tnapi->hw_status->rx_jumbo_consumer,
5578                            tnapi->hw_status->rx_consumer,
5579                            tnapi->hw_status->rx_mini_consumer,
5580                            tnapi->hw_status->idx[0].rx_producer,
5581                            tnapi->hw_status->idx[0].tx_consumer);
5582
5583                 netdev_err(tp->dev,
5584                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5585                            i,
5586                            tnapi->last_tag, tnapi->last_irq_tag,
5587                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5588                            tnapi->rx_rcb_ptr,
5589                            tnapi->prodring.rx_std_prod_idx,
5590                            tnapi->prodring.rx_std_cons_idx,
5591                            tnapi->prodring.rx_jmb_prod_idx,
5592                            tnapi->prodring.rx_jmb_cons_idx);
5593         }
5594 }
5595
5596 /* This is called whenever we suspect that the system chipset is
5597  * reordering the sequence of MMIO to the tx send mailbox. The symptom
5598  * is bogus tx completions. We try to recover by setting the
5599  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5600  * in the workqueue.
5601  */
5602 static void tg3_tx_recover(struct tg3 *tp)
5603 {
5604         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5605                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5606
5607         netdev_warn(tp->dev,
5608                     "The system may be re-ordering memory-mapped I/O "
5609                     "cycles to the network device, attempting to recover. "
5610                     "Please report the problem to the driver maintainer "
5611                     "and include system chipset information.\n");
5612
5613         spin_lock(&tp->lock);
5614         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5615         spin_unlock(&tp->lock);
5616 }
5617
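/* Number of free tx descriptors: the tx_pending budget minus the
 * in-flight count, computed modulo the ring size so producer and
 * consumer wraparound is handled by the mask.
 */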
5618 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5619 {
5620         /* Tell compiler to fetch tx indices from memory. */
5621         barrier();
5622         return tnapi->tx_pending -
5623                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5624 }
5625
5626 /* Tigon3 never reports partial packet sends.  So we do not
5627  * need special logic to handle SKBs that have not had all
5628  * of their frags sent yet, like SunGEM does.
5629  */
5630 static void tg3_tx(struct tg3_napi *tnapi)
5631 {
5632         struct tg3 *tp = tnapi->tp;
5633         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5634         u32 sw_idx = tnapi->tx_cons;
5635         struct netdev_queue *txq;
5636         int index = tnapi - tp->napi;
5637         unsigned int pkts_compl = 0, bytes_compl = 0;
5638
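        /* With TSS enabled, tp->napi[0] carries no tx ring, so the
         * netdev tx queue index is the napi index minus one.
         */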
5639         if (tg3_flag(tp, ENABLE_TSS))
5640                 index--;
5641
5642         txq = netdev_get_tx_queue(tp->dev, index);
5643
5644         while (sw_idx != hw_idx) {
5645                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5646                 struct sk_buff *skb = ri->skb;
5647                 int i, tx_bug = 0;
5648
5649                 if (unlikely(skb == NULL)) {
5650                         tg3_tx_recover(tp);
5651                         return;
5652                 }
5653
5654                 pci_unmap_single(tp->pdev,
5655                                  dma_unmap_addr(ri, mapping),
5656                                  skb_headlen(skb),
5657                                  PCI_DMA_TODEVICE);
5658
5659                 ri->skb = NULL;
5660
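                /* Mappings that had to be split are flagged
                 * 'fragmented'; walk past every descriptor belonging
                 * to the split entry.
                 */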
5661                 while (ri->fragmented) {
5662                         ri->fragmented = false;
5663                         sw_idx = NEXT_TX(sw_idx);
5664                         ri = &tnapi->tx_buffers[sw_idx];
5665                 }
5666
5667                 sw_idx = NEXT_TX(sw_idx);
5668
5669                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5670                         ri = &tnapi->tx_buffers[sw_idx];
5671                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5672                                 tx_bug = 1;
5673
5674                         pci_unmap_page(tp->pdev,
5675                                        dma_unmap_addr(ri, mapping),
5676                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5677                                        PCI_DMA_TODEVICE);
5678
5679                         while (ri->fragmented) {
5680                                 ri->fragmented = false;
5681                                 sw_idx = NEXT_TX(sw_idx);
5682                                 ri = &tnapi->tx_buffers[sw_idx];
5683                         }
5684
5685                         sw_idx = NEXT_TX(sw_idx);
5686                 }
5687
5688                 pkts_compl++;
5689                 bytes_compl += skb->len;
5690
5691                 dev_kfree_skb(skb);
5692
5693                 if (unlikely(tx_bug)) {
5694                         tg3_tx_recover(tp);
5695                         return;
5696                 }
5697         }
5698
5699         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5700
5701         tnapi->tx_cons = sw_idx;
5702
5703         /* Need to make the tx_cons update visible to tg3_start_xmit()
5704          * before checking for netif_queue_stopped().  Without the
5705          * memory barrier, there is a small possibility that tg3_start_xmit()
5706          * will miss it and cause the queue to be stopped forever.
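	 * This smp_mb() pairs with the one issued after
	 * netif_tx_stop_queue() in the transmit paths.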
5707          */
5708         smp_mb();
5709
5710         if (unlikely(netif_tx_queue_stopped(txq) &&
5711                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5712                 __netif_tx_lock(txq, smp_processor_id());
5713                 if (netif_tx_queue_stopped(txq) &&
5714                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5715                         netif_tx_wake_queue(txq);
5716                 __netif_tx_unlock(txq);
5717         }
5718 }
5719
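/* Free an rx buffer allocated by tg3_alloc_rx_data(): page-fragment
 * allocations from netdev_alloc_frag() drop their page reference, while
 * kmalloc()ed buffers are simply kfree()d.
 */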
5720 static void tg3_frag_free(bool is_frag, void *data)
5721 {
5722         if (is_frag)
5723                 put_page(virt_to_head_page(data));
5724         else
5725                 kfree(data);
5726 }
5727
5728 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5729 {
5730         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5731                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5732
5733         if (!ri->data)
5734                 return;
5735
5736         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5737                          map_sz, PCI_DMA_FROMDEVICE);
5738         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5739         ri->data = NULL;
5740 }
5741
5743 /* Returns size of skb allocated or < 0 on error.
5744  *
5745  * We only need to fill in the address because the other members
5746  * of the RX descriptor are invariant, see tg3_init_rings.
5747  *
5748  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5749  * posting buffers we only dirty the first cache line of the RX
5750  * descriptor (containing the address).  Whereas for the RX status
5751  * buffers the cpu only reads the last cacheline of the RX descriptor
5752  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5753  */
5754 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5755                              u32 opaque_key, u32 dest_idx_unmasked,
5756                              unsigned int *frag_size)
5757 {
5758         struct tg3_rx_buffer_desc *desc;
5759         struct ring_info *map;
5760         u8 *data;
5761         dma_addr_t mapping;
5762         int skb_size, data_size, dest_idx;
5763
5764         switch (opaque_key) {
5765         case RXD_OPAQUE_RING_STD:
5766                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5767                 desc = &tpr->rx_std[dest_idx];
5768                 map = &tpr->rx_std_buffers[dest_idx];
5769                 data_size = tp->rx_pkt_map_sz;
5770                 break;
5771
5772         case RXD_OPAQUE_RING_JUMBO:
5773                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5774                 desc = &tpr->rx_jmb[dest_idx].std;
5775                 map = &tpr->rx_jmb_buffers[dest_idx];
5776                 data_size = TG3_RX_JMB_MAP_SZ;
5777                 break;
5778
5779         default:
5780                 return -EINVAL;
5781         }
5782
5783         /* Do not overwrite any of the map or rp information
5784          * until we are sure we can commit to a new buffer.
5785          *
5786          * Callers depend upon this behavior and assume that
5787          * we leave everything unchanged if we fail.
5788          */
5789         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5790                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
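	/* Size the buffer the way build_skb() expects it: the packet area
	 * plus TG3_RX_OFFSET() headroom in front and a struct
	 * skb_shared_info tail, each padded via SKB_DATA_ALIGN().
	 */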
5791         if (skb_size <= PAGE_SIZE) {
5792                 data = netdev_alloc_frag(skb_size);
5793                 *frag_size = skb_size;
5794         } else {
5795                 data = kmalloc(skb_size, GFP_ATOMIC);
5796                 *frag_size = 0;
5797         }
5798         if (!data)
5799                 return -ENOMEM;
5800
5801         mapping = pci_map_single(tp->pdev,
5802                                  data + TG3_RX_OFFSET(tp),
5803                                  data_size,
5804                                  PCI_DMA_FROMDEVICE);
5805         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5806                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5807                 return -EIO;
5808         }
5809
5810         map->data = data;
5811         dma_unmap_addr_set(map, mapping, mapping);
5812
5813         desc->addr_hi = ((u64)mapping >> 32);
5814         desc->addr_lo = ((u64)mapping & 0xffffffff);
5815
5816         return data_size;
5817 }
5818
5819 /* We only need to move over in the address because the other
5820  * members of the RX descriptor are invariant.  See notes above
5821  * tg3_alloc_rx_data for full details.
5822  */
5823 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5824                            struct tg3_rx_prodring_set *dpr,
5825                            u32 opaque_key, int src_idx,
5826                            u32 dest_idx_unmasked)
5827 {
5828         struct tg3 *tp = tnapi->tp;
5829         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5830         struct ring_info *src_map, *dest_map;
5831         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5832         int dest_idx;
5833
5834         switch (opaque_key) {
5835         case RXD_OPAQUE_RING_STD:
5836                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5837                 dest_desc = &dpr->rx_std[dest_idx];
5838                 dest_map = &dpr->rx_std_buffers[dest_idx];
5839                 src_desc = &spr->rx_std[src_idx];
5840                 src_map = &spr->rx_std_buffers[src_idx];
5841                 break;
5842
5843         case RXD_OPAQUE_RING_JUMBO:
5844                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5845                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5846                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5847                 src_desc = &spr->rx_jmb[src_idx].std;
5848                 src_map = &spr->rx_jmb_buffers[src_idx];
5849                 break;
5850
5851         default:
5852                 return;
5853         }
5854
5855         dest_map->data = src_map->data;
5856         dma_unmap_addr_set(dest_map, mapping,
5857                            dma_unmap_addr(src_map, mapping));
5858         dest_desc->addr_hi = src_desc->addr_hi;
5859         dest_desc->addr_lo = src_desc->addr_lo;
5860
5861         /* Ensure that the update to the skb happens after the physical
5862          * addresses have been transferred to the new BD location.
5863          */
5864         smp_wmb();
5865
5866         src_map->data = NULL;
5867 }
5868
5869 /* The RX ring scheme is composed of multiple rings which post fresh
5870  * buffers to the chip, and one special ring the chip uses to report
5871  * status back to the host.
5872  *
5873  * The special ring reports the status of received packets to the
5874  * host.  The chip does not write into the original descriptor the
5875  * RX buffer was obtained from.  The chip simply takes the original
5876  * descriptor as provided by the host, updates the status and length
5877  * field, then writes this into the next status ring entry.
5878  *
5879  * Each ring the host uses to post buffers to the chip is described
5880  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5881  * it is first placed into on-chip RAM.  When the packet's length is
5882  * known, the chip walks down the TG3_BDINFO entries to select a ring.
5883  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5884  * whose MAXLEN covers the new packet's length is chosen.
5885  *
5886  * The "separate ring for rx status" scheme may sound queer, but it makes
5887  * sense from a cache coherency perspective.  If only the host writes
5888  * to the buffer post rings, and only the chip writes to the rx status
5889  * rings, then cache lines never move beyond shared-modified state.
5890  * If both the host and chip were to write into the same ring, cache line
5891  * eviction could occur since both entities want it in an exclusive state.
5892  */
5893 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5894 {
5895         struct tg3 *tp = tnapi->tp;
5896         u32 work_mask, rx_std_posted = 0;
5897         u32 std_prod_idx, jmb_prod_idx;
5898         u32 sw_idx = tnapi->rx_rcb_ptr;
5899         u16 hw_idx;
5900         int received;
5901         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5902
5903         hw_idx = *(tnapi->rx_rcb_prod_idx);
5904         /*
5905          * We need to order the read of hw_idx and the read of
5906          * the opaque cookie.
5907          */
5908         rmb();
5909         work_mask = 0;
5910         received = 0;
5911         std_prod_idx = tpr->rx_std_prod_idx;
5912         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5913         while (sw_idx != hw_idx && budget > 0) {
5914                 struct ring_info *ri;
5915                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5916                 unsigned int len;
5917                 struct sk_buff *skb;
5918                 dma_addr_t dma_addr;
5919                 u32 opaque_key, desc_idx, *post_ptr;
5920                 u8 *data;
5921
5922                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5923                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5924                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5925                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5926                         dma_addr = dma_unmap_addr(ri, mapping);
5927                         data = ri->data;
5928                         post_ptr = &std_prod_idx;
5929                         rx_std_posted++;
5930                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5931                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5932                         dma_addr = dma_unmap_addr(ri, mapping);
5933                         data = ri->data;
5934                         post_ptr = &jmb_prod_idx;
5935                 } else
5936                         goto next_pkt_nopost;
5937
5938                 work_mask |= opaque_key;
5939
5940                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5941                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5942                 drop_it:
5943                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5944                                        desc_idx, *post_ptr);
5945                 drop_it_no_recycle:
5946                         /* Other statistics are kept track of by the card. */
5947                         tp->rx_dropped++;
5948                         goto next_pkt;
5949                 }
5950
5951                 prefetch(data + TG3_RX_OFFSET(tp));
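		/* The chip reports the frame length including the 4-byte
		 * FCS; strip it so the skb excludes it.
		 */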
5952                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5953                       ETH_FCS_LEN;
5954
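		/* Copy-break: for large packets, post a replacement buffer
		 * and hand the DMA buffer itself to the stack via build_skb()
		 * (no copy).  For small packets it is cheaper to memcpy into
		 * a fresh skb and recycle the mapped buffer to the ring.
		 */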
5955                 if (len > TG3_RX_COPY_THRESH(tp)) {
5956                         int skb_size;
5957                         unsigned int frag_size;
5958
5959                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5960                                                     *post_ptr, &frag_size);
5961                         if (skb_size < 0)
5962                                 goto drop_it;
5963
5964                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5965                                          PCI_DMA_FROMDEVICE);
5966
5967                         skb = build_skb(data, frag_size);
5968                         if (!skb) {
5969                                 tg3_frag_free(frag_size != 0, data);
5970                                 goto drop_it_no_recycle;
5971                         }
5972                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5973                         /* Ensure that the update to the data happens
5974                          * after the usage of the old DMA mapping.
5975                          */
5976                         smp_wmb();
5977
5978                         ri->data = NULL;
5979
5980                 } else {
5981                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5982                                        desc_idx, *post_ptr);
5983
5984                         skb = netdev_alloc_skb(tp->dev,
5985                                                len + TG3_RAW_IP_ALIGN);
5986                         if (skb == NULL)
5987                                 goto drop_it_no_recycle;
5988
5989                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5990                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5991                         memcpy(skb->data,
5992                                data + TG3_RX_OFFSET(tp),
5993                                len);
5994                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5995                 }
5996
5997                 skb_put(skb, len);
5998                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5999                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6000                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6001                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6002                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6003                 else
6004                         skb_checksum_none_assert(skb);
6005
6006                 skb->protocol = eth_type_trans(skb, tp->dev);
6007
6008                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6009                     skb->protocol != htons(ETH_P_8021Q)) {
6010                         dev_kfree_skb(skb);
6011                         goto drop_it_no_recycle;
6012                 }
6013
6014                 if (desc->type_flags & RXD_FLAG_VLAN &&
6015                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6016                         __vlan_hwaccel_put_tag(skb,
6017                                                desc->err_vlan & RXD_VLAN_MASK);
6018
6019                 napi_gro_receive(&tnapi->napi, skb);
6020
6021                 received++;
6022                 budget--;
6023
6024 next_pkt:
6025                 (*post_ptr)++;
6026
6027                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6028                         tpr->rx_std_prod_idx = std_prod_idx &
6029                                                tp->rx_std_ring_mask;
6030                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6031                                      tpr->rx_std_prod_idx);
6032                         work_mask &= ~RXD_OPAQUE_RING_STD;
6033                         rx_std_posted = 0;
6034                 }
6035 next_pkt_nopost:
6036                 sw_idx++;
6037                 sw_idx &= tp->rx_ret_ring_mask;
6038
6039                 /* Refresh hw_idx to see if there is new work */
6040                 if (sw_idx == hw_idx) {
6041                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6042                         rmb();
6043                 }
6044         }
6045
6046         /* ACK the status ring. */
6047         tnapi->rx_rcb_ptr = sw_idx;
6048         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6049
6050         /* Refill RX ring(s). */
6051         if (!tg3_flag(tp, ENABLE_RSS)) {
6052                 /* Sync BD data before updating mailbox */
6053                 wmb();
6054
6055                 if (work_mask & RXD_OPAQUE_RING_STD) {
6056                         tpr->rx_std_prod_idx = std_prod_idx &
6057                                                tp->rx_std_ring_mask;
6058                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6059                                      tpr->rx_std_prod_idx);
6060                 }
6061                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6062                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6063                                                tp->rx_jmb_ring_mask;
6064                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6065                                      tpr->rx_jmb_prod_idx);
6066                 }
6067                 mmiowb();
6068         } else if (work_mask) {
6069                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6070                  * updated before the producer indices can be updated.
6071                  */
6072                 smp_wmb();
6073
6074                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6075                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6076
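		/* Under RSS the chip only consumes tp->napi[0]'s producer
		 * ring.  Kick tp->napi[1], whose poll routine transfers the
		 * buffers posted here into that ring and updates the
		 * hardware mailboxes.
		 */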
6077                 if (tnapi != &tp->napi[1]) {
6078                         tp->rx_refill = true;
6079                         napi_schedule(&tp->napi[1].napi);
6080                 }
6081         }
6082
6083         return received;
6084 }
6085
6086 static void tg3_poll_link(struct tg3 *tp)
6087 {
6088         /* handle link change and other phy events */
6089         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6090                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6091
6092                 if (sblk->status & SD_STATUS_LINK_CHG) {
6093                         sblk->status = SD_STATUS_UPDATED |
6094                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6095                         spin_lock(&tp->lock);
6096                         if (tg3_flag(tp, USE_PHYLIB)) {
6097                                 tw32_f(MAC_STATUS,
6098                                      (MAC_STATUS_SYNC_CHANGED |
6099                                       MAC_STATUS_CFG_CHANGED |
6100                                       MAC_STATUS_MI_COMPLETION |
6101                                       MAC_STATUS_LNKSTATE_CHANGED));
6102                                 udelay(40);
6103                         } else
6104                                 tg3_setup_phy(tp, 0);
6105                         spin_unlock(&tp->lock);
6106                 }
6107         }
6108 }
6109
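/* Transfer recycled rx buffers from a per-vector shadow producer ring
 * (spr) into the ring the chip consumes (dpr).  Returns -ENOSPC when a
 * destination slot is still occupied so the caller can prod the chip to
 * drain it.
 */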
6110 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6111                                 struct tg3_rx_prodring_set *dpr,
6112                                 struct tg3_rx_prodring_set *spr)
6113 {
6114         u32 si, di, cpycnt, src_prod_idx;
6115         int i, err = 0;
6116
6117         while (1) {
6118                 src_prod_idx = spr->rx_std_prod_idx;
6119
6120                 /* Make sure updates to the rx_std_buffers[] entries and the
6121                  * standard producer index are seen in the correct order.
6122                  */
6123                 smp_rmb();
6124
6125                 if (spr->rx_std_cons_idx == src_prod_idx)
6126                         break;
6127
6128                 if (spr->rx_std_cons_idx < src_prod_idx)
6129                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6130                 else
6131                         cpycnt = tp->rx_std_ring_mask + 1 -
6132                                  spr->rx_std_cons_idx;
6133
6134                 cpycnt = min(cpycnt,
6135                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6136
6137                 si = spr->rx_std_cons_idx;
6138                 di = dpr->rx_std_prod_idx;
6139
6140                 for (i = di; i < di + cpycnt; i++) {
6141                         if (dpr->rx_std_buffers[i].data) {
6142                                 cpycnt = i - di;
6143                                 err = -ENOSPC;
6144                                 break;
6145                         }
6146                 }
6147
6148                 if (!cpycnt)
6149                         break;
6150
6151                 /* Ensure that updates to the rx_std_buffers ring and the
6152                  * shadowed hardware producer ring from tg3_recycle_skb() are
6153                  * ordered correctly WRT the skb check above.
6154                  */
6155                 smp_rmb();
6156
6157                 memcpy(&dpr->rx_std_buffers[di],
6158                        &spr->rx_std_buffers[si],
6159                        cpycnt * sizeof(struct ring_info));
6160
6161                 for (i = 0; i < cpycnt; i++, di++, si++) {
6162                         struct tg3_rx_buffer_desc *sbd, *dbd;
6163                         sbd = &spr->rx_std[si];
6164                         dbd = &dpr->rx_std[di];
6165                         dbd->addr_hi = sbd->addr_hi;
6166                         dbd->addr_lo = sbd->addr_lo;
6167                 }
6168
6169                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6170                                        tp->rx_std_ring_mask;
6171                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6172                                        tp->rx_std_ring_mask;
6173         }
6174
6175         while (1) {
6176                 src_prod_idx = spr->rx_jmb_prod_idx;
6177
6178                 /* Make sure updates to the rx_jmb_buffers[] entries and
6179                  * the jumbo producer index are seen in the correct order.
6180                  */
6181                 smp_rmb();
6182
6183                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6184                         break;
6185
6186                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6187                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6188                 else
6189                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6190                                  spr->rx_jmb_cons_idx;
6191
6192                 cpycnt = min(cpycnt,
6193                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6194
6195                 si = spr->rx_jmb_cons_idx;
6196                 di = dpr->rx_jmb_prod_idx;
6197
6198                 for (i = di; i < di + cpycnt; i++) {
6199                         if (dpr->rx_jmb_buffers[i].data) {
6200                                 cpycnt = i - di;
6201                                 err = -ENOSPC;
6202                                 break;
6203                         }
6204                 }
6205
6206                 if (!cpycnt)
6207                         break;
6208
6209                 /* Ensure that updates to the rx_jmb_buffers ring and the
6210                  * shadowed hardware producer ring from tg3_recycle_skb() are
6211                  * ordered correctly WRT the skb check above.
6212                  */
6213                 smp_rmb();
6214
6215                 memcpy(&dpr->rx_jmb_buffers[di],
6216                        &spr->rx_jmb_buffers[si],
6217                        cpycnt * sizeof(struct ring_info));
6218
6219                 for (i = 0; i < cpycnt; i++, di++, si++) {
6220                         struct tg3_rx_buffer_desc *sbd, *dbd;
6221                         sbd = &spr->rx_jmb[si].std;
6222                         dbd = &dpr->rx_jmb[di].std;
6223                         dbd->addr_hi = sbd->addr_hi;
6224                         dbd->addr_lo = sbd->addr_lo;
6225                 }
6226
6227                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6228                                        tp->rx_jmb_ring_mask;
6229                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6230                                        tp->rx_jmb_ring_mask;
6231         }
6232
6233         return err;
6234 }
6235
6236 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6237 {
6238         struct tg3 *tp = tnapi->tp;
6239
6240         /* run TX completion thread */
6241         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6242                 tg3_tx(tnapi);
6243                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6244                         return work_done;
6245         }
6246
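	/* Vectors without an rx return ring (e.g. the tx-only MSI-X vector
	 * when TSS is enabled) have nothing more to do.
	 */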
6247         if (!tnapi->rx_rcb_prod_idx)
6248                 return work_done;
6249
6250         /* run RX thread, within the bounds set by NAPI.
6251          * All RX "locking" is done by ensuring outside
6252          * code synchronizes with tg3->napi.poll()
6253          */
6254         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6255                 work_done += tg3_rx(tnapi, budget - work_done);
6256
6257         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6258                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6259                 int i, err = 0;
6260                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6261                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6262
6263                 tp->rx_refill = false;
6264                 for (i = 1; i <= tp->rxq_cnt; i++)
6265                         err |= tg3_rx_prodring_xfer(tp, dpr,
6266                                                     &tp->napi[i].prodring);
6267
6268                 wmb();
6269
6270                 if (std_prod_idx != dpr->rx_std_prod_idx)
6271                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6272                                      dpr->rx_std_prod_idx);
6273
6274                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6275                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6276                                      dpr->rx_jmb_prod_idx);
6277
6278                 mmiowb();
6279
6280                 if (err)
6281                         tw32_f(HOSTCC_MODE, tp->coal_now);
6282         }
6283
6284         return work_done;
6285 }
6286
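/* Schedule the reset task at most once: RESET_TASK_PENDING serves as the
 * guard bit and is cleared when the task completes or is cancelled.
 */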
6287 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6288 {
6289         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6290                 schedule_work(&tp->reset_task);
6291 }
6292
6293 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6294 {
6295         cancel_work_sync(&tp->reset_task);
6296         tg3_flag_clear(tp, RESET_TASK_PENDING);
6297         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6298 }
6299
6300 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6301 {
6302         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6303         struct tg3 *tp = tnapi->tp;
6304         int work_done = 0;
6305         struct tg3_hw_status *sblk = tnapi->hw_status;
6306
6307         while (1) {
6308                 work_done = tg3_poll_work(tnapi, work_done, budget);
6309
6310                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6311                         goto tx_recovery;
6312
6313                 if (unlikely(work_done >= budget))
6314                         break;
6315
6316                 /* tnapi->last_tag is written to the interrupt mailbox
6317                  * below to tell the hw how much work has been processed,
6318                  * so we must read it before checking for more work.
6319                  */
6320                 tnapi->last_tag = sblk->status_tag;
6321                 tnapi->last_irq_tag = tnapi->last_tag;
6322                 rmb();
6323
6324                 /* check for RX/TX work to do */
6325                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6326                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6327
6328                         /* This test is not race-free, but looping
6329                          * again reduces the number of interrupts.
6330                          */
6331                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6332                                 continue;
6333
6334                         napi_complete(napi);
6335                         /* Reenable interrupts. */
6336                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6337
6338                         /* This test here is synchronized by napi_schedule()
6339                          * and napi_complete() to close the race condition.
6340                          */
6341                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6342                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6343                                                   HOSTCC_MODE_ENABLE |
6344                                                   tnapi->coal_now);
6345                         }
6346                         mmiowb();
6347                         break;
6348                 }
6349         }
6350
6351         return work_done;
6352
6353 tx_recovery:
6354         /* work_done is guaranteed to be less than budget. */
6355         napi_complete(napi);
6356         tg3_reset_task_schedule(tp);
6357         return work_done;
6358 }
6359
6360 static void tg3_process_error(struct tg3 *tp)
6361 {
6362         u32 val;
6363         bool real_error = false;
6364
6365         if (tg3_flag(tp, ERROR_PROCESSED))
6366                 return;
6367
6368         /* Check Flow Attention register */
6369         val = tr32(HOSTCC_FLOW_ATTN);
6370         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6371                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6372                 real_error = true;
6373         }
6374
6375         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6376                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6377                 real_error = true;
6378         }
6379
6380         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6381                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6382                 real_error = true;
6383         }
6384
6385         if (!real_error)
6386                 return;
6387
6388         tg3_dump_state(tp);
6389
6390         tg3_flag_set(tp, ERROR_PROCESSED);
6391         tg3_reset_task_schedule(tp);
6392 }
6393
6394 static int tg3_poll(struct napi_struct *napi, int budget)
6395 {
6396         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6397         struct tg3 *tp = tnapi->tp;
6398         int work_done = 0;
6399         struct tg3_hw_status *sblk = tnapi->hw_status;
6400
6401         while (1) {
6402                 if (sblk->status & SD_STATUS_ERROR)
6403                         tg3_process_error(tp);
6404
6405                 tg3_poll_link(tp);
6406
6407                 work_done = tg3_poll_work(tnapi, work_done, budget);
6408
6409                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6410                         goto tx_recovery;
6411
6412                 if (unlikely(work_done >= budget))
6413                         break;
6414
6415                 if (tg3_flag(tp, TAGGED_STATUS)) {
6416                         /* tnapi->last_tag is used in tg3_int_reenable() below
6417                          * to tell the hw how much work has been processed,
6418                          * so we must read it before checking for more work.
6419                          */
6420                         tnapi->last_tag = sblk->status_tag;
6421                         tnapi->last_irq_tag = tnapi->last_tag;
6422                         rmb();
6423                 } else
6424                         sblk->status &= ~SD_STATUS_UPDATED;
6425
6426                 if (likely(!tg3_has_work(tnapi))) {
6427                         napi_complete(napi);
6428                         tg3_int_reenable(tnapi);
6429                         break;
6430                 }
6431         }
6432
6433         return work_done;
6434
6435 tx_recovery:
6436         /* work_done is guaranteed to be less than budget. */
6437         napi_complete(napi);
6438         tg3_reset_task_schedule(tp);
6439         return work_done;
6440 }
6441
6442 static void tg3_napi_disable(struct tg3 *tp)
6443 {
6444         int i;
6445
6446         for (i = tp->irq_cnt - 1; i >= 0; i--)
6447                 napi_disable(&tp->napi[i].napi);
6448 }
6449
6450 static void tg3_napi_enable(struct tg3 *tp)
6451 {
6452         int i;
6453
6454         for (i = 0; i < tp->irq_cnt; i++)
6455                 napi_enable(&tp->napi[i].napi);
6456 }
6457
6458 static void tg3_napi_init(struct tg3 *tp)
6459 {
6460         int i;
6461
6462         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6463         for (i = 1; i < tp->irq_cnt; i++)
6464                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6465 }
6466
6467 static void tg3_napi_fini(struct tg3 *tp)
6468 {
6469         int i;
6470
6471         for (i = 0; i < tp->irq_cnt; i++)
6472                 netif_napi_del(&tp->napi[i].napi);
6473 }
6474
6475 static inline void tg3_netif_stop(struct tg3 *tp)
6476 {
6477         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6478         tg3_napi_disable(tp);
6479         netif_tx_disable(tp->dev);
6480 }
6481
6482 static inline void tg3_netif_start(struct tg3 *tp)
6483 {
6484         /* NOTE: unconditional netif_tx_wake_all_queues is only
6485          * appropriate so long as all callers are assured to
6486          * have free tx slots (such as after tg3_init_hw)
6487          */
6488         netif_tx_wake_all_queues(tp->dev);
6489
6490         tg3_napi_enable(tp);
6491         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6492         tg3_enable_ints(tp);
6493 }
6494
6495 static void tg3_irq_quiesce(struct tg3 *tp)
6496 {
6497         int i;
6498
6499         BUG_ON(tp->irq_sync);
6500
6501         tp->irq_sync = 1;
6502         smp_mb();
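	/* Make irq_sync visible before waiting: the interrupt handlers
	 * test tg3_irq_sync() and skip scheduling NAPI once it is set.
	 */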
6503
6504         for (i = 0; i < tp->irq_cnt; i++)
6505                 synchronize_irq(tp->napi[i].irq_vec);
6506 }
6507
6508 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6509  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6510  * with as well.  Most of the time, this is not necessary except when
6511  * shutting down the device.
6512  */
6513 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6514 {
6515         spin_lock_bh(&tp->lock);
6516         if (irq_sync)
6517                 tg3_irq_quiesce(tp);
6518 }
6519
6520 static inline void tg3_full_unlock(struct tg3 *tp)
6521 {
6522         spin_unlock_bh(&tp->lock);
6523 }
6524
6525 /* One-shot MSI handler - Chip automatically disables interrupt
6526  * after sending MSI so driver doesn't have to do it.
6527  */
6528 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6529 {
6530         struct tg3_napi *tnapi = dev_id;
6531         struct tg3 *tp = tnapi->tp;
6532
6533         prefetch(tnapi->hw_status);
6534         if (tnapi->rx_rcb)
6535                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6536
6537         if (likely(!tg3_irq_sync(tp)))
6538                 napi_schedule(&tnapi->napi);
6539
6540         return IRQ_HANDLED;
6541 }
6542
6543 /* MSI ISR - No need to check for interrupt sharing and no need to
6544  * flush status block and interrupt mailbox. PCI ordering rules
6545  * guarantee that MSI will arrive after the status block.
6546  */
6547 static irqreturn_t tg3_msi(int irq, void *dev_id)
6548 {
6549         struct tg3_napi *tnapi = dev_id;
6550         struct tg3 *tp = tnapi->tp;
6551
6552         prefetch(tnapi->hw_status);
6553         if (tnapi->rx_rcb)
6554                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6555         /*
6556          * Writing any value to intr-mbox-0 clears PCI INTA# and
6557          * chip-internal interrupt pending events.
6558          * Writing non-zero to intr-mbox-0 additionally tells the
6559          * NIC to stop sending us irqs, engaging "in-intr-handler"
6560          * event coalescing.
6561          */
6562         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6563         if (likely(!tg3_irq_sync(tp)))
6564                 napi_schedule(&tnapi->napi);
6565
6566         return IRQ_RETVAL(1);
6567 }
6568
6569 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6570 {
6571         struct tg3_napi *tnapi = dev_id;
6572         struct tg3 *tp = tnapi->tp;
6573         struct tg3_hw_status *sblk = tnapi->hw_status;
6574         unsigned int handled = 1;
6575
6576         /* In INTx mode, it is possible for the interrupt to arrive at
6577          * the CPU before the status block that was posted prior to the
6578          * interrupt.  Reading the PCI State register will confirm whether
6579          * the interrupt is ours and will flush the status block.
6580          */
6581         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6582                 if (tg3_flag(tp, CHIP_RESETTING) ||
6583                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6584                         handled = 0;
6585                         goto out;
6586                 }
6587         }
6588
6589         /*
6590          * Writing any value to intr-mbox-0 clears PCI INTA# and
6591          * chip-internal interrupt pending events.
6592          * Writing non-zero to intr-mbox-0 additionally tells the
6593          * NIC to stop sending us irqs, engaging "in-intr-handler"
6594          * event coalescing.
6595          *
6596          * Flush the mailbox to de-assert the IRQ immediately to prevent
6597          * spurious interrupts.  The flush impacts performance but
6598          * excessive spurious interrupts can be worse in some cases.
6599          */
6600         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6601         if (tg3_irq_sync(tp))
6602                 goto out;
6603         sblk->status &= ~SD_STATUS_UPDATED;
6604         if (likely(tg3_has_work(tnapi))) {
6605                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6606                 napi_schedule(&tnapi->napi);
6607         } else {
6608                 /* No work, shared interrupt perhaps?  re-enable
6609                  * interrupts, and flush that PCI write
6610                  */
6611                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6612                                0x00000000);
6613         }
6614 out:
6615         return IRQ_RETVAL(handled);
6616 }
6617
6618 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6619 {
6620         struct tg3_napi *tnapi = dev_id;
6621         struct tg3 *tp = tnapi->tp;
6622         struct tg3_hw_status *sblk = tnapi->hw_status;
6623         unsigned int handled = 1;
6624
6625         /* In INTx mode, it is possible for the interrupt to arrive at
6626          * the CPU before the status block that was posted prior to the
6627          * interrupt.  Reading the PCI State register will confirm whether
6628          * the interrupt is ours and will flush the status block.
6629          */
6630         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6631                 if (tg3_flag(tp, CHIP_RESETTING) ||
6632                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6633                         handled = 0;
6634                         goto out;
6635                 }
6636         }
6637
6638         /*
6639          * Writing any value to intr-mbox-0 clears PCI INTA# and
6640          * chip-internal interrupt pending events.
6641          * Writing non-zero to intr-mbox-0 additionally tells the
6642          * NIC to stop sending us irqs, engaging "in-intr-handler"
6643          * event coalescing.
6644          *
6645          * Flush the mailbox to de-assert the IRQ immediately to prevent
6646          * spurious interrupts.  The flush impacts performance but
6647          * excessive spurious interrupts can be worse in some cases.
6648          */
6649         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6650
6651         /*
6652          * In a shared interrupt configuration, sometimes other devices'
6653          * interrupts will scream.  We record the current status tag here
6654          * so that the above check can report that the screaming interrupts
6655          * are unhandled.  Eventually they will be silenced.
6656          */
6657         tnapi->last_irq_tag = sblk->status_tag;
6658
6659         if (tg3_irq_sync(tp))
6660                 goto out;
6661
6662         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6663
6664         napi_schedule(&tnapi->napi);
6665
6666 out:
6667         return IRQ_RETVAL(handled);
6668 }
6669
6670 /* ISR for interrupt test */
6671 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6672 {
6673         struct tg3_napi *tnapi = dev_id;
6674         struct tg3 *tp = tnapi->tp;
6675         struct tg3_hw_status *sblk = tnapi->hw_status;
6676
6677         if ((sblk->status & SD_STATUS_UPDATED) ||
6678             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6679                 tg3_disable_ints(tp);
6680                 return IRQ_RETVAL(1);
6681         }
6682         return IRQ_RETVAL(0);
6683 }
6684
6685 #ifdef CONFIG_NET_POLL_CONTROLLER
6686 static void tg3_poll_controller(struct net_device *dev)
6687 {
6688         int i;
6689         struct tg3 *tp = netdev_priv(dev);
6690
6691         for (i = 0; i < tp->irq_cnt; i++)
6692                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6693 }
6694 #endif
6695
6696 static void tg3_tx_timeout(struct net_device *dev)
6697 {
6698         struct tg3 *tp = netdev_priv(dev);
6699
6700         if (netif_msg_tx_err(tp)) {
6701                 netdev_err(dev, "transmit timed out, resetting\n");
6702                 tg3_dump_state(tp);
6703         }
6704
6705         tg3_reset_task_schedule(tp);
6706 }
6707
6708 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6709 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6710 {
6711         u32 base = (u32) mapping & 0xffffffff;
6712
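	/* 0xffffdcc0 is 9024 bytes (a jumbo-sized buffer plus slack) below
	 * the 4 GB line; the second clause then detects an actual wrap via
	 * unsigned overflow of base + len + 8.
	 */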
6713         return (base > 0xffffdcc0) && (base + len + 8 < base);
6714 }
6715
6716 /* Test for DMA addresses > 40-bit */
6717 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6718                                           int len)
6719 {
6720 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6721         if (tg3_flag(tp, 40BIT_DMA_BUG))
6722                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6723         return 0;
6724 #else
6725         return 0;
6726 #endif
6727 }
6728
6729 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6730                                  dma_addr_t mapping, u32 len, u32 flags,
6731                                  u32 mss, u32 vlan)
6732 {
6733         txbd->addr_hi = ((u64) mapping >> 32);
6734         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6735         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6736         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6737 }
6738
6739 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6740                             dma_addr_t map, u32 len, u32 flags,
6741                             u32 mss, u32 vlan)
6742 {
6743         struct tg3 *tp = tnapi->tp;
6744         bool hwbug = false;
6745
6746         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6747                 hwbug = true;
6748
6749         if (tg3_4g_overflow_test(map, len))
6750                 hwbug = true;
6751
6752         if (tg3_40bit_overflow_test(tp, map, len))
6753                 hwbug = true;
6754
6755         if (tp->dma_limit) {
6756                 u32 prvidx = *entry;
6757                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6758                 while (len > tp->dma_limit && *budget) {
6759                         u32 frag_len = tp->dma_limit;
6760                         len -= tp->dma_limit;
6761
6762                         /* Avoid the 8-byte DMA problem */
6763                         if (len <= 8) {
6764                                 len += tp->dma_limit / 2;
6765                                 frag_len = tp->dma_limit / 2;
6766                         }
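			/* The split above keeps both the fragment emitted
			 * below and the remaining length safely above the
			 * 8-byte minimum the DMA engine can handle.
			 */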
6767
6768                         tnapi->tx_buffers[*entry].fragmented = true;
6769
6770                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6771                                       frag_len, tmp_flag, mss, vlan);
6772                         *budget -= 1;
6773                         prvidx = *entry;
6774                         *entry = NEXT_TX(*entry);
6775
6776                         map += frag_len;
6777                 }
6778
6779                 if (len) {
6780                         if (*budget) {
6781                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6782                                               len, flags, mss, vlan);
6783                                 *budget -= 1;
6784                                 *entry = NEXT_TX(*entry);
6785                         } else {
6786                                 hwbug = true;
6787                                 tnapi->tx_buffers[prvidx].fragmented = false;
6788                         }
6789                 }
6790         } else {
6791                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6792                               len, flags, mss, vlan);
6793                 *entry = NEXT_TX(*entry);
6794         }
6795
6796         return hwbug;
6797 }
6798
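/* Undo the DMA mappings of a queued skb, walking any driver-split
 * (->fragmented) BDs the same way tg3_tx() does.
 */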
6799 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6800 {
6801         int i;
6802         struct sk_buff *skb;
6803         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6804
6805         skb = txb->skb;
6806         txb->skb = NULL;
6807
6808         pci_unmap_single(tnapi->tp->pdev,
6809                          dma_unmap_addr(txb, mapping),
6810                          skb_headlen(skb),
6811                          PCI_DMA_TODEVICE);
6812
6813         while (txb->fragmented) {
6814                 txb->fragmented = false;
6815                 entry = NEXT_TX(entry);
6816                 txb = &tnapi->tx_buffers[entry];
6817         }
6818
6819         for (i = 0; i <= last; i++) {
6820                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6821
6822                 entry = NEXT_TX(entry);
6823                 txb = &tnapi->tx_buffers[entry];
6824
6825                 pci_unmap_page(tnapi->tp->pdev,
6826                                dma_unmap_addr(txb, mapping),
6827                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6828
6829                 while (txb->fragmented) {
6830                         txb->fragmented = false;
6831                         entry = NEXT_TX(entry);
6832                         txb = &tnapi->tx_buffers[entry];
6833                 }
6834         }
6835 }
6836
6837 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6838 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6839                                        struct sk_buff **pskb,
6840                                        u32 *entry, u32 *budget,
6841                                        u32 base_flags, u32 mss, u32 vlan)
6842 {
6843         struct tg3 *tp = tnapi->tp;
6844         struct sk_buff *new_skb, *skb = *pskb;
6845         dma_addr_t new_addr = 0;
6846         int ret = 0;
6847
6848         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6849                 new_skb = skb_copy(skb, GFP_ATOMIC);
6850         else {
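		/* 5701: expand with enough extra headroom that the copied
		 * data starts on a 4-byte boundary (more_headroom is the
		 * distance to the next one; 4 when already aligned).
		 */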
6851                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6852
6853                 new_skb = skb_copy_expand(skb,
6854                                           skb_headroom(skb) + more_headroom,
6855                                           skb_tailroom(skb), GFP_ATOMIC);
6856         }
6857
6858         if (!new_skb) {
6859                 ret = -1;
6860         } else {
6861                 /* New SKB is guaranteed to be linear. */
6862                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6863                                           PCI_DMA_TODEVICE);
6864                 /* Make sure the mapping succeeded */
6865                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6866                         dev_kfree_skb(new_skb);
6867                         ret = -1;
6868                 } else {
6869                         u32 save_entry = *entry;
6870
6871                         base_flags |= TXD_FLAG_END;
6872
6873                         tnapi->tx_buffers[*entry].skb = new_skb;
6874                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6875                                            mapping, new_addr);
6876
6877                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6878                                             new_skb->len, base_flags,
6879                                             mss, vlan)) {
6880                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6881                                 dev_kfree_skb(new_skb);
6882                                 ret = -1;
6883                         }
6884                 }
6885         }
6886
6887         dev_kfree_skb(skb);
6888         *pskb = new_skb;
6889         return ret;
6890 }
6891
6892 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6893
6894 /* Use GSO to work around a rare TSO bug that may be triggered when the
6895  * TSO header is greater than 80 bytes.
6896  */
6897 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6898 {
6899         struct sk_buff *segs, *nskb;
6900         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
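	/* A heuristic: each resulting segment is assumed to need at most
	 * three BDs (headers plus a split payload).
	 */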
6901
6902         /* Estimate the number of fragments in the worst case */
6903         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6904                 netif_stop_queue(tp->dev);
6905
6906                 /* netif_tx_stop_queue() must be done before checking
6907                  * the tx index in tg3_tx_avail() below, because in
6908                  * tg3_tx(), we update tx index before checking for
6909                  * netif_tx_queue_stopped().
6910                  */
6911                 smp_mb();
6912                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6913                         return NETDEV_TX_BUSY;
6914
6915                 netif_wake_queue(tp->dev);
6916         }
6917
6918         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6919         if (IS_ERR(segs))
6920                 goto tg3_tso_bug_end;
6921
6922         do {
6923                 nskb = segs;
6924                 segs = segs->next;
6925                 nskb->next = NULL;
6926                 tg3_start_xmit(nskb, tp->dev);
6927         } while (segs);
6928
6929 tg3_tso_bug_end:
6930         dev_kfree_skb(skb);
6931
6932         return NETDEV_TX_OK;
6933 }
6934
6935 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6936  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6937  */
6938 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6939 {
6940         struct tg3 *tp = netdev_priv(dev);
6941         u32 len, entry, base_flags, mss, vlan = 0;
6942         u32 budget;
6943         int i = -1, would_hit_hwbug;
6944         dma_addr_t mapping;
6945         struct tg3_napi *tnapi;
6946         struct netdev_queue *txq;
6947         unsigned int last;
6948
6949         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6950         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6951         if (tg3_flag(tp, ENABLE_TSS))
6952                 tnapi++;
6953
6954         budget = tg3_tx_avail(tnapi);
6955
6956         /* We are running in BH disabled context with netif_tx_lock
6957          * and TX reclaim runs via tp->napi.poll inside of a software
6958          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6959          * no IRQ context deadlocks to worry about either.  Rejoice!
6960          */
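	/* At least one BD is needed for the linear head plus one per page
	 * fragment; with fewer free slots the ring is effectively full.
	 */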
6961         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6962                 if (!netif_tx_queue_stopped(txq)) {
6963                         netif_tx_stop_queue(txq);
6964
6965                         /* This is a hard error, log it. */
6966                         netdev_err(dev,
6967                                    "BUG! Tx Ring full when queue awake!\n");
6968                 }
6969                 return NETDEV_TX_BUSY;
6970         }
6971
6972         entry = tnapi->tx_prod;
6973         base_flags = 0;
6974         if (skb->ip_summed == CHECKSUM_PARTIAL)
6975                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6976
6977         mss = skb_shinfo(skb)->gso_size;
6978         if (mss) {
6979                 struct iphdr *iph;
6980                 u32 tcp_opt_len, hdr_len;
6981
6982                 if (skb_header_cloned(skb) &&
6983                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6984                         goto drop;
6985
6986                 iph = ip_hdr(skb);
6987                 tcp_opt_len = tcp_optlen(skb);
6988
6989                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6990
6991                 if (!skb_is_gso_v6(skb)) {
6992                         iph->check = 0;
6993                         iph->tot_len = htons(mss + hdr_len);
6994                 }
6995
6996                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6997                     tg3_flag(tp, TSO_BUG))
6998                         return tg3_tso_bug(tp, skb);
6999
7000                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7001                                TXD_FLAG_CPU_POST_DMA);
7002
7003                 if (tg3_flag(tp, HW_TSO_1) ||
7004                     tg3_flag(tp, HW_TSO_2) ||
7005                     tg3_flag(tp, HW_TSO_3)) {
7006                         tcp_hdr(skb)->check = 0;
7007                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7008                 } else
7009                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7010                                                                  iph->daddr, 0,
7011                                                                  IPPROTO_TCP,
7012                                                                  0);
7013
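		/* The hardware TSO engines take the header length via
		 * descriptor bits: HW_TSO_3 scatters it across spare mss and
		 * base_flags bits, while HW_TSO_2 packs it above the MSS.
		 */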
7014                 if (tg3_flag(tp, HW_TSO_3)) {
7015                         mss |= (hdr_len & 0xc) << 12;
7016                         if (hdr_len & 0x10)
7017                                 base_flags |= 0x00000010;
7018                         base_flags |= (hdr_len & 0x3e0) << 5;
7019                 } else if (tg3_flag(tp, HW_TSO_2))
7020                         mss |= hdr_len << 9;
7021                 else if (tg3_flag(tp, HW_TSO_1) ||
7022                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7023                         if (tcp_opt_len || iph->ihl > 5) {
7024                                 int tsflags;
7025
7026                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7027                                 mss |= (tsflags << 11);
7028                         }
7029                 } else {
7030                         if (tcp_opt_len || iph->ihl > 5) {
7031                                 int tsflags;
7032
7033                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7034                                 base_flags |= tsflags << 12;
7035                         }
7036                 }
7037         }
7038
7039         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7040             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7041                 base_flags |= TXD_FLAG_JMB_PKT;
7042
7043         if (vlan_tx_tag_present(skb)) {
7044                 base_flags |= TXD_FLAG_VLAN;
7045                 vlan = vlan_tx_tag_get(skb);
7046         }
7047
7048         len = skb_headlen(skb);
7049
7050         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7051         if (pci_dma_mapping_error(tp->pdev, mapping))
7052                 goto drop;
7053
7055         tnapi->tx_buffers[entry].skb = skb;
7056         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7057
7058         would_hit_hwbug = 0;
7059
7060         if (tg3_flag(tp, 5701_DMA_BUG))
7061                 would_hit_hwbug = 1;
7062
7063         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7064                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7065                             mss, vlan)) {
7066                 would_hit_hwbug = 1;
7067         } else if (skb_shinfo(skb)->nr_frags > 0) {
7068                 u32 tmp_mss = mss;
7069
7070                 if (!tg3_flag(tp, HW_TSO_1) &&
7071                     !tg3_flag(tp, HW_TSO_2) &&
7072                     !tg3_flag(tp, HW_TSO_3))
7073                         tmp_mss = 0;
7074
7075                 /* Now loop through additional data
7076                  * fragments, and queue them.
7077                  */
7078                 last = skb_shinfo(skb)->nr_frags - 1;
7079                 for (i = 0; i <= last; i++) {
7080                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7081
7082                         len = skb_frag_size(frag);
7083                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7084                                                    len, DMA_TO_DEVICE);
7085
7086                         tnapi->tx_buffers[entry].skb = NULL;
7087                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7088                                            mapping);
7089                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7090                                 goto dma_error;
7091
7092                         if (!budget ||
7093                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7094                                             len, base_flags |
7095                                             ((i == last) ? TXD_FLAG_END : 0),
7096                                             tmp_mss, vlan)) {
7097                                 would_hit_hwbug = 1;
7098                                 break;
7099                         }
7100                 }
7101         }
7102
7103         if (would_hit_hwbug) {
7104                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7105
7106                 /* If the workaround fails due to memory/mapping
7107                  * failure, silently drop this packet.
7108                  */
7109                 entry = tnapi->tx_prod;
7110                 budget = tg3_tx_avail(tnapi);
7111                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7112                                                 base_flags, mss, vlan))
7113                         goto drop_nofree;
7114         }
7115
7116         skb_tx_timestamp(skb);
7117         netdev_tx_sent_queue(txq, skb->len);
7118
7119         /* Sync BD data before updating mailbox */
7120         wmb();
7121
7122         /* Packets are ready; update the Tx producer idx locally and on the card. */
7123         tw32_tx_mbox(tnapi->prodmbox, entry);
7124
7125         tnapi->tx_prod = entry;
7126         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7127                 netif_tx_stop_queue(txq);
7128
7129                 /* netif_tx_stop_queue() must be done before checking
7130                  * the tx index in tg3_tx_avail() below, because in
7131                  * tg3_tx(), we update tx index before checking for
7132                  * netif_tx_queue_stopped().
7133                  */
7134                 smp_mb();
7135                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7136                         netif_tx_wake_queue(txq);
7137         }
7138
7139         mmiowb();
7140         return NETDEV_TX_OK;
7141
7142 dma_error:
7143         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7144         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7145 drop:
7146         dev_kfree_skb(skb);
7147 drop_nofree:
7148         tp->tx_dropped++;
7149         return NETDEV_TX_OK;
7150 }
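
/* Editorial illustration (hypothetical helper, not part of the driver):
 * how the HW_TSO_3 branch above folds the LSO header length into the
 * mss word and base_flags.  For a plain IPv4/TCP frame hdr_len is 40
 * (20-byte IP header + 20-byte TCP header; ETH_HLEN was already
 * subtracted), so mss gains 0x8000 and base_flags gains 0x400.
 */
static inline void tg3_example_hw_tso_3_encode(u32 hdr_len, u32 *mss,
                                               u32 *base_flags)
{
        *mss |= (hdr_len & 0xc) << 12;          /* hdr_len bits 2..3 */
        if (hdr_len & 0x10)
                *base_flags |= 0x00000010;      /* hdr_len bit 4 */
        *base_flags |= (hdr_len & 0x3e0) << 5;  /* hdr_len bits 5..9 */
}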
7151
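/* Editorial sketch (hypothetical helper, not part of the driver) of the
 * stop/wake ordering used in the xmit path above: the queue is stopped
 * first, then ring space is re-checked after a full barrier, because
 * tg3_tx() updates the consumer index before testing
 * netif_tx_queue_stopped().  The pairing guarantees at least one side
 * observes the other's update, so the queue cannot stall in the stopped
 * state while descriptors are free.
 */
static inline void tg3_example_stop_then_recheck(struct tg3_napi *tnapi,
                                                 struct netdev_queue *txq)
{
        netif_tx_stop_queue(txq);
        smp_mb();       /* pairs with the consumer update in tg3_tx() */
        if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                netif_tx_wake_queue(txq);
}
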
7152 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7153 {
7154         if (enable) {
7155                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7156                                   MAC_MODE_PORT_MODE_MASK);
7157
7158                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7159
7160                 if (!tg3_flag(tp, 5705_PLUS))
7161                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7162
7163                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7164                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7165                 else
7166                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7167         } else {
7168                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7169
7170                 if (tg3_flag(tp, 5705_PLUS) ||
7171                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7172                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7173                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7174         }
7175
7176         tw32(MAC_MODE, tp->mac_mode);
7177         udelay(40);
7178 }
7179
7180 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7181 {
7182         u32 val, bmcr, mac_mode, ptest = 0;
7183
7184         tg3_phy_toggle_apd(tp, false);
7185         tg3_phy_toggle_automdix(tp, 0);
7186
7187         if (extlpbk && tg3_phy_set_extloopbk(tp))
7188                 return -EIO;
7189
7190         bmcr = BMCR_FULLDPLX;
7191         switch (speed) {
7192         case SPEED_10:
7193                 break;
7194         case SPEED_100:
7195                 bmcr |= BMCR_SPEED100;
7196                 break;
7197         case SPEED_1000:
7198         default:
7199                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7200                         speed = SPEED_100;
7201                         bmcr |= BMCR_SPEED100;
7202                 } else {
7203                         speed = SPEED_1000;
7204                         bmcr |= BMCR_SPEED1000;
7205                 }
7206         }
7207
7208         if (extlpbk) {
7209                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7210                         tg3_readphy(tp, MII_CTRL1000, &val);
7211                         val |= CTL1000_AS_MASTER |
7212                                CTL1000_ENABLE_MASTER;
7213                         tg3_writephy(tp, MII_CTRL1000, val);
7214                 } else {
7215                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7216                                 MII_TG3_FET_PTEST_TRIM_2;
7217                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7218                 }
7219         } else
7220                 bmcr |= BMCR_LOOPBACK;
7221
7222         tg3_writephy(tp, MII_BMCR, bmcr);
7223
7224         /* The write needs to be flushed for the FETs */
7225         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7226                 tg3_readphy(tp, MII_BMCR, &bmcr);
7227
7228         udelay(40);
7229
7230         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7232                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7233                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7234                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7235
7236                 /* The write needs to be flushed for the AC131 */
7237                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7238         }
7239
7240         /* Reset to prevent intermittently losing the first rx packet */
7241         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7242             tg3_flag(tp, 5780_CLASS)) {
7243                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7244                 udelay(10);
7245                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7246         }
7247
7248         mac_mode = tp->mac_mode &
7249                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7250         if (speed == SPEED_1000)
7251                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7252         else
7253                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7254
7255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7256                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7257
7258                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7259                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7260                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7261                         mac_mode |= MAC_MODE_LINK_POLARITY;
7262
7263                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7264                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7265         }
7266
7267         tw32(MAC_MODE, mac_mode);
7268         udelay(40);
7269
7270         return 0;
7271 }
7272
7273 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7274 {
7275         struct tg3 *tp = netdev_priv(dev);
7276
7277         if (features & NETIF_F_LOOPBACK) {
7278                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7279                         return;
7280
7281                 spin_lock_bh(&tp->lock);
7282                 tg3_mac_loopback(tp, true);
7283                 netif_carrier_on(tp->dev);
7284                 spin_unlock_bh(&tp->lock);
7285                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7286         } else {
7287                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7288                         return;
7289
7290                 spin_lock_bh(&tp->lock);
7291                 tg3_mac_loopback(tp, false);
7292                 /* Force link status check */
7293                 tg3_setup_phy(tp, 1);
7294                 spin_unlock_bh(&tp->lock);
7295                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7296         }
7297 }
7298
7299 static netdev_features_t tg3_fix_features(struct net_device *dev,
7300         netdev_features_t features)
7301 {
7302         struct tg3 *tp = netdev_priv(dev);
7303
7304         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7305                 features &= ~NETIF_F_ALL_TSO;
7306
7307         return features;
7308 }
7309
7310 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7311 {
7312         netdev_features_t changed = dev->features ^ features;
7313
7314         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7315                 tg3_set_loopback(dev, features);
7316
7317         return 0;
7318 }
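
/* Editorial sketch (hypothetical helper, not part of the driver): the
 * XOR in tg3_set_features() isolates the feature bits that actually
 * changed, so loopback is reconfigured only on a real toggle of
 * NETIF_F_LOOPBACK rather than on every features write.
 */
static inline bool tg3_example_feature_flipped(netdev_features_t old_feat,
                                               netdev_features_t new_feat,
                                               netdev_features_t bit)
{
        return (old_feat ^ new_feat) & bit;
}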
7319
7320 static void tg3_rx_prodring_free(struct tg3 *tp,
7321                                  struct tg3_rx_prodring_set *tpr)
7322 {
7323         int i;
7324
7325         if (tpr != &tp->napi[0].prodring) {
7326                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7327                      i = (i + 1) & tp->rx_std_ring_mask)
7328                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7329                                         tp->rx_pkt_map_sz);
7330
7331                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7332                         for (i = tpr->rx_jmb_cons_idx;
7333                              i != tpr->rx_jmb_prod_idx;
7334                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7335                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7336                                                 TG3_RX_JMB_MAP_SZ);
7337                         }
7338                 }
7339
7340                 return;
7341         }
7342
7343         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7344                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7345                                 tp->rx_pkt_map_sz);
7346
7347         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7348                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7349                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7350                                         TG3_RX_JMB_MAP_SZ);
7351         }
7352 }
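
/* Editorial sketch (hypothetical helper, not part of the driver): the
 * producer rings are power-of-two sized, so the free loops above step
 * from consumer to producer with a mask instead of a modulo.  With a
 * 512-entry standard ring the mask is 511 and (511 + 1) & 511 wraps
 * back to index 0.
 */
static inline u32 tg3_example_ring_advance(u32 idx, u32 ring_mask)
{
        return (idx + 1) & ring_mask;
}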
7353
7354 /* Initialize rx rings for packet processing.
7355  *
7356  * The chip has been shut down and the driver detached from
7357  * the networking core, so no interrupts or new tx packets will
7358  * end up in the driver.  tp->{tx,}lock are held and thus
7359  * we may not sleep.
7360  */
7361 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7362                                  struct tg3_rx_prodring_set *tpr)
7363 {
7364         u32 i, rx_pkt_dma_sz;
7365
7366         tpr->rx_std_cons_idx = 0;
7367         tpr->rx_std_prod_idx = 0;
7368         tpr->rx_jmb_cons_idx = 0;
7369         tpr->rx_jmb_prod_idx = 0;
7370
7371         if (tpr != &tp->napi[0].prodring) {
7372                 memset(&tpr->rx_std_buffers[0], 0,
7373                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7374                 if (tpr->rx_jmb_buffers)
7375                         memset(&tpr->rx_jmb_buffers[0], 0,
7376                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7377                 goto done;
7378         }
7379
7380         /* Zero out all descriptors. */
7381         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7382
7383         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7384         if (tg3_flag(tp, 5780_CLASS) &&
7385             tp->dev->mtu > ETH_DATA_LEN)
7386                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7387         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7388
7389         /* Initialize invariants of the rings; we only set this
7390          * stuff once.  This works because the card does not
7391          * write into the rx buffer posting rings.
7392          */
7393         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7394                 struct tg3_rx_buffer_desc *rxd;
7395
7396                 rxd = &tpr->rx_std[i];
7397                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7398                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7399                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7400                                (i << RXD_OPAQUE_INDEX_SHIFT));
7401         }
7402
7403         /* Now allocate fresh SKBs for each rx ring. */
7404         for (i = 0; i < tp->rx_pending; i++) {
7405                 unsigned int frag_size;
7406
7407                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7408                                       &frag_size) < 0) {
7409                         netdev_warn(tp->dev,
7410                                     "Using a smaller RX standard ring. Only "
7411                                     "%d out of %d buffers were allocated "
7412                                     "successfully\n", i, tp->rx_pending);
7413                         if (i == 0)
7414                                 goto initfail;
7415                         tp->rx_pending = i;
7416                         break;
7417                 }
7418         }
7419
7420         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7421                 goto done;
7422
7423         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7424
7425         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7426                 goto done;
7427
7428         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7429                 struct tg3_rx_buffer_desc *rxd;
7430
7431                 rxd = &tpr->rx_jmb[i].std;
7432                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7433                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7434                                   RXD_FLAG_JUMBO;
7435                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7436                        (i << RXD_OPAQUE_INDEX_SHIFT));
7437         }
7438
7439         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7440                 unsigned int frag_size;
7441
7442                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7443                                       &frag_size) < 0) {
7444                         netdev_warn(tp->dev,
7445                                     "Using a smaller RX jumbo ring. Only %d "
7446                                     "out of %d buffers were allocated "
7447                                     "successfully\n", i, tp->rx_jumbo_pending);
7448                         if (i == 0)
7449                                 goto initfail;
7450                         tp->rx_jumbo_pending = i;
7451                         break;
7452                 }
7453         }
7454
7455 done:
7456         return 0;
7457
7458 initfail:
7459         tg3_rx_prodring_free(tp, tpr);
7460         return -ENOMEM;
7461 }
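
/* Editorial sketch (hypothetical helper, not part of the driver): the
 * opaque cookie written into each descriptor above packs a ring id
 * (RXD_OPAQUE_RING_STD/JUMBO) together with the buffer index, so the
 * completion path can recover both.  This assumes the tg3.h layout in
 * which the index occupies the bits below the ring id flags.
 */
static inline u32 tg3_example_opaque_to_index(u32 opaque)
{
        return (opaque & RXD_OPAQUE_INDEX_MASK) >> RXD_OPAQUE_INDEX_SHIFT;
}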
7462
7463 static void tg3_rx_prodring_fini(struct tg3 *tp,
7464                                  struct tg3_rx_prodring_set *tpr)
7465 {
7466         kfree(tpr->rx_std_buffers);
7467         tpr->rx_std_buffers = NULL;
7468         kfree(tpr->rx_jmb_buffers);
7469         tpr->rx_jmb_buffers = NULL;
7470         if (tpr->rx_std) {
7471                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7472                                   tpr->rx_std, tpr->rx_std_mapping);
7473                 tpr->rx_std = NULL;
7474         }
7475         if (tpr->rx_jmb) {
7476                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7477                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7478                 tpr->rx_jmb = NULL;
7479         }
7480 }
7481
7482 static int tg3_rx_prodring_init(struct tg3 *tp,
7483                                 struct tg3_rx_prodring_set *tpr)
7484 {
7485         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7486                                       GFP_KERNEL);
7487         if (!tpr->rx_std_buffers)
7488                 return -ENOMEM;
7489
7490         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7491                                          TG3_RX_STD_RING_BYTES(tp),
7492                                          &tpr->rx_std_mapping,
7493                                          GFP_KERNEL);
7494         if (!tpr->rx_std)
7495                 goto err_out;
7496
7497         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7498                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7499                                               GFP_KERNEL);
7500                 if (!tpr->rx_jmb_buffers)
7501                         goto err_out;
7502
7503                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7504                                                  TG3_RX_JMB_RING_BYTES(tp),
7505                                                  &tpr->rx_jmb_mapping,
7506                                                  GFP_KERNEL);
7507                 if (!tpr->rx_jmb)
7508                         goto err_out;
7509         }
7510
7511         return 0;
7512
7513 err_out:
7514         tg3_rx_prodring_fini(tp, tpr);
7515         return -ENOMEM;
7516 }
7517
7518 /* Free up pending packets in all rx/tx rings.
7519  *
7520  * The chip has been shut down and the driver detached from
7521  * the networking core, so no interrupts or new tx packets will
7522  * end up in the driver.  tp->{tx,}lock is not held and we are not
7523  * in an interrupt context and thus may sleep.
7524  */
7525 static void tg3_free_rings(struct tg3 *tp)
7526 {
7527         int i, j;
7528
7529         for (j = 0; j < tp->irq_cnt; j++) {
7530                 struct tg3_napi *tnapi = &tp->napi[j];
7531
7532                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7533
7534                 if (!tnapi->tx_buffers)
7535                         continue;
7536
7537                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7538                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7539
7540                         if (!skb)
7541                                 continue;
7542
7543                         tg3_tx_skb_unmap(tnapi, i,
7544                                          skb_shinfo(skb)->nr_frags - 1);
7545
7546                         dev_kfree_skb_any(skb);
7547                 }
7548                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7549         }
7550 }
7551
7552 /* Initialize tx/rx rings for packet processing.
7553  *
7554  * The chip has been shut down and the driver detached from
7555  * the networking core, so no interrupts or new tx packets will
7556  * end up in the driver.  tp->{tx,}lock are held and thus
7557  * we may not sleep.
7558  */
7559 static int tg3_init_rings(struct tg3 *tp)
7560 {
7561         int i;
7562
7563         /* Free up all the SKBs. */
7564         tg3_free_rings(tp);
7565
7566         for (i = 0; i < tp->irq_cnt; i++) {
7567                 struct tg3_napi *tnapi = &tp->napi[i];
7568
7569                 tnapi->last_tag = 0;
7570                 tnapi->last_irq_tag = 0;
7571                 tnapi->hw_status->status = 0;
7572                 tnapi->hw_status->status_tag = 0;
7573                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7574
7575                 tnapi->tx_prod = 0;
7576                 tnapi->tx_cons = 0;
7577                 if (tnapi->tx_ring)
7578                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7579
7580                 tnapi->rx_rcb_ptr = 0;
7581                 if (tnapi->rx_rcb)
7582                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7583
7584                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7585                         tg3_free_rings(tp);
7586                         return -ENOMEM;
7587                 }
7588         }
7589
7590         return 0;
7591 }
7592
7593 static void tg3_mem_tx_release(struct tg3 *tp)
7594 {
7595         int i;
7596
7597         for (i = 0; i < tp->irq_max; i++) {
7598                 struct tg3_napi *tnapi = &tp->napi[i];
7599
7600                 if (tnapi->tx_ring) {
7601                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7602                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7603                         tnapi->tx_ring = NULL;
7604                 }
7605
7606                 kfree(tnapi->tx_buffers);
7607                 tnapi->tx_buffers = NULL;
7608         }
7609 }
7610
7611 static int tg3_mem_tx_acquire(struct tg3 *tp)
7612 {
7613         int i;
7614         struct tg3_napi *tnapi = &tp->napi[0];
7615
7616         /* If multivector TSS is enabled, vector 0 does not handle
7617          * tx interrupts.  Don't allocate any resources for it.
7618          */
7619         if (tg3_flag(tp, ENABLE_TSS))
7620                 tnapi++;
7621
7622         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7623                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7624                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7625                 if (!tnapi->tx_buffers)
7626                         goto err_out;
7627
7628                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7629                                                     TG3_TX_RING_BYTES,
7630                                                     &tnapi->tx_desc_mapping,
7631                                                     GFP_KERNEL);
7632                 if (!tnapi->tx_ring)
7633                         goto err_out;
7634         }
7635
7636         return 0;
7637
7638 err_out:
7639         tg3_mem_tx_release(tp);
7640         return -ENOMEM;
7641 }
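
/* Editorial note (hypothetical helper, not part of the driver): the
 * open-coded size multiplication in tg3_mem_tx_acquire() above can
 * equivalently be written with kcalloc(), which also checks the
 * multiplication for overflow.
 */
static inline struct tg3_tx_ring_info *tg3_example_alloc_tx_ring_info(void)
{
        return kcalloc(TG3_TX_RING_SIZE, sizeof(struct tg3_tx_ring_info),
                       GFP_KERNEL);
}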
7642
7643 static void tg3_mem_rx_release(struct tg3 *tp)
7644 {
7645         int i;
7646
7647         for (i = 0; i < tp->irq_max; i++) {
7648                 struct tg3_napi *tnapi = &tp->napi[i];
7649
7650                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7651
7652                 if (!tnapi->rx_rcb)
7653                         continue;
7654
7655                 dma_free_coherent(&tp->pdev->dev,
7656                                   TG3_RX_RCB_RING_BYTES(tp),
7657                                   tnapi->rx_rcb,
7658                                   tnapi->rx_rcb_mapping);
7659                 tnapi->rx_rcb = NULL;
7660         }
7661 }
7662
7663 static int tg3_mem_rx_acquire(struct tg3 *tp)
7664 {
7665         unsigned int i, limit;
7666
7667         limit = tp->rxq_cnt;
7668
7669         /* If RSS is enabled, we need a (dummy) producer ring
7670          * set on vector zero.  This is the true hw prodring.
7671          */
7672         if (tg3_flag(tp, ENABLE_RSS))
7673                 limit++;
7674
7675         for (i = 0; i < limit; i++) {
7676                 struct tg3_napi *tnapi = &tp->napi[i];
7677
7678                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7679                         goto err_out;
7680
7681                 /* If multivector RSS is enabled, vector 0
7682                  * does not handle rx or tx interrupts.
7683                  * Don't allocate any resources for it.
7684                  */
7685                 if (!i && tg3_flag(tp, ENABLE_RSS))
7686                         continue;
7687
7688                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7689                                                    TG3_RX_RCB_RING_BYTES(tp),
7690                                                    &tnapi->rx_rcb_mapping,
7691                                                    GFP_KERNEL);
7692                 if (!tnapi->rx_rcb)
7693                         goto err_out;
7694
7695                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7696         }
7697
7698         return 0;
7699
7700 err_out:
7701         tg3_mem_rx_release(tp);
7702         return -ENOMEM;
7703 }
7704
7705 /*
7706  * Must only be invoked with interrupt sources disabled and
7707  * the hardware shut down.
7708  */
7709 static void tg3_free_consistent(struct tg3 *tp)
7710 {
7711         int i;
7712
7713         for (i = 0; i < tp->irq_cnt; i++) {
7714                 struct tg3_napi *tnapi = &tp->napi[i];
7715
7716                 if (tnapi->hw_status) {
7717                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7718                                           tnapi->hw_status,
7719                                           tnapi->status_mapping);
7720                         tnapi->hw_status = NULL;
7721                 }
7722         }
7723
7724         tg3_mem_rx_release(tp);
7725         tg3_mem_tx_release(tp);
7726
7727         if (tp->hw_stats) {
7728                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7729                                   tp->hw_stats, tp->stats_mapping);
7730                 tp->hw_stats = NULL;
7731         }
7732 }
7733
7734 /*
7735  * Must only be invoked with interrupt sources disabled and
7736  * the hardware shut down.  Can sleep.
7737  */
7738 static int tg3_alloc_consistent(struct tg3 *tp)
7739 {
7740         int i;
7741
7742         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7743                                           sizeof(struct tg3_hw_stats),
7744                                           &tp->stats_mapping,
7745                                           GFP_KERNEL);
7746         if (!tp->hw_stats)
7747                 goto err_out;
7748
7749         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7750
7751         for (i = 0; i < tp->irq_cnt; i++) {
7752                 struct tg3_napi *tnapi = &tp->napi[i];
7753                 struct tg3_hw_status *sblk;
7754
7755                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7756                                                       TG3_HW_STATUS_SIZE,
7757                                                       &tnapi->status_mapping,
7758                                                       GFP_KERNEL);
7759                 if (!tnapi->hw_status)
7760                         goto err_out;
7761
7762                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7763                 sblk = tnapi->hw_status;
7764
7765                 if (tg3_flag(tp, ENABLE_RSS)) {
7766                         u16 *prodptr = NULL;
7767
7768                         /*
7769                          * When RSS is enabled, the status block format changes
7770                          * slightly.  The "rx_jumbo_consumer", "reserved",
7771                          * and "rx_mini_consumer" members get mapped to the
7772                          * other three rx return ring producer indexes.
7773                          */
7774                         switch (i) {
7775                         case 1:
7776                                 prodptr = &sblk->idx[0].rx_producer;
7777                                 break;
7778                         case 2:
7779                                 prodptr = &sblk->rx_jumbo_consumer;
7780                                 break;
7781                         case 3:
7782                                 prodptr = &sblk->reserved;
7783                                 break;
7784                         case 4:
7785                                 prodptr = &sblk->rx_mini_consumer;
7786                                 break;
7787                         }
7788                         tnapi->rx_rcb_prod_idx = prodptr;
7789                 } else {
7790                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7791                 }
7792         }
7793
7794         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7795                 goto err_out;
7796
7797         return 0;
7798
7799 err_out:
7800         tg3_free_consistent(tp);
7801         return -ENOMEM;
7802 }
7803
7804 #define MAX_WAIT_CNT 1000
7805
7806 /* To stop a block, clear the enable bit and poll till it
7807  * clears.  tp->lock is held.
7808  */
7809 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7810 {
7811         unsigned int i;
7812         u32 val;
7813
7814         if (tg3_flag(tp, 5705_PLUS)) {
7815                 switch (ofs) {
7816                 case RCVLSC_MODE:
7817                 case DMAC_MODE:
7818                 case MBFREE_MODE:
7819                 case BUFMGR_MODE:
7820                 case MEMARB_MODE:
7821                         /* We can't enable/disable these bits on the
7822                          * 5705/5750, so just report success.
7823                          */
7824                         return 0;
7825
7826                 default:
7827                         break;
7828                 }
7829         }
7830
7831         val = tr32(ofs);
7832         val &= ~enable_bit;
7833         tw32_f(ofs, val);
7834
7835         for (i = 0; i < MAX_WAIT_CNT; i++) {
7836                 udelay(100);
7837                 val = tr32(ofs);
7838                 if ((val & enable_bit) == 0)
7839                         break;
7840         }
7841
7842         if (i == MAX_WAIT_CNT && !silent) {
7843                 dev_err(&tp->pdev->dev,
7844                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7845                         ofs, enable_bit);
7846                 return -ENODEV;
7847         }
7848
7849         return 0;
7850 }
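
/* Worked number for the poll loop above: MAX_WAIT_CNT iterations of
 * udelay(100) give a worst case of
 *
 *      1000 * 100 us = 100,000 us = 100 ms
 *
 * before tg3_stop_block() gives up and returns -ENODEV.
 */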
7851
7852 /* tp->lock is held. */
7853 static int tg3_abort_hw(struct tg3 *tp, int silent)
7854 {
7855         int i, err;
7856
7857         tg3_disable_ints(tp);
7858
7859         tp->rx_mode &= ~RX_MODE_ENABLE;
7860         tw32_f(MAC_RX_MODE, tp->rx_mode);
7861         udelay(10);
7862
7863         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7864         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7865         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7866         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7867         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7868         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7869
7870         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7871         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7872         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7873         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7874         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7875         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7876         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7877
7878         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7879         tw32_f(MAC_MODE, tp->mac_mode);
7880         udelay(40);
7881
7882         tp->tx_mode &= ~TX_MODE_ENABLE;
7883         tw32_f(MAC_TX_MODE, tp->tx_mode);
7884
7885         for (i = 0; i < MAX_WAIT_CNT; i++) {
7886                 udelay(100);
7887                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7888                         break;
7889         }
7890         if (i >= MAX_WAIT_CNT) {
7891                 dev_err(&tp->pdev->dev,
7892                         "%s timed out, TX_MODE_ENABLE will not clear "
7893                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7894                 err |= -ENODEV;
7895         }
7896
7897         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7898         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7899         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7900
7901         tw32(FTQ_RESET, 0xffffffff);
7902         tw32(FTQ_RESET, 0x00000000);
7903
7904         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7905         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7906
7907         for (i = 0; i < tp->irq_cnt; i++) {
7908                 struct tg3_napi *tnapi = &tp->napi[i];
7909                 if (tnapi->hw_status)
7910                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7911         }
7912
7913         return err;
7914 }
7915
7916 /* Save PCI command register before chip reset */
7917 static void tg3_save_pci_state(struct tg3 *tp)
7918 {
7919         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7920 }
7921
7922 /* Restore PCI state after chip reset */
7923 static void tg3_restore_pci_state(struct tg3 *tp)
7924 {
7925         u32 val;
7926
7927         /* Re-enable indirect register accesses. */
7928         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7929                                tp->misc_host_ctrl);
7930
7931         /* Set MAX PCI retry to zero. */
7932         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7933         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7934             tg3_flag(tp, PCIX_MODE))
7935                 val |= PCISTATE_RETRY_SAME_DMA;
7936         /* Allow reads and writes to the APE register and memory space. */
7937         if (tg3_flag(tp, ENABLE_APE))
7938                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7939                        PCISTATE_ALLOW_APE_SHMEM_WR |
7940                        PCISTATE_ALLOW_APE_PSPACE_WR;
7941         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7942
7943         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7944
7945         if (!tg3_flag(tp, PCI_EXPRESS)) {
7946                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7947                                       tp->pci_cacheline_sz);
7948                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7949                                       tp->pci_lat_timer);
7950         }
7951
7952         /* Make sure PCI-X relaxed ordering bit is clear. */
7953         if (tg3_flag(tp, PCIX_MODE)) {
7954                 u16 pcix_cmd;
7955
7956                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7957                                      &pcix_cmd);
7958                 pcix_cmd &= ~PCI_X_CMD_ERO;
7959                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7960                                       pcix_cmd);
7961         }
7962
7963         if (tg3_flag(tp, 5780_CLASS)) {
7964
7965                 /* Chip reset on 5780 will reset MSI enable bit,
7966                  * so we need to restore it.
7967                  */
7968                 if (tg3_flag(tp, USING_MSI)) {
7969                         u16 ctrl;
7970
7971                         pci_read_config_word(tp->pdev,
7972                                              tp->msi_cap + PCI_MSI_FLAGS,
7973                                              &ctrl);
7974                         pci_write_config_word(tp->pdev,
7975                                               tp->msi_cap + PCI_MSI_FLAGS,
7976                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7977                         val = tr32(MSGINT_MODE);
7978                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7979                 }
7980         }
7981 }
7982
7983 /* tp->lock is held. */
7984 static int tg3_chip_reset(struct tg3 *tp)
7985 {
7986         u32 val;
7987         void (*write_op)(struct tg3 *, u32, u32);
7988         int i, err;
7989
7990         tg3_nvram_lock(tp);
7991
7992         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7993
7994         /* No matching tg3_nvram_unlock() after this because
7995          * chip reset below will undo the nvram lock.
7996          */
7997         tp->nvram_lock_cnt = 0;
7998
7999         /* GRC_MISC_CFG core clock reset will clear the memory
8000          * enable bit in PCI register 4 and the MSI enable bit
8001          * on some chips, so we save relevant registers here.
8002          */
8003         tg3_save_pci_state(tp);
8004
8005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8006             tg3_flag(tp, 5755_PLUS))
8007                 tw32(GRC_FASTBOOT_PC, 0);
8008
8009         /*
8010          * We must avoid the readl() that normally takes place.
8011          * It locks machines, causes machine checks, and other
8012          * fun things.  So, temporarily disable the 5701
8013          * hardware workaround, while we do the reset.
8014          */
8015         write_op = tp->write32;
8016         if (write_op == tg3_write_flush_reg32)
8017                 tp->write32 = tg3_write32;
8018
8019         /* Prevent the irq handler from reading or writing PCI registers
8020          * during chip reset when the memory enable bit in the PCI command
8021          * register may be cleared.  The chip does not generate interrupt
8022          * at this time, but the irq handler may still be called due to irq
8023          * sharing or irqpoll.
8024          */
8025         tg3_flag_set(tp, CHIP_RESETTING);
8026         for (i = 0; i < tp->irq_cnt; i++) {
8027                 struct tg3_napi *tnapi = &tp->napi[i];
8028                 if (tnapi->hw_status) {
8029                         tnapi->hw_status->status = 0;
8030                         tnapi->hw_status->status_tag = 0;
8031                 }
8032                 tnapi->last_tag = 0;
8033                 tnapi->last_irq_tag = 0;
8034         }
8035         smp_mb();
8036
8037         for (i = 0; i < tp->irq_cnt; i++)
8038                 synchronize_irq(tp->napi[i].irq_vec);
8039
8040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8041                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8042                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8043         }
8044
8045         /* do the reset */
8046         val = GRC_MISC_CFG_CORECLK_RESET;
8047
8048         if (tg3_flag(tp, PCI_EXPRESS)) {
8049                 /* Force PCIe 1.0a mode */
8050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8051                     !tg3_flag(tp, 57765_PLUS) &&
8052                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8053                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8054                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8055
8056                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8057                         tw32(GRC_MISC_CFG, (1 << 29));
8058                         val |= (1 << 29);
8059                 }
8060         }
8061
8062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8063                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8064                 tw32(GRC_VCPU_EXT_CTRL,
8065                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8066         }
8067
8068         /* Manage gphy power for all CPMU-absent PCIe devices. */
8069         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8070                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8071
8072         tw32(GRC_MISC_CFG, val);
8073
8074         /* restore 5701 hardware bug workaround write method */
8075         tp->write32 = write_op;
8076
8077         /* Unfortunately, we have to delay before the PCI read back.
8078          * Some 575X chips will not even respond to a PCI cfg access
8079          * when the reset command is given to the chip.
8080          *
8081          * How do these hardware designers expect things to work
8082          * properly if the PCI write is posted for a long period
8083          * of time?  It is always necessary to have some method by
8084          * which a register read back can occur to push out the
8085          * write that performs the reset.
8086          *
8087          * For most tg3 variants the trick below was working.
8088          * Ho hum...
8089          */
8090         udelay(120);
8091
8092         /* Flush PCI posted writes.  The normal MMIO registers
8093          * are inaccessible at this time so this is the only
8094          * way to do this reliably (actually, this is no longer
8095          * the case, see above).  I tried to use indirect
8096          * register read/write but this upset some 5701 variants.
8097          */
8098         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8099
8100         udelay(120);
8101
8102         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8103                 u16 val16;
8104
8105                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8106                         int i;
8107                         u32 cfg_val;
8108
8109                         /* Wait for link training to complete.  */
8110                         for (i = 0; i < 5000; i++)
8111                                 udelay(100);
8112
8113                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8114                         pci_write_config_dword(tp->pdev, 0xc4,
8115                                                cfg_val | (1 << 15));
8116                 }
8117
8118                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8119                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8120                 /*
8121                  * Older PCIe devices only support the 128 byte
8122                  * MPS setting.  Enforce the restriction.
8123                  */
8124                 if (!tg3_flag(tp, CPMU_PRESENT))
8125                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8126                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8127
8128                 /* Clear error status */
8129                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8130                                       PCI_EXP_DEVSTA_CED |
8131                                       PCI_EXP_DEVSTA_NFED |
8132                                       PCI_EXP_DEVSTA_FED |
8133                                       PCI_EXP_DEVSTA_URD);
8134         }
8135
8136         tg3_restore_pci_state(tp);
8137
8138         tg3_flag_clear(tp, CHIP_RESETTING);
8139         tg3_flag_clear(tp, ERROR_PROCESSED);
8140
8141         val = 0;
8142         if (tg3_flag(tp, 5780_CLASS))
8143                 val = tr32(MEMARB_MODE);
8144         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8145
8146         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8147                 tg3_stop_fw(tp);
8148                 tw32(0x5000, 0x400);
8149         }
8150
8151         tw32(GRC_MODE, tp->grc_mode);
8152
8153         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8154                 val = tr32(0xc4);
8155
8156                 tw32(0xc4, val | (1 << 15));
8157         }
8158
8159         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8160             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8161                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8162                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8163                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8164                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8165         }
8166
8167         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8168                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8169                 val = tp->mac_mode;
8170         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8171                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8172                 val = tp->mac_mode;
8173         } else
8174                 val = 0;
8175
8176         tw32_f(MAC_MODE, val);
8177         udelay(40);
8178
8179         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8180
8181         err = tg3_poll_fw(tp);
8182         if (err)
8183                 return err;
8184
8185         tg3_mdio_start(tp);
8186
8187         if (tg3_flag(tp, PCI_EXPRESS) &&
8188             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8189             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8190             !tg3_flag(tp, 57765_PLUS)) {
8191                 val = tr32(0x7c00);
8192
8193                 tw32(0x7c00, val | (1 << 25));
8194         }
8195
8196         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8197                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8198                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8199         }
8200
8201         /* Reprobe ASF enable state.  */
8202         tg3_flag_clear(tp, ENABLE_ASF);
8203         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8204         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8205         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8206                 u32 nic_cfg;
8207
8208                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8209                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8210                         tg3_flag_set(tp, ENABLE_ASF);
8211                         tp->last_event_jiffies = jiffies;
8212                         if (tg3_flag(tp, 5750_PLUS))
8213                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8214                 }
8215         }
8216
8217         return 0;
8218 }
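
/* Editorial sketch (hypothetical helper, not part of the driver) of the
 * posted-write flush idiom tg3_chip_reset() relies on: while the MMIO
 * BARs are unusable across the reset, a PCI config space read still
 * completes and forces posted writes ahead of it out to the device.
 */
static inline void tg3_example_flush_posted_writes(struct pci_dev *pdev)
{
        u32 dummy;

        /* Any config register will do; PCI_COMMAND is always present. */
        pci_read_config_dword(pdev, PCI_COMMAND, &dummy);
}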
8219
8220 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8221 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8222
8223 /* tp->lock is held. */
8224 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8225 {
8226         int err;
8227
8228         tg3_stop_fw(tp);
8229
8230         tg3_write_sig_pre_reset(tp, kind);
8231
8232         tg3_abort_hw(tp, silent);
8233         err = tg3_chip_reset(tp);
8234
8235         __tg3_set_mac_addr(tp, 0);
8236
8237         tg3_write_sig_legacy(tp, kind);
8238         tg3_write_sig_post_reset(tp, kind);
8239
8240         if (tp->hw_stats) {
8241                 /* Save the stats across chip resets... */
8242                 tg3_get_nstats(tp, &tp->net_stats_prev);
8243                 tg3_get_estats(tp, &tp->estats_prev);
8244
8245                 /* And make sure the next sample is new data */
8246                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8247         }
8248
8249         if (err)
8250                 return err;
8251
8252         return 0;
8253 }
8254
8255 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8256 {
8257         struct tg3 *tp = netdev_priv(dev);
8258         struct sockaddr *addr = p;
8259         int err = 0, skip_mac_1 = 0;
8260
8261         if (!is_valid_ether_addr(addr->sa_data))
8262                 return -EADDRNOTAVAIL;
8263
8264         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8265
8266         if (!netif_running(dev))
8267                 return 0;
8268
8269         if (tg3_flag(tp, ENABLE_ASF)) {
8270                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8271
8272                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8273                 addr0_low = tr32(MAC_ADDR_0_LOW);
8274                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8275                 addr1_low = tr32(MAC_ADDR_1_LOW);
8276
8277                 /* Skip MAC addr 1 if ASF is using it. */
8278                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8279                     !(addr1_high == 0 && addr1_low == 0))
8280                         skip_mac_1 = 1;
8281         }
8282         spin_lock_bh(&tp->lock);
8283         __tg3_set_mac_addr(tp, skip_mac_1);
8284         spin_unlock_bh(&tp->lock);
8285
8286         return err;
8287 }
8288
8289 /* tp->lock is held. */
8290 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8291                            dma_addr_t mapping, u32 maxlen_flags,
8292                            u32 nic_addr)
8293 {
8294         tg3_write_mem(tp,
8295                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8296                       ((u64) mapping >> 32));
8297         tg3_write_mem(tp,
8298                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8299                       ((u64) mapping & 0xffffffff));
8300         tg3_write_mem(tp,
8301                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8302                        maxlen_flags);
8303
8304         if (!tg3_flag(tp, 5705_PLUS))
8305                 tg3_write_mem(tp,
8306                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8307                               nic_addr);
8308 }
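
/* Editorial illustration (hypothetical helper, not part of the driver):
 * tg3_set_bdinfo() above splits the 64-bit bus address into two 32-bit
 * register writes, high word first.  A mapping of 0x0000000123456789ULL
 * is handed to the chip as 0x00000001 then 0x23456789.
 */
static inline void tg3_example_split_dma_addr(u64 mapping, u32 *hi, u32 *lo)
{
        *hi = (u32)(mapping >> 32);
        *lo = (u32)(mapping & 0xffffffff);
}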
8309
8311 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8312 {
8313         int i = 0;
8314
8315         if (!tg3_flag(tp, ENABLE_TSS)) {
8316                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8317                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8318                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8319         } else {
8320                 tw32(HOSTCC_TXCOL_TICKS, 0);
8321                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8322                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8323
8324                 for (; i < tp->txq_cnt; i++) {
8325                         u32 reg;
8326
8327                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8328                         tw32(reg, ec->tx_coalesce_usecs);
8329                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8330                         tw32(reg, ec->tx_max_coalesced_frames);
8331                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8332                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8333                 }
8334         }
8335
8336         for (; i < tp->irq_max - 1; i++) {
8337                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8338                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8339                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8340         }
8341 }
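
/* Editorial sketch (hypothetical helper, not part of the driver): the
 * per-vector host coalescing registers sit at a fixed 0x18 (24-byte)
 * stride, so the loops above and below derive vector i's register as
 * base + i * 0x18.  The _VEC1 base names refer to MSI-X vector 1, so
 * i == 0 addresses vector 1.
 */
static inline u32 tg3_example_coal_vec_reg(u32 base, int i)
{
        return base + i * 0x18;
}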
8342
8343 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8344 {
8345         int i = 0;
8346         u32 limit = tp->rxq_cnt;
8347
8348         if (!tg3_flag(tp, ENABLE_RSS)) {
8349                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8350                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8351                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8352                 limit--;
8353         } else {
8354                 tw32(HOSTCC_RXCOL_TICKS, 0);
8355                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8356                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8357         }
8358
8359         for (; i < limit; i++) {
8360                 u32 reg;
8361
8362                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8363                 tw32(reg, ec->rx_coalesce_usecs);
8364                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8365                 tw32(reg, ec->rx_max_coalesced_frames);
8366                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8367                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8368         }
8369
8370         for (; i < tp->irq_max - 1; i++) {
8371                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8372                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8373                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8374         }
8375 }
8376
8377 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8378 {
8379         tg3_coal_tx_init(tp, ec);
8380         tg3_coal_rx_init(tp, ec);
8381
8382         if (!tg3_flag(tp, 5705_PLUS)) {
8383                 u32 val = ec->stats_block_coalesce_usecs;
8384
8385                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8386                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8387
8388                 if (!netif_carrier_ok(tp->dev))
8389                         val = 0;
8390
8391                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8392         }
8393 }
8394
8395 /* tp->lock is held. */
8396 static void tg3_rings_reset(struct tg3 *tp)
8397 {
8398         int i;
8399         u32 stblk, txrcb, rxrcb, limit;
8400         struct tg3_napi *tnapi = &tp->napi[0];
8401
8402         /* Disable all transmit rings but the first. */
8403         if (!tg3_flag(tp, 5705_PLUS))
8404                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8405         else if (tg3_flag(tp, 5717_PLUS))
8406                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8407         else if (tg3_flag(tp, 57765_CLASS))
8408                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8409         else
8410                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8411
8412         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8413              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8414                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8415                               BDINFO_FLAGS_DISABLED);
8416
8418         /* Disable all receive return rings but the first. */
8419         if (tg3_flag(tp, 5717_PLUS))
8420                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8421         else if (!tg3_flag(tp, 5705_PLUS))
8422                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8423         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8424                  tg3_flag(tp, 57765_CLASS))
8425                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8426         else
8427                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8428
8429         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8430              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8431                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8432                               BDINFO_FLAGS_DISABLED);
8433
8434         /* Disable interrupts */
8435         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8436         tp->napi[0].chk_msi_cnt = 0;
8437         tp->napi[0].last_rx_cons = 0;
8438         tp->napi[0].last_tx_cons = 0;
8439
8440         /* Zero mailbox registers. */
8441         if (tg3_flag(tp, SUPPORT_MSIX)) {
8442                 for (i = 1; i < tp->irq_max; i++) {
8443                         tp->napi[i].tx_prod = 0;
8444                         tp->napi[i].tx_cons = 0;
8445                         if (tg3_flag(tp, ENABLE_TSS))
8446                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8447                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8448                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8449                         tp->napi[i].chk_msi_cnt = 0;
8450                         tp->napi[i].last_rx_cons = 0;
8451                         tp->napi[i].last_tx_cons = 0;
8452                 }
8453                 if (!tg3_flag(tp, ENABLE_TSS))
8454                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8455         } else {
8456                 tp->napi[0].tx_prod = 0;
8457                 tp->napi[0].tx_cons = 0;
8458                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8459                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8460         }
8461
8462         /* Make sure the NIC-based send BD rings are disabled. */
8463         if (!tg3_flag(tp, 5705_PLUS)) {
8464                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8465                 for (i = 0; i < 16; i++)
8466                         tw32_tx_mbox(mbox + i * 8, 0);
8467         }
8468
8469         txrcb = NIC_SRAM_SEND_RCB;
8470         rxrcb = NIC_SRAM_RCV_RET_RCB;
8471
8472         /* Clear status block in ram. */
8473         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8474
8475         /* Set status block DMA address */
8476         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8477              ((u64) tnapi->status_mapping >> 32));
8478         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8479              ((u64) tnapi->status_mapping & 0xffffffff));
8480
8481         if (tnapi->tx_ring) {
8482                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8483                                (TG3_TX_RING_SIZE <<
8484                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8485                                NIC_SRAM_TX_BUFFER_DESC);
8486                 txrcb += TG3_BDINFO_SIZE;
8487         }
8488
8489         if (tnapi->rx_rcb) {
8490                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8491                                (tp->rx_ret_ring_mask + 1) <<
8492                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8493                 rxrcb += TG3_BDINFO_SIZE;
8494         }
8495
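             /* Program the status block address and BD control blocks for
              * each remaining vector; the per-vector status block registers
              * sit at an 8-byte stride starting at HOSTCC_STATBLCK_RING1.
              */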
8496         stblk = HOSTCC_STATBLCK_RING1;
8497
8498         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8499                 u64 mapping = (u64)tnapi->status_mapping;
8500                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8501                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8502
8503                 /* Clear status block in ram. */
8504                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8505
8506                 if (tnapi->tx_ring) {
8507                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8508                                        (TG3_TX_RING_SIZE <<
8509                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8510                                        NIC_SRAM_TX_BUFFER_DESC);
8511                         txrcb += TG3_BDINFO_SIZE;
8512                 }
8513
8514                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8515                                ((tp->rx_ret_ring_mask + 1) <<
8516                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8517
8518                 stblk += 8;
8519                 rxrcb += TG3_BDINFO_SIZE;
8520         }
8521 }
8522
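     /* Program the RX buffer-descriptor replenish thresholds.  The
      * watermarks are clamped to both the on-chip BD cache size for
      * this chip and the host ring sizes configured via ethtool.
      */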
8523 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8524 {
8525         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8526
8527         if (!tg3_flag(tp, 5750_PLUS) ||
8528             tg3_flag(tp, 5780_CLASS) ||
8529             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8531             tg3_flag(tp, 57765_PLUS))
8532                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8533         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8534                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8535                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8536         else
8537                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8538
8539         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8540         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8541
8542         val = min(nic_rep_thresh, host_rep_thresh);
8543         tw32(RCVBDI_STD_THRESH, val);
8544
8545         if (tg3_flag(tp, 57765_PLUS))
8546                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8547
8548         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8549                 return;
8550
8551         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8552
8553         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8554
8555         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8556         tw32(RCVBDI_JUMBO_THRESH, val);
8557
8558         if (tg3_flag(tp, 57765_PLUS))
8559                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8560 }
8561
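     /* Bit-reflected CRC-32 over the buffer (Ethernet polynomial
      * 0xedb88320), processed one byte at a time.  Used below to pick
      * a bit in the 128-bit multicast hash filter.
      */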
8562 static inline u32 calc_crc(unsigned char *buf, int len)
8563 {
8564         u32 reg;
8565         u32 tmp;
8566         int j, k;
8567
8568         reg = 0xffffffff;
8569
8570         for (j = 0; j < len; j++) {
8571                 reg ^= buf[j];
8572
8573                 for (k = 0; k < 8; k++) {
8574                         tmp = reg & 0x01;
8575
8576                         reg >>= 1;
8577
8578                         if (tmp)
8579                                 reg ^= 0xedb88320;
8580                 }
8581         }
8582
8583         return ~reg;
8584 }
8585
8586 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8587 {
8588         /* accept or reject all multicast frames */
8589         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8590         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8591         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8592         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8593 }
8594
8595 static void __tg3_set_rx_mode(struct net_device *dev)
8596 {
8597         struct tg3 *tp = netdev_priv(dev);
8598         u32 rx_mode;
8599
8600         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8601                                   RX_MODE_KEEP_VLAN_TAG);
8602
8603 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8604         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8605          * flag clear.
8606          */
8607         if (!tg3_flag(tp, ENABLE_ASF))
8608                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8609 #endif
8610
8611         if (dev->flags & IFF_PROMISC) {
8612                 /* Promiscuous mode. */
8613                 rx_mode |= RX_MODE_PROMISC;
8614         } else if (dev->flags & IFF_ALLMULTI) {
8615                 /* Accept all multicast. */
8616                 tg3_set_multi(tp, 1);
8617         } else if (netdev_mc_empty(dev)) {
8618                 /* Reject all multicast. */
8619                 tg3_set_multi(tp, 0);
8620         } else {
8621                 /* Accept one or more multicast(s). */
8622                 struct netdev_hw_addr *ha;
8623                 u32 mc_filter[4] = { 0, };
8624                 u32 regidx;
8625                 u32 bit;
8626                 u32 crc;
8627
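                     /* Map each address onto one of 128 hash-filter bits:
                      * the low 7 bits of the inverted CRC select the bit,
                      * and bits 6:5 of that value select which of the four
                      * 32-bit hash registers it lands in.
                      */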
8628                 netdev_for_each_mc_addr(ha, dev) {
8629                         crc = calc_crc(ha->addr, ETH_ALEN);
8630                         bit = ~crc & 0x7f;
8631                         regidx = (bit & 0x60) >> 5;
8632                         bit &= 0x1f;
8633                         mc_filter[regidx] |= (1 << bit);
8634                 }
8635
8636                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8637                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8638                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8639                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8640         }
8641
8642         if (rx_mode != tp->rx_mode) {
8643                 tp->rx_mode = rx_mode;
8644                 tw32_f(MAC_RX_MODE, rx_mode);
8645                 udelay(10);
8646         }
8647 }
8648
8649 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8650 {
8651         int i;
8652
8653         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8654                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8655 }
8656
8657 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8658 {
8659         int i;
8660
8661         if (!tg3_flag(tp, SUPPORT_MSIX))
8662                 return;
8663
8664         if (tp->irq_cnt <= 2) {
8665                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8666                 return;
8667         }
8668
8669         /* Validate table against current IRQ count */
8670         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8671                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8672                         break;
8673         }
8674
8675         if (i != TG3_RSS_INDIR_TBL_SIZE)
8676                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8677 }
8678
8679 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8680 {
8681         int i = 0;
8682         u32 reg = MAC_RSS_INDIR_TBL_0;
8683
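             /* Pack eight 4-bit indirection table entries into each
              * 32-bit register, first entry in the most significant
              * nibble.
              */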
8684         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8685                 u32 val = tp->rss_ind_tbl[i];
8686                 i++;
8687                 for (; i % 8; i++) {
8688                         val <<= 4;
8689                         val |= tp->rss_ind_tbl[i];
8690                 }
8691                 tw32(reg, val);
8692                 reg += 4;
8693         }
8694 }
8695
8696 /* tp->lock is held. */
8697 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8698 {
8699         u32 val, rdmac_mode;
8700         int i, err, limit;
8701         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8702
8703         tg3_disable_ints(tp);
8704
8705         tg3_stop_fw(tp);
8706
8707         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8708
8709         if (tg3_flag(tp, INIT_COMPLETE))
8710                 tg3_abort_hw(tp, 1);
8711
8712         /* Enable MAC control of LPI */
8713         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8714                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8715                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8716                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8717
8718                 tw32_f(TG3_CPMU_EEE_CTRL,
8719                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8720
8721                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8722                       TG3_CPMU_EEEMD_LPI_IN_TX |
8723                       TG3_CPMU_EEEMD_LPI_IN_RX |
8724                       TG3_CPMU_EEEMD_EEE_ENABLE;
8725
8726                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8727                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8728
8729                 if (tg3_flag(tp, ENABLE_APE))
8730                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8731
8732                 tw32_f(TG3_CPMU_EEE_MODE, val);
8733
8734                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8735                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8736                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8737
8738                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8739                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8740                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8741         }
8742
8743         if (reset_phy)
8744                 tg3_phy_reset(tp);
8745
8746         err = tg3_chip_reset(tp);
8747         if (err)
8748                 return err;
8749
8750         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8751
8752         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8753                 val = tr32(TG3_CPMU_CTRL);
8754                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8755                 tw32(TG3_CPMU_CTRL, val);
8756
8757                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8758                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8759                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8760                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8761
8762                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8763                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8764                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8765                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8766
8767                 val = tr32(TG3_CPMU_HST_ACC);
8768                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8769                 val |= CPMU_HST_ACC_MACCLK_6_25;
8770                 tw32(TG3_CPMU_HST_ACC, val);
8771         }
8772
8773         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8774                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8775                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8776                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8777                 tw32(PCIE_PWR_MGMT_THRESH, val);
8778
8779                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8780                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8781
8782                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8783
8784                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8785                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8786         }
8787
8788         if (tg3_flag(tp, L1PLLPD_EN)) {
8789                 u32 grc_mode = tr32(GRC_MODE);
8790
8791                 /* Access the lower 1K of PL PCIE block registers. */
8792                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8793                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8794
8795                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8796                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8797                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8798
8799                 tw32(GRC_MODE, grc_mode);
8800         }
8801
8802         if (tg3_flag(tp, 57765_CLASS)) {
8803                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8804                         u32 grc_mode = tr32(GRC_MODE);
8805
8806                         /* Access the lower 1K of PL PCIE block registers. */
8807                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8808                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8809
8810                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8811                                    TG3_PCIE_PL_LO_PHYCTL5);
8812                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8813                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8814
8815                         tw32(GRC_MODE, grc_mode);
8816                 }
8817
8818                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8819                         u32 grc_mode = tr32(GRC_MODE);
8820
8821                         /* Access the lower 1K of DL PCIE block registers. */
8822                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8823                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8824
8825                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8826                                    TG3_PCIE_DL_LO_FTSMAX);
8827                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8828                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8829                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8830
8831                         tw32(GRC_MODE, grc_mode);
8832                 }
8833
8834                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8835                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8836                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8837                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8838         }
8839
8840         /* This works around an issue with Athlon chipsets on
8841          * B3 tigon3 silicon.  The bit has no effect on any
8842          * other revision.  Do not set it on PCI Express chips,
8843          * and do not touch the clocks at all if the CPMU is present.
8844          */
8845         if (!tg3_flag(tp, CPMU_PRESENT)) {
8846                 if (!tg3_flag(tp, PCI_EXPRESS))
8847                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8848                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8849         }
8850
8851         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8852             tg3_flag(tp, PCIX_MODE)) {
8853                 val = tr32(TG3PCI_PCISTATE);
8854                 val |= PCISTATE_RETRY_SAME_DMA;
8855                 tw32(TG3PCI_PCISTATE, val);
8856         }
8857
8858         if (tg3_flag(tp, ENABLE_APE)) {
8859                 /* Allow reads and writes to the
8860                  * APE register and memory space.
8861                  */
8862                 val = tr32(TG3PCI_PCISTATE);
8863                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8864                        PCISTATE_ALLOW_APE_SHMEM_WR |
8865                        PCISTATE_ALLOW_APE_PSPACE_WR;
8866                 tw32(TG3PCI_PCISTATE, val);
8867         }
8868
8869         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8870                 /* Enable some hw fixes.  */
8871                 val = tr32(TG3PCI_MSI_DATA);
8872                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8873                 tw32(TG3PCI_MSI_DATA, val);
8874         }
8875
8876         /* Descriptor ring init may make accesses to the
8877          * NIC SRAM area to setup the TX descriptors, so we
8878          * can only do this after the hardware has been
8879          * successfully reset.
8880          */
8881         err = tg3_init_rings(tp);
8882         if (err)
8883                 return err;
8884
8885         if (tg3_flag(tp, 57765_PLUS)) {
8886                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8887                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8888                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8889                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8890                 if (!tg3_flag(tp, 57765_CLASS) &&
8891                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8892                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8893                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8894         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8895                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8896                 /* This value is determined during the probe time DMA
8897                  * engine test, tg3_test_dma.
8898                  */
8899                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8900         }
8901
8902         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8903                           GRC_MODE_4X_NIC_SEND_RINGS |
8904                           GRC_MODE_NO_TX_PHDR_CSUM |
8905                           GRC_MODE_NO_RX_PHDR_CSUM);
8906         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8907
8908         /* Pseudo-header checksum is done by hardware logic and not
8909          * the offload processors, so make the chip do the pseudo-
8910          * header checksums on receive.  For transmit it is more
8911          * convenient to do the pseudo-header checksum in software
8912          * as Linux does that on transmit for us in all cases.
8913          */
8914         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8915
8916         tw32(GRC_MODE,
8917              tp->grc_mode |
8918              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8919
8920         /* Setup the timer prescaler register.  Clock is always 66MHz. */
8921         val = tr32(GRC_MISC_CFG);
8922         val &= ~0xff;
8923         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8924         tw32(GRC_MISC_CFG, val);
8925
8926         /* Initialize MBUF/DESC pool. */
8927         if (tg3_flag(tp, 5750_PLUS)) {
8928                 /* Do nothing.  */
8929         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8930                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8931                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8932                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8933                 else
8934                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8935                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8936                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8937         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8938                 int fw_len;
8939
8940                 fw_len = tp->fw_len;
8941                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8942                 tw32(BUFMGR_MB_POOL_ADDR,
8943                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8944                 tw32(BUFMGR_MB_POOL_SIZE,
8945                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8946         }
8947
8948         if (tp->dev->mtu <= ETH_DATA_LEN) {
8949                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8950                      tp->bufmgr_config.mbuf_read_dma_low_water);
8951                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8952                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8953                 tw32(BUFMGR_MB_HIGH_WATER,
8954                      tp->bufmgr_config.mbuf_high_water);
8955         } else {
8956                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8957                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8958                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8959                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8960                 tw32(BUFMGR_MB_HIGH_WATER,
8961                      tp->bufmgr_config.mbuf_high_water_jumbo);
8962         }
8963         tw32(BUFMGR_DMA_LOW_WATER,
8964              tp->bufmgr_config.dma_low_water);
8965         tw32(BUFMGR_DMA_HIGH_WATER,
8966              tp->bufmgr_config.dma_high_water);
8967
8968         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8970                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8972             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8973             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8974                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8975         tw32(BUFMGR_MODE, val);
8976         for (i = 0; i < 2000; i++) {
8977                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8978                         break;
8979                 udelay(10);
8980         }
8981         if (i >= 2000) {
8982                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8983                 return -ENODEV;
8984         }
8985
8986         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8987                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8988
8989         tg3_setup_rxbd_thresholds(tp);
8990
8991         /* Initialize TG3_BDINFO's at:
8992          *  RCVDBDI_STD_BD:     standard eth size rx ring
8993          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8994          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8995          *
8996          * like so:
8997          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8998          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8999          *                              ring attribute flags
9000          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9001          *
9002          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9003          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9004          *
9005          * The size of each ring is fixed in the firmware, but the location is
9006          * configurable.
9007          */
9008         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9009              ((u64) tpr->rx_std_mapping >> 32));
9010         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9011              ((u64) tpr->rx_std_mapping & 0xffffffff));
9012         if (!tg3_flag(tp, 5717_PLUS))
9013                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9014                      NIC_SRAM_RX_BUFFER_DESC);
9015
9016         /* Disable the mini ring */
9017         if (!tg3_flag(tp, 5705_PLUS))
9018                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9019                      BDINFO_FLAGS_DISABLED);
9020
9021         /* Program the jumbo buffer descriptor ring control
9022          * blocks on those devices that have them.
9023          */
9024         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9025             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9026
9027                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9028                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9029                              ((u64) tpr->rx_jmb_mapping >> 32));
9030                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9031                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9032                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9033                               BDINFO_FLAGS_MAXLEN_SHIFT;
9034                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9035                              val | BDINFO_FLAGS_USE_EXT_RECV);
9036                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9037                             tg3_flag(tp, 57765_CLASS))
9038                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9039                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9040                 } else {
9041                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9042                              BDINFO_FLAGS_DISABLED);
9043                 }
9044
9045                 if (tg3_flag(tp, 57765_PLUS)) {
9046                         val = TG3_RX_STD_RING_SIZE(tp);
9047                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9048                         val |= (TG3_RX_STD_DMA_SZ << 2);
9049                 } else
9050                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9051         } else
9052                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9053
9054         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9055
9056         tpr->rx_std_prod_idx = tp->rx_pending;
9057         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9058
9059         tpr->rx_jmb_prod_idx =
9060                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9061         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9062
9063         tg3_rings_reset(tp);
9064
9065         /* Initialize MAC address and backoff seed. */
9066         __tg3_set_mac_addr(tp, 0);
9067
9068         /* MTU + ethernet header + FCS + optional VLAN tag */
9069         tw32(MAC_RX_MTU_SIZE,
9070              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9071
9072         /* The slot time is changed by tg3_setup_phy if we
9073          * run at gigabit with half duplex.
9074          */
9075         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9076               (6 << TX_LENGTHS_IPG_SHIFT) |
9077               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9078
9079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9080                 val |= tr32(MAC_TX_LENGTHS) &
9081                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9082                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9083
9084         tw32(MAC_TX_LENGTHS, val);
9085
9086         /* Receive rules. */
9087         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9088         tw32(RCVLPC_CONFIG, 0x0181);
9089
9090         /* Calculate the RDMAC_MODE setting early; we need it to
9091          * determine the RCVLPC_STATE_ENABLE mask.
9092          */
9093         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9094                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9095                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9096                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9097                       RDMAC_MODE_LNGREAD_ENAB);
9098
9099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9100                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9101
9102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9105                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9106                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9107                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9108
9109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9110             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9111                 if (tg3_flag(tp, TSO_CAPABLE) &&
9112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9113                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9114                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9115                            !tg3_flag(tp, IS_5788)) {
9116                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9117                 }
9118         }
9119
9120         if (tg3_flag(tp, PCI_EXPRESS))
9121                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9122
9123         if (tg3_flag(tp, HW_TSO_1) ||
9124             tg3_flag(tp, HW_TSO_2) ||
9125             tg3_flag(tp, HW_TSO_3))
9126                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9127
9128         if (tg3_flag(tp, 57765_PLUS) ||
9129             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9130             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9131                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9132
9133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9134                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9135
9136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9137             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9138             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9139             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9140             tg3_flag(tp, 57765_PLUS)) {
9141                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9142                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9143                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9144                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9145                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9146                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9147                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9148                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9149                 }
9150                 tw32(TG3_RDMA_RSRVCTRL_REG,
9151                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9152         }
9153
9154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9155             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9156                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9157                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9158                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9159                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9160         }
9161
9162         /* Receive/send statistics. */
9163         if (tg3_flag(tp, 5750_PLUS)) {
9164                 val = tr32(RCVLPC_STATS_ENABLE);
9165                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9166                 tw32(RCVLPC_STATS_ENABLE, val);
9167         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9168                    tg3_flag(tp, TSO_CAPABLE)) {
9169                 val = tr32(RCVLPC_STATS_ENABLE);
9170                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9171                 tw32(RCVLPC_STATS_ENABLE, val);
9172         } else {
9173                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9174         }
9175         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9176         tw32(SNDDATAI_STATSENAB, 0xffffff);
9177         tw32(SNDDATAI_STATSCTRL,
9178              (SNDDATAI_SCTRL_ENABLE |
9179               SNDDATAI_SCTRL_FASTUPD));
9180
9181         /* Setup host coalescing engine. */
9182         tw32(HOSTCC_MODE, 0);
9183         for (i = 0; i < 2000; i++) {
9184                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9185                         break;
9186                 udelay(10);
9187         }
9188
9189         __tg3_set_coalesce(tp, &tp->coal);
9190
9191         if (!tg3_flag(tp, 5705_PLUS)) {
9192                 /* Status/statistics block address.  See tg3_timer,
9193                  * the tg3_periodic_fetch_stats call there, and
9194                  * tg3_get_stats to see how this works for 5705/5750 chips.
9195                  */
9196                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9197                      ((u64) tp->stats_mapping >> 32));
9198                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9199                      ((u64) tp->stats_mapping & 0xffffffff));
9200                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9201
9202                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9203
9204                 /* Clear statistics and status block memory areas */
9205                 for (i = NIC_SRAM_STATS_BLK;
9206                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9207                      i += sizeof(u32)) {
9208                         tg3_write_mem(tp, i, 0);
9209                         udelay(40);
9210                 }
9211         }
9212
9213         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9214
9215         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9216         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9217         if (!tg3_flag(tp, 5705_PLUS))
9218                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9219
9220         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9221                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9222                 /* reset to prevent losing 1st rx packet intermittently */
9223                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9224                 udelay(10);
9225         }
9226
9227         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9228                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9229                         MAC_MODE_FHDE_ENABLE;
9230         if (tg3_flag(tp, ENABLE_APE))
9231                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9232         if (!tg3_flag(tp, 5705_PLUS) &&
9233             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9234             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9235                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9236         tw32_f(MAC_MODE, tp->mac_mode |
                    MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9237         udelay(40);
9238
9239         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9240          * If TG3_FLAG_IS_NIC is zero, we should read the
9241          * register to preserve the GPIO settings for LOMs. The GPIOs,
9242          * whether used as inputs or outputs, are set by boot code after
9243          * reset.
9244          */
9245         if (!tg3_flag(tp, IS_NIC)) {
9246                 u32 gpio_mask;
9247
9248                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9249                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9250                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9251
9252                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9253                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9254                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9255
9256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9257                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9258
9259                 tp->grc_local_ctrl &= ~gpio_mask;
9260                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9261
9262                 /* GPIO1 must be driven high for eeprom write protect */
9263                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9264                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9265                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9266         }
9267         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9268         udelay(100);
9269
9270         if (tg3_flag(tp, USING_MSIX)) {
9271                 val = tr32(MSGINT_MODE);
9272                 val |= MSGINT_MODE_ENABLE;
9273                 if (tp->irq_cnt > 1)
9274                         val |= MSGINT_MODE_MULTIVEC_EN;
9275                 if (!tg3_flag(tp, 1SHOT_MSI))
9276                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9277                 tw32(MSGINT_MODE, val);
9278         }
9279
9280         if (!tg3_flag(tp, 5705_PLUS)) {
9281                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9282                 udelay(40);
9283         }
9284
9285         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9286                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9287                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9288                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9289                WDMAC_MODE_LNGREAD_ENAB);
9290
9291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9292             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9293                 if (tg3_flag(tp, TSO_CAPABLE) &&
9294                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9295                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9296                         /* nothing */
9297                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9298                            !tg3_flag(tp, IS_5788)) {
9299                         val |= WDMAC_MODE_RX_ACCEL;
9300                 }
9301         }
9302
9303         /* Enable host coalescing bug fix */
9304         if (tg3_flag(tp, 5755_PLUS))
9305                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9306
9307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9308                 val |= WDMAC_MODE_BURST_ALL_DATA;
9309
9310         tw32_f(WDMAC_MODE, val);
9311         udelay(40);
9312
9313         if (tg3_flag(tp, PCIX_MODE)) {
9314                 u16 pcix_cmd;
9315
9316                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9317                                      &pcix_cmd);
9318                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9319                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9320                         pcix_cmd |= PCI_X_CMD_READ_2K;
9321                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9322                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9323                         pcix_cmd |= PCI_X_CMD_READ_2K;
9324                 }
9325                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9326                                       pcix_cmd);
9327         }
9328
9329         tw32_f(RDMAC_MODE, rdmac_mode);
9330         udelay(40);
9331
9332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9333                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9334                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9335                                 break;
9336                 }
9337                 if (i < TG3_NUM_RDMA_CHANNELS) {
9338                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9339                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9340                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9341                         tg3_flag_set(tp, 5719_RDMA_BUG);
9342                 }
9343         }
9344
9345         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9346         if (!tg3_flag(tp, 5705_PLUS))
9347                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9348
9349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9350                 tw32(SNDDATAC_MODE,
9351                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9352         else
9353                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9354
9355         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9356         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9357         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9358         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9359                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9360         tw32(RCVDBDI_MODE, val);
9361         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9362         if (tg3_flag(tp, HW_TSO_1) ||
9363             tg3_flag(tp, HW_TSO_2) ||
9364             tg3_flag(tp, HW_TSO_3))
9365                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9366         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9367         if (tg3_flag(tp, ENABLE_TSS))
9368                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9369         tw32(SNDBDI_MODE, val);
9370         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9371
9372         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9373                 err = tg3_load_5701_a0_firmware_fix(tp);
9374                 if (err)
9375                         return err;
9376         }
9377
9378         if (tg3_flag(tp, TSO_CAPABLE)) {
9379                 err = tg3_load_tso_firmware(tp);
9380                 if (err)
9381                         return err;
9382         }
9383
9384         tp->tx_mode = TX_MODE_ENABLE;
9385
9386         if (tg3_flag(tp, 5755_PLUS) ||
9387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9388                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9389
9390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9391                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9392                 tp->tx_mode &= ~val;
9393                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9394         }
9395
9396         tw32_f(MAC_TX_MODE, tp->tx_mode);
9397         udelay(100);
9398
9399         if (tg3_flag(tp, ENABLE_RSS)) {
9400                 tg3_rss_write_indir_tbl(tp);
9401
9402                 /* Setup the "secret" hash key. */
9403                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9404                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9405                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9406                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9407                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9408                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9409                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9410                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9411                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9412                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9413         }
9414
9415         tp->rx_mode = RX_MODE_ENABLE;
9416         if (tg3_flag(tp, 5755_PLUS))
9417                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9418
9419         if (tg3_flag(tp, ENABLE_RSS))
9420                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9421                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9422                                RX_MODE_RSS_IPV6_HASH_EN |
9423                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9424                                RX_MODE_RSS_IPV4_HASH_EN |
9425                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9426
9427         tw32_f(MAC_RX_MODE, tp->rx_mode);
9428         udelay(10);
9429
9430         tw32(MAC_LED_CTRL, tp->led_ctrl);
9431
9432         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9433         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9434                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9435                 udelay(10);
9436         }
9437         tw32_f(MAC_RX_MODE, tp->rx_mode);
9438         udelay(10);
9439
9440         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9441                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9442                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9443                         /* Set drive transmission level to 1.2V  */
9444                         /* only if the signal pre-emphasis bit is not set  */
9445                         val = tr32(MAC_SERDES_CFG);
9446                         val &= 0xfffff000;
9447                         val |= 0x880;
9448                         tw32(MAC_SERDES_CFG, val);
9449                 }
9450                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9451                         tw32(MAC_SERDES_CFG, 0x616000);
9452         }
9453
9454         /* Prevent chip from dropping frames when flow control
9455          * is enabled.
9456          */
9457         if (tg3_flag(tp, 57765_CLASS))
9458                 val = 1;
9459         else
9460                 val = 2;
9461         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9462
9463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9464             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9465                 /* Use hardware link auto-negotiation */
9466                 tg3_flag_set(tp, HW_AUTONEG);
9467         }
9468
9469         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9471                 u32 tmp;
9472
9473                 tmp = tr32(SERDES_RX_CTRL);
9474                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9475                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9476                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9477                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9478         }
9479
9480         if (!tg3_flag(tp, USE_PHYLIB)) {
9481                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9482                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9483
9484                 err = tg3_setup_phy(tp, 0);
9485                 if (err)
9486                         return err;
9487
9488                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9489                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9490                         u32 tmp;
9491
9492                         /* Clear CRC stats. */
9493                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9494                                 tg3_writephy(tp, MII_TG3_TEST1,
9495                                              tmp | MII_TG3_TEST1_CRC_EN);
9496                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9497                         }
9498                 }
9499         }
9500
9501         __tg3_set_rx_mode(tp->dev);
9502
9503         /* Initialize receive rules. */
9504         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9505         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9506         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9507         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9508
9509         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9510                 limit = 8;
9511         else
9512                 limit = 16;
9513         if (tg3_flag(tp, ENABLE_ASF))
9514                 limit -= 4;
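             /* Intentional fall-through: clear every unused receive rule
              * from (limit - 1) down to rule 4.  Rules 0-3 are never
              * cleared here.
              */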
9515         switch (limit) {
9516         case 16:
9517                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9518         case 15:
9519                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9520         case 14:
9521                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9522         case 13:
9523                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9524         case 12:
9525                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9526         case 11:
9527                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9528         case 10:
9529                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9530         case 9:
9531                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9532         case 8:
9533                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9534         case 7:
9535                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9536         case 6:
9537                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9538         case 5:
9539                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9540         case 4:
9541                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9542         case 3:
9543                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9544         case 2:
9545         case 1:
9546
9547         default:
9548                 break;
9549         }
9550
9551         if (tg3_flag(tp, ENABLE_APE))
9552                 /* Write our heartbeat update interval to APE. */
9553                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9554                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9555
9556         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9557
9558         return 0;
9559 }
9560
9561 /* Called at device open time to get the chip ready for
9562  * packet processing.  Invoked with tp->lock held.
9563  */
9564 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9565 {
9566         tg3_switch_clocks(tp);
9567
9568         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9569
9570         return tg3_reset_hw(tp, reset_phy);
9571 }
9572
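     /* Pull the sensor-data records out of the APE scratchpad and
      * zero any record whose signature or active flag is invalid.
      */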
9573 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9574 {
9575         int i;
9576
9577         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9578                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9579
9580                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9582
9583                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9584                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9585                         memset(ocir, 0, TG3_OCIR_LEN);
9586         }
9587 }
9588
9589 /* sysfs attributes for hwmon */
9590 static ssize_t tg3_show_temp(struct device *dev,
9591                              struct device_attribute *devattr, char *buf)
9592 {
9593         struct pci_dev *pdev = to_pci_dev(dev);
9594         struct net_device *netdev = pci_get_drvdata(pdev);
9595         struct tg3 *tp = netdev_priv(netdev);
9596         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9597         u32 temperature;
9598
9599         spin_lock_bh(&tp->lock);
9600         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9601                                 sizeof(temperature));
9602         spin_unlock_bh(&tp->lock);
             /* The APE reports the temperature in degrees Celsius; the
              * hwmon sysfs ABI expects millidegrees, so scale it up.
              */
9603         return sprintf(buf, "%u\n", temperature * 1000);
9604 }
9605
9606
9607 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9608                           TG3_TEMP_SENSOR_OFFSET);
9609 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9610                           TG3_TEMP_CAUTION_OFFSET);
9611 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9612                           TG3_TEMP_MAX_OFFSET);
9613
9614 static struct attribute *tg3_attributes[] = {
9615         &sensor_dev_attr_temp1_input.dev_attr.attr,
9616         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9617         &sensor_dev_attr_temp1_max.dev_attr.attr,
9618         NULL
9619 };
9620
9621 static const struct attribute_group tg3_group = {
9622         .attrs = tg3_attributes,
9623 };
9624
9625 static void tg3_hwmon_close(struct tg3 *tp)
9626 {
9627         if (tp->hwmon_dev) {
9628                 hwmon_device_unregister(tp->hwmon_dev);
9629                 tp->hwmon_dev = NULL;
9630                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9631         }
9632 }
9633
9634 static void tg3_hwmon_open(struct tg3 *tp)
9635 {
9636         int i, err;
9637         u32 size = 0;
9638         struct pci_dev *pdev = tp->pdev;
9639         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9640
9641         tg3_sd_scan_scratchpad(tp, ocirs);
9642
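             /* Only expose the hwmon interface if at least one sensor
              * record in the scratchpad actually carries data.
              */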
9643         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9644                 if (!ocirs[i].src_data_length)
9645                         continue;
9646
9647                 size += ocirs[i].src_hdr_length;
9648                 size += ocirs[i].src_data_length;
9649         }
9650
9651         if (!size)
9652                 return;
9653
9654         /* Register hwmon sysfs hooks */
9655         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9656         if (err) {
9657                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9658                 return;
9659         }
9660
9661         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9662         if (IS_ERR(tp->hwmon_dev)) {
9663                 tp->hwmon_dev = NULL;
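             /* With at most one RX return ring active, RSS distribution
              * is moot, so an all-zero indirection table is correct.
              */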
9664                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9665                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9666         }
9667 }
9668
9669
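     /* Fold a 32-bit hardware counter into a 64-bit high/low stat,
      * carrying into .high whenever .low wraps (e.g. low = 0xffffffff
      * plus a read of 2 leaves low = 1 and bumps high by one).
      */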
9670 #define TG3_STAT_ADD32(PSTAT, REG) \
9671 do {    u32 __val = tr32(REG); \
9672         (PSTAT)->low += __val; \
9673         if ((PSTAT)->low < __val) \
9674                 (PSTAT)->high += 1; \
9675 } while (0)
9676
9677 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9678 {
9679         struct tg3_hw_stats *sp = tp->hw_stats;
9680
9681         if (!netif_carrier_ok(tp->dev))
9682                 return;
9683
9684         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9685         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9686         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9687         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9688         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9689         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9690         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9691         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9692         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9693         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9694         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9695         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9696         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9697         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9698                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9699                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9700                 u32 val;
9701
9702                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9703                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9704                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9705                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9706         }
9707
9708         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9709         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9710         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9711         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9712         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9713         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9714         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9715         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9716         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9717         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9718         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9719         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9720         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9721         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9722
9723         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9724         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9725             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9726             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9727                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9728         } else {
9729                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9730                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9731                 if (val) {
9732                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9733                         sp->rx_discards.low += val;
9734                         if (sp->rx_discards.low < val)
9735                                 sp->rx_discards.high += 1;
9736                 }
9737                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9738         }
9739         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9740 }
9741
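     /* Watch for MSIs the chip appears to have dropped: if a vector
      * has work pending but its consumer indices have not moved since
      * the previous timer tick, call the handler directly to restart
      * processing.
      */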
9742 static void tg3_chk_missed_msi(struct tg3 *tp)
9743 {
9744         u32 i;
9745
9746         for (i = 0; i < tp->irq_cnt; i++) {
9747                 struct tg3_napi *tnapi = &tp->napi[i];
9748
9749                 if (tg3_has_work(tnapi)) {
9750                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9751                             tnapi->last_tx_cons == tnapi->tx_cons) {
9752                                 if (tnapi->chk_msi_cnt < 1) {
9753                                         tnapi->chk_msi_cnt++;
9754                                         return;
9755                                 }
9756                                 tg3_msi(0, tnapi);
9757                         }
9758                 }
9759                 tnapi->chk_msi_cnt = 0;
9760                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9761                 tnapi->last_tx_cons = tnapi->tx_cons;
9762         }
9763 }
9764
9765 static void tg3_timer(unsigned long __opaque)
9766 {
9767         struct tg3 *tp = (struct tg3 *) __opaque;
9768
9769         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9770                 goto restart_timer;
9771
9772         spin_lock(&tp->lock);
9773
9774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9775             tg3_flag(tp, 57765_CLASS))
9776                 tg3_chk_missed_msi(tp);
9777
9778         if (!tg3_flag(tp, TAGGED_STATUS)) {
9779                 /* All of this garbage is needed because, when using
9780                  * non-tagged IRQ status, the mailbox/status_block
9781                  * protocol the chip uses with the CPU is race-prone.
9782                  */
9783                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9784                         tw32(GRC_LOCAL_CTRL,
9785                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9786                 } else {
9787                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9788                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9789                 }
9790
9791                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9792                         spin_unlock(&tp->lock);
9793                         tg3_reset_task_schedule(tp);
9794                         goto restart_timer;
9795                 }
9796         }
9797
9798         /* This part only runs once per second. */
9799         if (!--tp->timer_counter) {
9800                 if (tg3_flag(tp, 5705_PLUS))
9801                         tg3_periodic_fetch_stats(tp);
9802
9803                 if (tp->setlpicnt && !--tp->setlpicnt)
9804                         tg3_phy_eee_enable(tp);
9805
9806                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9807                         u32 mac_stat;
9808                         int phy_event;
9809
9810                         mac_stat = tr32(MAC_STATUS);
9811
9812                         phy_event = 0;
9813                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9814                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9815                                         phy_event = 1;
9816                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9817                                 phy_event = 1;
9818
9819                         if (phy_event)
9820                                 tg3_setup_phy(tp, 0);
9821                 } else if (tg3_flag(tp, POLL_SERDES)) {
9822                         u32 mac_stat = tr32(MAC_STATUS);
9823                         int need_setup = 0;
9824
9825                         if (netif_carrier_ok(tp->dev) &&
9826                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9827                                 need_setup = 1;
9828                         }
9829                         if (!netif_carrier_ok(tp->dev) &&
9830                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9831                                          MAC_STATUS_SIGNAL_DET))) {
9832                                 need_setup = 1;
9833                         }
9834                         if (need_setup) {
9835                                 if (!tp->serdes_counter) {
9836                                         tw32_f(MAC_MODE,
9837                                              (tp->mac_mode &
9838                                               ~MAC_MODE_PORT_MODE_MASK));
9839                                         udelay(40);
9840                                         tw32_f(MAC_MODE, tp->mac_mode);
9841                                         udelay(40);
9842                                 }
9843                                 tg3_setup_phy(tp, 0);
9844                         }
9845                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9846                            tg3_flag(tp, 5780_CLASS)) {
9847                         tg3_serdes_parallel_detect(tp);
9848                 }
9849
9850                 tp->timer_counter = tp->timer_multiplier;
9851         }
9852
9853         /* Heartbeat is only sent once every 2 seconds.
9854          *
9855          * The heartbeat is to tell the ASF firmware that the host
9856          * driver is still alive.  In the event that the OS crashes,
9857          * ASF needs to reset the hardware to free up the FIFO space
9858          * that may be filled with rx packets destined for the host.
9859          * If the FIFO is full, ASF will no longer function properly.
9860          *
9861                  * Unintended resets have been reported on real-time kernels,
9862                  * where the timer doesn't always run on time.  Netpoll has the
9863                  * same problem.
9864          *
9865          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9866          * to check the ring condition when the heartbeat is expiring
9867          * before doing the reset.  This will prevent most unintended
9868          * resets.
9869          */
9870         if (!--tp->asf_counter) {
9871                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9872                         tg3_wait_for_event_ack(tp);
9873
9874                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9875                                       FWCMD_NICDRV_ALIVE3);
9876                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9877                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9878                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9879
9880                         tg3_generate_fw_event(tp);
9881                 }
9882                 tp->asf_counter = tp->asf_multiplier;
9883         }
9884
9885         spin_unlock(&tp->lock);
9886
9887 restart_timer:
9888         tp->timer.expires = jiffies + tp->timer_offset;
9889         add_timer(&tp->timer);
9890 }
9891
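/* Tagged-status chips are polled once per second; chips that need the
 * missed-MSI check (5717 and the 57765 class) and non-tagged chips are
 * polled at HZ/10.  The multipliers translate that poll rate back into
 * the once-per-second stats work and the periodic ASF heartbeat.
 */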
9892 static void __devinit tg3_timer_init(struct tg3 *tp)
9893 {
9894         if (tg3_flag(tp, TAGGED_STATUS) &&
9895             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9896             !tg3_flag(tp, 57765_CLASS))
9897                 tp->timer_offset = HZ;
9898         else
9899                 tp->timer_offset = HZ / 10;
9900
9901         BUG_ON(tp->timer_offset > HZ);
9902
9903         tp->timer_multiplier = (HZ / tp->timer_offset);
9904         tp->asf_multiplier = (HZ / tp->timer_offset) *
9905                              TG3_FW_UPDATE_FREQ_SEC;
9906
9907         init_timer(&tp->timer);
9908         tp->timer.data = (unsigned long) tp;
9909         tp->timer.function = tg3_timer;
9910 }
9911
9912 static void tg3_timer_start(struct tg3 *tp)
9913 {
9914         tp->asf_counter   = tp->asf_multiplier;
9915         tp->timer_counter = tp->timer_multiplier;
9916
9917         tp->timer.expires = jiffies + tp->timer_offset;
9918         add_timer(&tp->timer);
9919 }
9920
9921 static void tg3_timer_stop(struct tg3 *tp)
9922 {
9923         del_timer_sync(&tp->timer);
9924 }
9925
9926 /* Restart hardware after configuration changes, self-test, etc.
9927  * Invoked with tp->lock held.
9928  */
9929 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9930         __releases(tp->lock)
9931         __acquires(tp->lock)
9932 {
9933         int err;
9934
9935         err = tg3_init_hw(tp, reset_phy);
9936         if (err) {
9937                 netdev_err(tp->dev,
9938                            "Failed to re-initialize device, aborting\n");
9939                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9940                 tg3_full_unlock(tp);
9941                 tg3_timer_stop(tp);
9942                 tp->irq_sync = 0;
9943                 tg3_napi_enable(tp);
9944                 dev_close(tp->dev);
9945                 tg3_full_lock(tp, 0);
9946         }
9947         return err;
9948 }
9949
9950 static void tg3_reset_task(struct work_struct *work)
9951 {
9952         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9953         int err;
9954
9955         tg3_full_lock(tp, 0);
9956
9957         if (!netif_running(tp->dev)) {
9958                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9959                 tg3_full_unlock(tp);
9960                 return;
9961         }
9962
9963         tg3_full_unlock(tp);
9964
9965         tg3_phy_stop(tp);
9966
9967         tg3_netif_stop(tp);
9968
9969         tg3_full_lock(tp, 1);
9970
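        /* A pending TX recovery suggests that posted mailbox writes may
         * have been reordered on the bus; fall back to the safer mailbox
         * write routines and record the reordering workaround before
         * clearing the recovery flag.
         */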
9971         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9972                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9973                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9974                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9975                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9976         }
9977
9978         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9979         err = tg3_init_hw(tp, 1);
9980         if (err)
9981                 goto out;
9982
9983         tg3_netif_start(tp);
9984
9985 out:
9986         tg3_full_unlock(tp);
9987
9988         if (!err)
9989                 tg3_phy_start(tp);
9990
9991         tg3_flag_clear(tp, RESET_TASK_PENDING);
9992 }
9993
9994 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9995 {
9996         irq_handler_t fn;
9997         unsigned long flags;
9998         char *name;
9999         struct tg3_napi *tnapi = &tp->napi[irq_num];
10000
10001         if (tp->irq_cnt == 1)
10002                 name = tp->dev->name;
10003         else {
10004                 name = &tnapi->irq_lbl[0];
10005                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10006                 name[IFNAMSIZ-1] = 0;
10007         }
10008
10009         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10010                 fn = tg3_msi;
10011                 if (tg3_flag(tp, 1SHOT_MSI))
10012                         fn = tg3_msi_1shot;
10013                 flags = 0;
10014         } else {
10015                 fn = tg3_interrupt;
10016                 if (tg3_flag(tp, TAGGED_STATUS))
10017                         fn = tg3_interrupt_tagged;
10018                 flags = IRQF_SHARED;
10019         }
10020
10021         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10022 }
10023
10024 static int tg3_test_interrupt(struct tg3 *tp)
10025 {
10026         struct tg3_napi *tnapi = &tp->napi[0];
10027         struct net_device *dev = tp->dev;
10028         int err, i, intr_ok = 0;
10029         u32 val;
10030
10031         if (!netif_running(dev))
10032                 return -ENODEV;
10033
10034         tg3_disable_ints(tp);
10035
10036         free_irq(tnapi->irq_vec, tnapi);
10037
10038         /*
10039          * Turn off MSI one-shot mode.  Otherwise this test has no
10040          * observable way of knowing whether the interrupt was delivered.
10041          */
10042         if (tg3_flag(tp, 57765_PLUS)) {
10043                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10044                 tw32(MSGINT_MODE, val);
10045         }
10046
10047         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10048                           IRQF_SHARED, dev->name, tnapi);
10049         if (err)
10050                 return err;
10051
10052         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10053         tg3_enable_ints(tp);
10054
10055         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10056                tnapi->coal_now);
10057
10058         for (i = 0; i < 5; i++) {
10059                 u32 int_mbox, misc_host_ctrl;
10060
10061                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10062                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10063
10064                 if ((int_mbox != 0) ||
10065                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10066                         intr_ok = 1;
10067                         break;
10068                 }
10069
10070                 if (tg3_flag(tp, 57765_PLUS) &&
10071                     tnapi->hw_status->status_tag != tnapi->last_tag)
10072                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10073
10074                 msleep(10);
10075         }
10076
10077         tg3_disable_ints(tp);
10078
10079         free_irq(tnapi->irq_vec, tnapi);
10080
10081         err = tg3_request_irq(tp, 0);
10082
10083         if (err)
10084                 return err;
10085
10086         if (intr_ok) {
10087                 /* Re-enable MSI one-shot mode. */
10088                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10089                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10090                         tw32(MSGINT_MODE, val);
10091                 }
10092                 return 0;
10093         }
10094
10095         return -EIO;
10096 }
10097
10098 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10099  * INTx mode is successfully restored.
10100  */
10101 static int tg3_test_msi(struct tg3 *tp)
10102 {
10103         int err;
10104         u16 pci_cmd;
10105
10106         if (!tg3_flag(tp, USING_MSI))
10107                 return 0;
10108
10109         /* Turn off SERR reporting in case MSI terminates with Master
10110          * Abort.
10111          */
10112         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10113         pci_write_config_word(tp->pdev, PCI_COMMAND,
10114                               pci_cmd & ~PCI_COMMAND_SERR);
10115
10116         err = tg3_test_interrupt(tp);
10117
10118         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10119
10120         if (!err)
10121                 return 0;
10122
10123         /* other failures */
10124         if (err != -EIO)
10125                 return err;
10126
10127         /* MSI test failed, go back to INTx mode */
10128         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10129                     "to INTx mode. Please report this failure to the PCI "
10130                     "maintainer and include system chipset information\n");
10131
10132         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10133
10134         pci_disable_msi(tp->pdev);
10135
10136         tg3_flag_clear(tp, USING_MSI);
10137         tp->napi[0].irq_vec = tp->pdev->irq;
10138
10139         err = tg3_request_irq(tp, 0);
10140         if (err)
10141                 return err;
10142
10143         /* Need to reset the chip because the MSI cycle may have terminated
10144          * with Master Abort.
10145          */
10146         tg3_full_lock(tp, 1);
10147
10148         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10149         err = tg3_init_hw(tp, 1);
10150
10151         tg3_full_unlock(tp);
10152
10153         if (err)
10154                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10155
10156         return err;
10157 }
10158
10159 static int tg3_request_firmware(struct tg3 *tp)
10160 {
10161         const __be32 *fw_data;
10162
10163         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10164                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10165                            tp->fw_needed);
10166                 return -ENOENT;
10167         }
10168
10169         fw_data = (void *)tp->fw->data;
10170
10171         /* Firmware blob starts with version numbers, followed by
10172          * start address and the _full_ length including BSS sections
10173          * (which must be longer than the actual data, of course).
10174          */
10175
10176         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10177         if (tp->fw_len < (tp->fw->size - 12)) {
10178                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10179                            tp->fw_len, tp->fw_needed);
10180                 release_firmware(tp->fw);
10181                 tp->fw = NULL;
10182                 return -EINVAL;
10183         }
10184
10185         /* We no longer need firmware; we have it. */
10186         tp->fw_needed = NULL;
10187         return 0;
10188 }
10189
10190 static u32 tg3_irq_count(struct tg3 *tp)
10191 {
10192         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10193
10194         if (irq_cnt > 1) {
10195                 /* We want as many RX rings enabled as there are CPUs.
10196                  * In multiqueue MSI-X mode, the first MSI-X vector
10197                  * only deals with link interrupts, etc., so we add
10198                  * one to the number of vectors we are requesting.
10199                  */
10200                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10201         }
10202
10203         return irq_cnt;
10204 }
10205
10206 static bool tg3_enable_msix(struct tg3 *tp)
10207 {
10208         int i, rc;
10209         struct msix_entry msix_ent[tp->irq_max];
10210
10211         tp->txq_cnt = tp->txq_req;
10212         tp->rxq_cnt = tp->rxq_req;
10213         if (!tp->rxq_cnt)
10214                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10215         if (tp->rxq_cnt > tp->rxq_max)
10216                 tp->rxq_cnt = tp->rxq_max;
10217
10218         /* Disable multiple TX rings by default.  Simple round-robin hardware
10219          * scheduling of the TX rings can cause starvation of rings with
10220          * small packets when other rings have TSO or jumbo packets.
10221          */
10222         if (!tp->txq_req)
10223                 tp->txq_cnt = 1;
10224
10225         tp->irq_cnt = tg3_irq_count(tp);
10226
10227         for (i = 0; i < tp->irq_max; i++) {
10228                 msix_ent[i].entry  = i;
10229                 msix_ent[i].vector = 0;
10230         }
10231
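        /* pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or a positive count of the vectors that are actually
         * available.  In the last case, retry with the smaller count and
         * scale the RX/TX queue counts down to match.
         */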
10232         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10233         if (rc < 0) {
10234                 return false;
10235         } else if (rc != 0) {
10236                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10237                         return false;
10238                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10239                               tp->irq_cnt, rc);
10240                 tp->irq_cnt = rc;
10241                 tp->rxq_cnt = max(rc - 1, 1);
10242                 if (tp->txq_cnt)
10243                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10244         }
10245
10246         for (i = 0; i < tp->irq_max; i++)
10247                 tp->napi[i].irq_vec = msix_ent[i].vector;
10248
10249         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10250                 pci_disable_msix(tp->pdev);
10251                 return false;
10252         }
10253
10254         if (tp->irq_cnt == 1)
10255                 return true;
10256
10257         tg3_flag_set(tp, ENABLE_RSS);
10258
10259         if (tp->txq_cnt > 1)
10260                 tg3_flag_set(tp, ENABLE_TSS);
10261
10262         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10263
10264         return true;
10265 }
10266
10267 static void tg3_ints_init(struct tg3 *tp)
10268 {
10269         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10270             !tg3_flag(tp, TAGGED_STATUS)) {
10271                 /* All MSI-supporting chips should support tagged
10272                  * status.  Assert that this is the case.
10273                  */
10274                 netdev_warn(tp->dev,
10275                             "MSI without TAGGED_STATUS? Not using MSI\n");
10276                 goto defcfg;
10277         }
10278
10279         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10280                 tg3_flag_set(tp, USING_MSIX);
10281         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10282                 tg3_flag_set(tp, USING_MSI);
10283
10284         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10285                 u32 msi_mode = tr32(MSGINT_MODE);
10286                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10287                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10288                 if (!tg3_flag(tp, 1SHOT_MSI))
10289                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10290                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10291         }
10292 defcfg:
10293         if (!tg3_flag(tp, USING_MSIX)) {
10294                 tp->irq_cnt = 1;
10295                 tp->napi[0].irq_vec = tp->pdev->irq;
10296         }
10297
10298         if (tp->irq_cnt == 1) {
10299                 tp->txq_cnt = 1;
10300                 tp->rxq_cnt = 1;
10301                 netif_set_real_num_tx_queues(tp->dev, 1);
10302                 netif_set_real_num_rx_queues(tp->dev, 1);
10303         }
10304 }
10305
10306 static void tg3_ints_fini(struct tg3 *tp)
10307 {
10308         if (tg3_flag(tp, USING_MSIX))
10309                 pci_disable_msix(tp->pdev);
10310         else if (tg3_flag(tp, USING_MSI))
10311                 pci_disable_msi(tp->pdev);
10312         tg3_flag_clear(tp, USING_MSI);
10313         tg3_flag_clear(tp, USING_MSIX);
10314         tg3_flag_clear(tp, ENABLE_RSS);
10315         tg3_flag_clear(tp, ENABLE_TSS);
10316 }
10317
10318 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10319 {
10320         struct net_device *dev = tp->dev;
10321         int i, err;
10322
10323         /*
10324          * Set up interrupts first so we know how
10325          * many NAPI resources to allocate.
10326          */
10327         tg3_ints_init(tp);
10328
10329         tg3_rss_check_indir_tbl(tp);
10330
10331         /* The placement of this call is tied
10332          * to the setup and use of Host TX descriptors.
10333          */
10334         err = tg3_alloc_consistent(tp);
10335         if (err)
10336                 goto err_out1;
10337
10338         tg3_napi_init(tp);
10339
10340         tg3_napi_enable(tp);
10341
10342         for (i = 0; i < tp->irq_cnt; i++) {
10343                 struct tg3_napi *tnapi = &tp->napi[i];
10344                 err = tg3_request_irq(tp, i);
10345                 if (err) {
10346                         for (i--; i >= 0; i--) {
10347                                 tnapi = &tp->napi[i];
10348                                 free_irq(tnapi->irq_vec, tnapi);
10349                         }
10350                         goto err_out2;
10351                 }
10352         }
10353
10354         tg3_full_lock(tp, 0);
10355
10356         err = tg3_init_hw(tp, reset_phy);
10357         if (err) {
10358                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10359                 tg3_free_rings(tp);
10360         }
10361
10362         tg3_full_unlock(tp);
10363
10364         if (err)
10365                 goto err_out3;
10366
10367         if (test_irq && tg3_flag(tp, USING_MSI)) {
10368                 err = tg3_test_msi(tp);
10369
10370                 if (err) {
10371                         tg3_full_lock(tp, 0);
10372                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10373                         tg3_free_rings(tp);
10374                         tg3_full_unlock(tp);
10375
10376                         goto err_out2;
10377                 }
10378
10379                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10380                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10381
10382                         tw32(PCIE_TRANSACTION_CFG,
10383                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10384                 }
10385         }
10386
10387         tg3_phy_start(tp);
10388
10389         tg3_hwmon_open(tp);
10390
10391         tg3_full_lock(tp, 0);
10392
10393         tg3_timer_start(tp);
10394         tg3_flag_set(tp, INIT_COMPLETE);
10395         tg3_enable_ints(tp);
10396
10397         tg3_full_unlock(tp);
10398
10399         netif_tx_start_all_queues(dev);
10400
10401         /*
10402          * Reset the loopback feature if it was turned on while the device
10403          * was down; make sure that it's reinstalled properly now.
10404          */
10405         if (dev->features & NETIF_F_LOOPBACK)
10406                 tg3_set_loopback(dev, dev->features);
10407
10408         return 0;
10409
10410 err_out3:
10411         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10412                 struct tg3_napi *tnapi = &tp->napi[i];
10413                 free_irq(tnapi->irq_vec, tnapi);
10414         }
10415
10416 err_out2:
10417         tg3_napi_disable(tp);
10418         tg3_napi_fini(tp);
10419         tg3_free_consistent(tp);
10420
10421 err_out1:
10422         tg3_ints_fini(tp);
10423
10424         return err;
10425 }
10426
10427 static void tg3_stop(struct tg3 *tp)
10428 {
10429         int i;
10430
10431         tg3_napi_disable(tp);
10432         tg3_reset_task_cancel(tp);
10433
10434         netif_tx_disable(tp->dev);
10435
10436         tg3_timer_stop(tp);
10437
10438         tg3_hwmon_close(tp);
10439
10440         tg3_phy_stop(tp);
10441
10442         tg3_full_lock(tp, 1);
10443
10444         tg3_disable_ints(tp);
10445
10446         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10447         tg3_free_rings(tp);
10448         tg3_flag_clear(tp, INIT_COMPLETE);
10449
10450         tg3_full_unlock(tp);
10451
10452         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10453                 struct tg3_napi *tnapi = &tp->napi[i];
10454                 free_irq(tnapi->irq_vec, tnapi);
10455         }
10456
10457         tg3_ints_fini(tp);
10458
10459         tg3_napi_fini(tp);
10460
10461         tg3_free_consistent(tp);
10462 }
10463
10464 static int tg3_open(struct net_device *dev)
10465 {
10466         struct tg3 *tp = netdev_priv(dev);
10467         int err;
10468
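        /* On 5701 A0 the firmware patch is mandatory, so a load failure
         * is fatal; on other chips the firmware is only needed for TSO,
         * so a failure merely disables that capability.
         */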
10469         if (tp->fw_needed) {
10470                 err = tg3_request_firmware(tp);
10471                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10472                         if (err)
10473                                 return err;
10474                 } else if (err) {
10475                         netdev_warn(tp->dev, "TSO capability disabled\n");
10476                         tg3_flag_clear(tp, TSO_CAPABLE);
10477                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10478                         netdev_notice(tp->dev, "TSO capability restored\n");
10479                         tg3_flag_set(tp, TSO_CAPABLE);
10480                 }
10481         }
10482
10483         netif_carrier_off(tp->dev);
10484
10485         err = tg3_power_up(tp);
10486         if (err)
10487                 return err;
10488
10489         tg3_full_lock(tp, 0);
10490
10491         tg3_disable_ints(tp);
10492         tg3_flag_clear(tp, INIT_COMPLETE);
10493
10494         tg3_full_unlock(tp);
10495
10496         err = tg3_start(tp, true, true);
10497         if (err) {
10498                 tg3_frob_aux_power(tp, false);
10499                 pci_set_power_state(tp->pdev, PCI_D3hot);
10500         }
10501         return err;
10502 }
10503
10504 static int tg3_close(struct net_device *dev)
10505 {
10506         struct tg3 *tp = netdev_priv(dev);
10507
10508         tg3_stop(tp);
10509
10510         /* Clear stats across close / open calls */
10511         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10512         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10513
10514         tg3_power_down(tp);
10515
10516         netif_carrier_off(tp->dev);
10517
10518         return 0;
10519 }
10520
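/* Hardware statistics are kept as split 32-bit high/low words; fold
 * them into a single 64-bit value.
 */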
10521 static inline u64 get_stat64(tg3_stat64_t *val)
10522 {
10523        return ((u64)val->high << 32) | ((u64)val->low);
10524 }
10525
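/* On 5700/5701 with a copper PHY, accumulate CRC errors from the PHY's
 * MII_TG3_TEST1 CRC counter into phy_crc_errors; everything else just
 * reports the MAC's rx_fcs_errors statistic.
 */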
10526 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10527 {
10528         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10529
10530         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10531             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10532              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10533                 u32 val;
10534
10535                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10536                         tg3_writephy(tp, MII_TG3_TEST1,
10537                                      val | MII_TG3_TEST1_CRC_EN);
10538                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10539                 } else
10540                         val = 0;
10541
10542                 tp->phy_crc_errors += val;
10543
10544                 return tp->phy_crc_errors;
10545         }
10546
10547         return get_stat64(&hw_stats->rx_fcs_errors);
10548 }
10549
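/* Add one 64-bit hardware counter on top of the running total saved
 * across the last close (estats_prev).
 */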
10550 #define ESTAT_ADD(member) \
10551         estats->member =        old_estats->member + \
10552                                 get_stat64(&hw_stats->member)
10553
10554 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10555 {
10556         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10557         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10558
10559         ESTAT_ADD(rx_octets);
10560         ESTAT_ADD(rx_fragments);
10561         ESTAT_ADD(rx_ucast_packets);
10562         ESTAT_ADD(rx_mcast_packets);
10563         ESTAT_ADD(rx_bcast_packets);
10564         ESTAT_ADD(rx_fcs_errors);
10565         ESTAT_ADD(rx_align_errors);
10566         ESTAT_ADD(rx_xon_pause_rcvd);
10567         ESTAT_ADD(rx_xoff_pause_rcvd);
10568         ESTAT_ADD(rx_mac_ctrl_rcvd);
10569         ESTAT_ADD(rx_xoff_entered);
10570         ESTAT_ADD(rx_frame_too_long_errors);
10571         ESTAT_ADD(rx_jabbers);
10572         ESTAT_ADD(rx_undersize_packets);
10573         ESTAT_ADD(rx_in_length_errors);
10574         ESTAT_ADD(rx_out_length_errors);
10575         ESTAT_ADD(rx_64_or_less_octet_packets);
10576         ESTAT_ADD(rx_65_to_127_octet_packets);
10577         ESTAT_ADD(rx_128_to_255_octet_packets);
10578         ESTAT_ADD(rx_256_to_511_octet_packets);
10579         ESTAT_ADD(rx_512_to_1023_octet_packets);
10580         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10581         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10582         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10583         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10584         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10585
10586         ESTAT_ADD(tx_octets);
10587         ESTAT_ADD(tx_collisions);
10588         ESTAT_ADD(tx_xon_sent);
10589         ESTAT_ADD(tx_xoff_sent);
10590         ESTAT_ADD(tx_flow_control);
10591         ESTAT_ADD(tx_mac_errors);
10592         ESTAT_ADD(tx_single_collisions);
10593         ESTAT_ADD(tx_mult_collisions);
10594         ESTAT_ADD(tx_deferred);
10595         ESTAT_ADD(tx_excessive_collisions);
10596         ESTAT_ADD(tx_late_collisions);
10597         ESTAT_ADD(tx_collide_2times);
10598         ESTAT_ADD(tx_collide_3times);
10599         ESTAT_ADD(tx_collide_4times);
10600         ESTAT_ADD(tx_collide_5times);
10601         ESTAT_ADD(tx_collide_6times);
10602         ESTAT_ADD(tx_collide_7times);
10603         ESTAT_ADD(tx_collide_8times);
10604         ESTAT_ADD(tx_collide_9times);
10605         ESTAT_ADD(tx_collide_10times);
10606         ESTAT_ADD(tx_collide_11times);
10607         ESTAT_ADD(tx_collide_12times);
10608         ESTAT_ADD(tx_collide_13times);
10609         ESTAT_ADD(tx_collide_14times);
10610         ESTAT_ADD(tx_collide_15times);
10611         ESTAT_ADD(tx_ucast_packets);
10612         ESTAT_ADD(tx_mcast_packets);
10613         ESTAT_ADD(tx_bcast_packets);
10614         ESTAT_ADD(tx_carrier_sense_errors);
10615         ESTAT_ADD(tx_discards);
10616         ESTAT_ADD(tx_errors);
10617
10618         ESTAT_ADD(dma_writeq_full);
10619         ESTAT_ADD(dma_write_prioq_full);
10620         ESTAT_ADD(rxbds_empty);
10621         ESTAT_ADD(rx_discards);
10622         ESTAT_ADD(rx_errors);
10623         ESTAT_ADD(rx_threshold_hit);
10624
10625         ESTAT_ADD(dma_readq_full);
10626         ESTAT_ADD(dma_read_prioq_full);
10627         ESTAT_ADD(tx_comp_queue_full);
10628
10629         ESTAT_ADD(ring_set_send_prod_index);
10630         ESTAT_ADD(ring_status_update);
10631         ESTAT_ADD(nic_irqs);
10632         ESTAT_ADD(nic_avoided_irqs);
10633         ESTAT_ADD(nic_tx_threshold_hit);
10634
10635         ESTAT_ADD(mbuf_lwm_thresh_hit);
10636 }
10637
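/* Fill rtnl_link_stats64 from the hardware statistics block, folding in
 * the totals accumulated before the last close (net_stats_prev).
 */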
10638 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10639 {
10640         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10641         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10642
10643         stats->rx_packets = old_stats->rx_packets +
10644                 get_stat64(&hw_stats->rx_ucast_packets) +
10645                 get_stat64(&hw_stats->rx_mcast_packets) +
10646                 get_stat64(&hw_stats->rx_bcast_packets);
10647
10648         stats->tx_packets = old_stats->tx_packets +
10649                 get_stat64(&hw_stats->tx_ucast_packets) +
10650                 get_stat64(&hw_stats->tx_mcast_packets) +
10651                 get_stat64(&hw_stats->tx_bcast_packets);
10652
10653         stats->rx_bytes = old_stats->rx_bytes +
10654                 get_stat64(&hw_stats->rx_octets);
10655         stats->tx_bytes = old_stats->tx_bytes +
10656                 get_stat64(&hw_stats->tx_octets);
10657
10658         stats->rx_errors = old_stats->rx_errors +
10659                 get_stat64(&hw_stats->rx_errors);
10660         stats->tx_errors = old_stats->tx_errors +
10661                 get_stat64(&hw_stats->tx_errors) +
10662                 get_stat64(&hw_stats->tx_mac_errors) +
10663                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10664                 get_stat64(&hw_stats->tx_discards);
10665
10666         stats->multicast = old_stats->multicast +
10667                 get_stat64(&hw_stats->rx_mcast_packets);
10668         stats->collisions = old_stats->collisions +
10669                 get_stat64(&hw_stats->tx_collisions);
10670
10671         stats->rx_length_errors = old_stats->rx_length_errors +
10672                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10673                 get_stat64(&hw_stats->rx_undersize_packets);
10674
10675         stats->rx_over_errors = old_stats->rx_over_errors +
10676                 get_stat64(&hw_stats->rxbds_empty);
10677         stats->rx_frame_errors = old_stats->rx_frame_errors +
10678                 get_stat64(&hw_stats->rx_align_errors);
10679         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10680                 get_stat64(&hw_stats->tx_discards);
10681         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10682                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10683
10684         stats->rx_crc_errors = old_stats->rx_crc_errors +
10685                 tg3_calc_crc_errors(tp);
10686
10687         stats->rx_missed_errors = old_stats->rx_missed_errors +
10688                 get_stat64(&hw_stats->rx_discards);
10689
10690         stats->rx_dropped = tp->rx_dropped;
10691         stats->tx_dropped = tp->tx_dropped;
10692 }
10693
10694 static int tg3_get_regs_len(struct net_device *dev)
10695 {
10696         return TG3_REG_BLK_SIZE;
10697 }
10698
10699 static void tg3_get_regs(struct net_device *dev,
10700                 struct ethtool_regs *regs, void *_p)
10701 {
10702         struct tg3 *tp = netdev_priv(dev);
10703
10704         regs->version = 0;
10705
10706         memset(_p, 0, TG3_REG_BLK_SIZE);
10707
10708         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10709                 return;
10710
10711         tg3_full_lock(tp, 0);
10712
10713         tg3_dump_legacy_regs(tp, (u32 *)_p);
10714
10715         tg3_full_unlock(tp);
10716 }
10717
10718 static int tg3_get_eeprom_len(struct net_device *dev)
10719 {
10720         struct tg3 *tp = netdev_priv(dev);
10721
10722         return tp->nvram_size;
10723 }
10724
10725 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10726 {
10727         struct tg3 *tp = netdev_priv(dev);
10728         int ret;
10729         u8  *pd;
10730         u32 i, offset, len, b_offset, b_count;
10731         __be32 val;
10732
10733         if (tg3_flag(tp, NO_NVRAM))
10734                 return -EINVAL;
10735
10736         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10737                 return -EAGAIN;
10738
10739         offset = eeprom->offset;
10740         len = eeprom->len;
10741         eeprom->len = 0;
10742
10743         eeprom->magic = TG3_EEPROM_MAGIC;
10744
10745         if (offset & 3) {
10746                 /* adjustments to start on required 4 byte boundary */
10747                 b_offset = offset & 3;
10748                 b_count = 4 - b_offset;
10749                 if (b_count > len) {
10750                         /* i.e. offset=1 len=2 */
10751                         b_count = len;
10752                 }
10753                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10754                 if (ret)
10755                         return ret;
10756                 memcpy(data, ((char *)&val) + b_offset, b_count);
10757                 len -= b_count;
10758                 offset += b_count;
10759                 eeprom->len += b_count;
10760         }
10761
10762         /* read bytes up to the last 4 byte boundary */
10763         pd = &data[eeprom->len];
10764         for (i = 0; i < (len - (len & 3)); i += 4) {
10765                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10766                 if (ret) {
10767                         eeprom->len += i;
10768                         return ret;
10769                 }
10770                 memcpy(pd + i, &val, 4);
10771         }
10772         eeprom->len += i;
10773
10774         if (len & 3) {
10775                 /* read last bytes not ending on 4 byte boundary */
10776                 pd = &data[eeprom->len];
10777                 b_count = len & 3;
10778                 b_offset = offset + len - b_count;
10779                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10780                 if (ret)
10781                         return ret;
10782                 memcpy(pd, &val, b_count);
10783                 eeprom->len += b_count;
10784         }
10785         return 0;
10786 }
10787
10788 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10789 {
10790         struct tg3 *tp = netdev_priv(dev);
10791         int ret;
10792         u32 offset, len, b_offset, odd_len;
10793         u8 *buf;
10794         __be32 start, end;
10795
10796         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10797                 return -EAGAIN;
10798
10799         if (tg3_flag(tp, NO_NVRAM) ||
10800             eeprom->magic != TG3_EEPROM_MAGIC)
10801                 return -EINVAL;
10802
10803         offset = eeprom->offset;
10804         len = eeprom->len;
10805
10806         if ((b_offset = (offset & 3))) {
10807                 /* adjustments to start on required 4 byte boundary */
10808                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10809                 if (ret)
10810                         return ret;
10811                 len += b_offset;
10812                 offset &= ~3;
10813                 if (len < 4)
10814                         len = 4;
10815         }
10816
10817         odd_len = 0;
10818         if (len & 3) {
10819                 /* adjustments to end on required 4 byte boundary */
10820                 odd_len = 1;
10821                 len = (len + 3) & ~3;
10822                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10823                 if (ret)
10824                         return ret;
10825         }
10826
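        /* If either end of the write is not 4-byte aligned, stage the
         * data in a bounce buffer padded with the existing NVRAM
         * contents read above.
         */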
10827         buf = data;
10828         if (b_offset || odd_len) {
10829                 buf = kmalloc(len, GFP_KERNEL);
10830                 if (!buf)
10831                         return -ENOMEM;
10832                 if (b_offset)
10833                         memcpy(buf, &start, 4);
10834                 if (odd_len)
10835                         memcpy(buf+len-4, &end, 4);
10836                 memcpy(buf + b_offset, data, eeprom->len);
10837         }
10838
10839         ret = tg3_nvram_write_block(tp, offset, len, buf);
10840
10841         if (buf != data)
10842                 kfree(buf);
10843
10844         return ret;
10845 }
10846
10847 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10848 {
10849         struct tg3 *tp = netdev_priv(dev);
10850
10851         if (tg3_flag(tp, USE_PHYLIB)) {
10852                 struct phy_device *phydev;
10853                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10854                         return -EAGAIN;
10855                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10856                 return phy_ethtool_gset(phydev, cmd);
10857         }
10858
10859         cmd->supported = (SUPPORTED_Autoneg);
10860
10861         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10862                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10863                                    SUPPORTED_1000baseT_Full);
10864
10865         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10866                 cmd->supported |= (SUPPORTED_100baseT_Half |
10867                                   SUPPORTED_100baseT_Full |
10868                                   SUPPORTED_10baseT_Half |
10869                                   SUPPORTED_10baseT_Full |
10870                                   SUPPORTED_TP);
10871                 cmd->port = PORT_TP;
10872         } else {
10873                 cmd->supported |= SUPPORTED_FIBRE;
10874                 cmd->port = PORT_FIBRE;
10875         }
10876
10877         cmd->advertising = tp->link_config.advertising;
10878         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10879                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10880                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10881                                 cmd->advertising |= ADVERTISED_Pause;
10882                         } else {
10883                                 cmd->advertising |= ADVERTISED_Pause |
10884                                                     ADVERTISED_Asym_Pause;
10885                         }
10886                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10887                         cmd->advertising |= ADVERTISED_Asym_Pause;
10888                 }
10889         }
10890         if (netif_running(dev) && netif_carrier_ok(dev)) {
10891                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10892                 cmd->duplex = tp->link_config.active_duplex;
10893                 cmd->lp_advertising = tp->link_config.rmt_adv;
10894                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10895                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10896                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10897                         else
10898                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10899                 }
10900         } else {
10901                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10902                 cmd->duplex = DUPLEX_UNKNOWN;
10903                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10904         }
10905         cmd->phy_address = tp->phy_addr;
10906         cmd->transceiver = XCVR_INTERNAL;
10907         cmd->autoneg = tp->link_config.autoneg;
10908         cmd->maxtxpkt = 0;
10909         cmd->maxrxpkt = 0;
10910         return 0;
10911 }
10912
10913 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10914 {
10915         struct tg3 *tp = netdev_priv(dev);
10916         u32 speed = ethtool_cmd_speed(cmd);
10917
10918         if (tg3_flag(tp, USE_PHYLIB)) {
10919                 struct phy_device *phydev;
10920                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10921                         return -EAGAIN;
10922                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10923                 return phy_ethtool_sset(phydev, cmd);
10924         }
10925
10926         if (cmd->autoneg != AUTONEG_ENABLE &&
10927             cmd->autoneg != AUTONEG_DISABLE)
10928                 return -EINVAL;
10929
10930         if (cmd->autoneg == AUTONEG_DISABLE &&
10931             cmd->duplex != DUPLEX_FULL &&
10932             cmd->duplex != DUPLEX_HALF)
10933                 return -EINVAL;
10934
10935         if (cmd->autoneg == AUTONEG_ENABLE) {
10936                 u32 mask = ADVERTISED_Autoneg |
10937                            ADVERTISED_Pause |
10938                            ADVERTISED_Asym_Pause;
10939
10940                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10941                         mask |= ADVERTISED_1000baseT_Half |
10942                                 ADVERTISED_1000baseT_Full;
10943
10944                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10945                         mask |= ADVERTISED_100baseT_Half |
10946                                 ADVERTISED_100baseT_Full |
10947                                 ADVERTISED_10baseT_Half |
10948                                 ADVERTISED_10baseT_Full |
10949                                 ADVERTISED_TP;
10950                 else
10951                         mask |= ADVERTISED_FIBRE;
10952
10953                 if (cmd->advertising & ~mask)
10954                         return -EINVAL;
10955
10956                 mask &= (ADVERTISED_1000baseT_Half |
10957                          ADVERTISED_1000baseT_Full |
10958                          ADVERTISED_100baseT_Half |
10959                          ADVERTISED_100baseT_Full |
10960                          ADVERTISED_10baseT_Half |
10961                          ADVERTISED_10baseT_Full);
10962
10963                 cmd->advertising &= mask;
10964         } else {
10965                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10966                         if (speed != SPEED_1000)
10967                                 return -EINVAL;
10968
10969                         if (cmd->duplex != DUPLEX_FULL)
10970                                 return -EINVAL;
10971                 } else {
10972                         if (speed != SPEED_100 &&
10973                             speed != SPEED_10)
10974                                 return -EINVAL;
10975                 }
10976         }
10977
10978         tg3_full_lock(tp, 0);
10979
10980         tp->link_config.autoneg = cmd->autoneg;
10981         if (cmd->autoneg == AUTONEG_ENABLE) {
10982                 tp->link_config.advertising = (cmd->advertising |
10983                                               ADVERTISED_Autoneg);
10984                 tp->link_config.speed = SPEED_UNKNOWN;
10985                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10986         } else {
10987                 tp->link_config.advertising = 0;
10988                 tp->link_config.speed = speed;
10989                 tp->link_config.duplex = cmd->duplex;
10990         }
10991
10992         if (netif_running(dev))
10993                 tg3_setup_phy(tp, 1);
10994
10995         tg3_full_unlock(tp);
10996
10997         return 0;
10998 }
10999
11000 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11001 {
11002         struct tg3 *tp = netdev_priv(dev);
11003
11004         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11005         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11006         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11007         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11008 }
11009
11010 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11011 {
11012         struct tg3 *tp = netdev_priv(dev);
11013
11014         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11015                 wol->supported = WAKE_MAGIC;
11016         else
11017                 wol->supported = 0;
11018         wol->wolopts = 0;
11019         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11020                 wol->wolopts = WAKE_MAGIC;
11021         memset(&wol->sopass, 0, sizeof(wol->sopass));
11022 }
11023
11024 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11025 {
11026         struct tg3 *tp = netdev_priv(dev);
11027         struct device *dp = &tp->pdev->dev;
11028
11029         if (wol->wolopts & ~WAKE_MAGIC)
11030                 return -EINVAL;
11031         if ((wol->wolopts & WAKE_MAGIC) &&
11032             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11033                 return -EINVAL;
11034
11035         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11036
11037         spin_lock_bh(&tp->lock);
11038         if (device_may_wakeup(dp))
11039                 tg3_flag_set(tp, WOL_ENABLE);
11040         else
11041                 tg3_flag_clear(tp, WOL_ENABLE);
11042         spin_unlock_bh(&tp->lock);
11043
11044         return 0;
11045 }
11046
11047 static u32 tg3_get_msglevel(struct net_device *dev)
11048 {
11049         struct tg3 *tp = netdev_priv(dev);
11050         return tp->msg_enable;
11051 }
11052
11053 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11054 {
11055         struct tg3 *tp = netdev_priv(dev);
11056         tp->msg_enable = value;
11057 }
11058
11059 static int tg3_nway_reset(struct net_device *dev)
11060 {
11061         struct tg3 *tp = netdev_priv(dev);
11062         int r;
11063
11064         if (!netif_running(dev))
11065                 return -EAGAIN;
11066
11067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11068                 return -EINVAL;
11069
11070         if (tg3_flag(tp, USE_PHYLIB)) {
11071                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11072                         return -EAGAIN;
11073                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11074         } else {
11075                 u32 bmcr;
11076
11077                 spin_lock_bh(&tp->lock);
11078                 r = -EINVAL;
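                /* BMCR is deliberately read twice; only the second
                 * read's value is used (the first read appears to just
                 * flush a possibly stale value).
                 */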
11079                 tg3_readphy(tp, MII_BMCR, &bmcr);
11080                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11081                     ((bmcr & BMCR_ANENABLE) ||
11082                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11083                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11084                                                    BMCR_ANENABLE);
11085                         r = 0;
11086                 }
11087                 spin_unlock_bh(&tp->lock);
11088         }
11089
11090         return r;
11091 }
11092
11093 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11094 {
11095         struct tg3 *tp = netdev_priv(dev);
11096
11097         ering->rx_max_pending = tp->rx_std_ring_mask;
11098         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11099                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11100         else
11101                 ering->rx_jumbo_max_pending = 0;
11102
11103         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11104
11105         ering->rx_pending = tp->rx_pending;
11106         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11107                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11108         else
11109                 ering->rx_jumbo_pending = 0;
11110
11111         ering->tx_pending = tp->napi[0].tx_pending;
11112 }
11113
11114 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11115 {
11116         struct tg3 *tp = netdev_priv(dev);
11117         int i, irq_sync = 0, err = 0;
11118
11119         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11120             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11121             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11122             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11123             (tg3_flag(tp, TSO_BUG) &&
11124              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11125                 return -EINVAL;
11126
11127         if (netif_running(dev)) {
11128                 tg3_phy_stop(tp);
11129                 tg3_netif_stop(tp);
11130                 irq_sync = 1;
11131         }
11132
11133         tg3_full_lock(tp, irq_sync);
11134
11135         tp->rx_pending = ering->rx_pending;
11136
11137         if (tg3_flag(tp, MAX_RXPEND_64) &&
11138             tp->rx_pending > 63)
11139                 tp->rx_pending = 63;
11140         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11141
11142         for (i = 0; i < tp->irq_max; i++)
11143                 tp->napi[i].tx_pending = ering->tx_pending;
11144
11145         if (netif_running(dev)) {
11146                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147                 err = tg3_restart_hw(tp, 1);
11148                 if (!err)
11149                         tg3_netif_start(tp);
11150         }
11151
11152         tg3_full_unlock(tp);
11153
11154         if (irq_sync && !err)
11155                 tg3_phy_start(tp);
11156
11157         return err;
11158 }
11159
11160 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11161 {
11162         struct tg3 *tp = netdev_priv(dev);
11163
11164         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11165
11166         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11167                 epause->rx_pause = 1;
11168         else
11169                 epause->rx_pause = 0;
11170
11171         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11172                 epause->tx_pause = 1;
11173         else
11174                 epause->tx_pause = 0;
11175 }
11176
11177 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11178 {
11179         struct tg3 *tp = netdev_priv(dev);
11180         int err = 0;
11181
11182         if (tg3_flag(tp, USE_PHYLIB)) {
11183                 u32 newadv;
11184                 struct phy_device *phydev;
11185
11186                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11187
11188                 if (!(phydev->supported & SUPPORTED_Pause) ||
11189                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11190                      (epause->rx_pause != epause->tx_pause)))
11191                         return -EINVAL;
11192
11193                 tp->link_config.flowctrl = 0;
11194                 if (epause->rx_pause) {
11195                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11196
11197                         if (epause->tx_pause) {
11198                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11199                                 newadv = ADVERTISED_Pause;
11200                         } else
11201                                 newadv = ADVERTISED_Pause |
11202                                          ADVERTISED_Asym_Pause;
11203                 } else if (epause->tx_pause) {
11204                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11205                         newadv = ADVERTISED_Asym_Pause;
11206                 } else
11207                         newadv = 0;
11208
11209                 if (epause->autoneg)
11210                         tg3_flag_set(tp, PAUSE_AUTONEG);
11211                 else
11212                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11213
11214                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11215                         u32 oldadv = phydev->advertising &
11216                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11217                         if (oldadv != newadv) {
11218                                 phydev->advertising &=
11219                                         ~(ADVERTISED_Pause |
11220                                           ADVERTISED_Asym_Pause);
11221                                 phydev->advertising |= newadv;
11222                                 if (phydev->autoneg) {
11223                                         /*
11224                                          * Always renegotiate the link to
11225                                          * inform our link partner of our
11226                                          * flow control settings, even if the
11227                                          * flow control is forced.  Let
11228                                          * tg3_adjust_link() do the final
11229                                          * flow control setup.
11230                                          */
11231                                         return phy_start_aneg(phydev);
11232                                 }
11233                         }
11234
11235                         if (!epause->autoneg)
11236                                 tg3_setup_flow_control(tp, 0, 0);
11237                 } else {
11238                         tp->link_config.advertising &=
11239                                         ~(ADVERTISED_Pause |
11240                                           ADVERTISED_Asym_Pause);
11241                         tp->link_config.advertising |= newadv;
11242                 }
11243         } else {
11244                 int irq_sync = 0;
11245
11246                 if (netif_running(dev)) {
11247                         tg3_netif_stop(tp);
11248                         irq_sync = 1;
11249                 }
11250
11251                 tg3_full_lock(tp, irq_sync);
11252
11253                 if (epause->autoneg)
11254                         tg3_flag_set(tp, PAUSE_AUTONEG);
11255                 else
11256                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11257                 if (epause->rx_pause)
11258                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11259                 else
11260                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11261                 if (epause->tx_pause)
11262                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11263                 else
11264                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11265
11266                 if (netif_running(dev)) {
11267                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11268                         err = tg3_restart_hw(tp, 1);
11269                         if (!err)
11270                                 tg3_netif_start(tp);
11271                 }
11272
11273                 tg3_full_unlock(tp);
11274         }
11275
11276         return err;
11277 }
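/* A typical invocation, assuming an interface named eth0 (the name is
 * illustrative only):
 *   ethtool -A eth0 autoneg on rx on tx off
 * With autoneg on, the result is advertised and negotiation settles the
 * final mode; with autoneg off, the forced setting is programmed straight
 * away via tg3_setup_flow_control().
 */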
11278
11279 static int tg3_get_sset_count(struct net_device *dev, int sset)
11280 {
11281         switch (sset) {
11282         case ETH_SS_TEST:
11283                 return TG3_NUM_TEST;
11284         case ETH_SS_STATS:
11285                 return TG3_NUM_STATS;
11286         default:
11287                 return -EOPNOTSUPP;
11288         }
11289 }
11290
11291 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11292                          u32 *rules __always_unused)
11293 {
11294         struct tg3 *tp = netdev_priv(dev);
11295
11296         if (!tg3_flag(tp, SUPPORT_MSIX))
11297                 return -EOPNOTSUPP;
11298
11299         switch (info->cmd) {
11300         case ETHTOOL_GRXRINGS:
11301                 if (netif_running(tp->dev))
11302                         info->data = tp->rxq_cnt;
11303                 else {
11304                         info->data = num_online_cpus();
11305                         if (info->data > TG3_RSS_MAX_NUM_QS)
11306                                 info->data = TG3_RSS_MAX_NUM_QS;
11307                 }
11308
11309                 /* The first interrupt vector only
11310                  * handles link interrupts.
11311                  */
11312                 info->data -= 1;
11313                 return 0;
11314
11315         default:
11316                 return -EOPNOTSUPP;
11317         }
11318 }
11319
11320 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11321 {
11322         u32 size = 0;
11323         struct tg3 *tp = netdev_priv(dev);
11324
11325         if (tg3_flag(tp, SUPPORT_MSIX))
11326                 size = TG3_RSS_INDIR_TBL_SIZE;
11327
11328         return size;
11329 }
11330
11331 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11332 {
11333         struct tg3 *tp = netdev_priv(dev);
11334         int i;
11335
11336         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11337                 indir[i] = tp->rss_ind_tbl[i];
11338
11339         return 0;
11340 }
11341
11342 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11343 {
11344         struct tg3 *tp = netdev_priv(dev);
11345         size_t i;
11346
11347         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11348                 tp->rss_ind_tbl[i] = indir[i];
11349
11350         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11351                 return 0;
11352
11353         /* It is legal to write the indirection
11354          * table while the device is running.
11355          */
11356         tg3_full_lock(tp, 0);
11357         tg3_rss_write_indir_tbl(tp);
11358         tg3_full_unlock(tp);
11359
11360         return 0;
11361 }
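/* The indirection table can be rewritten from userspace at any time; for
 * example (interface name illustrative only):
 *   ethtool -X eth0 equal 4
 * spreads the entries evenly across four RX rings.  The write above takes
 * effect immediately when the NIC is running with RSS enabled; otherwise
 * the table is stored and applied at the next start.
 */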
11362
11363 static void tg3_get_channels(struct net_device *dev,
11364                              struct ethtool_channels *channel)
11365 {
11366         struct tg3 *tp = netdev_priv(dev);
11367         u32 deflt_qs = netif_get_num_default_rss_queues();
11368
11369         channel->max_rx = tp->rxq_max;
11370         channel->max_tx = tp->txq_max;
11371
11372         if (netif_running(dev)) {
11373                 channel->rx_count = tp->rxq_cnt;
11374                 channel->tx_count = tp->txq_cnt;
11375         } else {
11376                 if (tp->rxq_req)
11377                         channel->rx_count = tp->rxq_req;
11378                 else
11379                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11380
11381                 if (tp->txq_req)
11382                         channel->tx_count = tp->txq_req;
11383                 else
11384                         channel->tx_count = min(deflt_qs, tp->txq_max);
11385         }
11386 }
11387
11388 static int tg3_set_channels(struct net_device *dev,
11389                             struct ethtool_channels *channel)
11390 {
11391         struct tg3 *tp = netdev_priv(dev);
11392
11393         if (!tg3_flag(tp, SUPPORT_MSIX))
11394                 return -EOPNOTSUPP;
11395
11396         if (channel->rx_count > tp->rxq_max ||
11397             channel->tx_count > tp->txq_max)
11398                 return -EINVAL;
11399
11400         tp->rxq_req = channel->rx_count;
11401         tp->txq_req = channel->tx_count;
11402
11403         if (!netif_running(dev))
11404                 return 0;
11405
11406         tg3_stop(tp);
11407
11408         netif_carrier_off(dev);
11409
11410         tg3_start(tp, true, false);
11411
11412         return 0;
11413 }
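/* Changing the channel count on a running interface bounces the NIC
 * through tg3_stop()/tg3_start().  For example (interface name
 * illustrative only):
 *   ethtool -L eth0 rx 4 tx 4
 */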
11414
11415 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11416 {
11417         switch (stringset) {
11418         case ETH_SS_STATS:
11419                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11420                 break;
11421         case ETH_SS_TEST:
11422                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11423                 break;
11424         default:
11425                 WARN_ON(1);     /* unknown stringset; should never happen */
11426                 break;
11427         }
11428 }
11429
11430 static int tg3_set_phys_id(struct net_device *dev,
11431                             enum ethtool_phys_id_state state)
11432 {
11433         struct tg3 *tp = netdev_priv(dev);
11434
11435         if (!netif_running(tp->dev))
11436                 return -EAGAIN;
11437
11438         switch (state) {
11439         case ETHTOOL_ID_ACTIVE:
11440                 return 1;       /* cycle on/off once per second */
11441
11442         case ETHTOOL_ID_ON:
11443                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11444                      LED_CTRL_1000MBPS_ON |
11445                      LED_CTRL_100MBPS_ON |
11446                      LED_CTRL_10MBPS_ON |
11447                      LED_CTRL_TRAFFIC_OVERRIDE |
11448                      LED_CTRL_TRAFFIC_BLINK |
11449                      LED_CTRL_TRAFFIC_LED);
11450                 break;
11451
11452         case ETHTOOL_ID_OFF:
11453                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11454                      LED_CTRL_TRAFFIC_OVERRIDE);
11455                 break;
11456
11457         case ETHTOOL_ID_INACTIVE:
11458                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11459                 break;
11460         }
11461
11462         return 0;
11463 }
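/* Driven by "ethtool -p" (interface name illustrative only):
 *   ethtool -p eth0 5
 * Returning 1 from the ETHTOOL_ID_ACTIVE case tells the ethtool core to
 * call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at one cycle per second for
 * the requested duration, then restore the LEDs via ETHTOOL_ID_INACTIVE.
 */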
11464
11465 static void tg3_get_ethtool_stats(struct net_device *dev,
11466                                    struct ethtool_stats *estats, u64 *tmp_stats)
11467 {
11468         struct tg3 *tp = netdev_priv(dev);
11469
11470         if (tp->hw_stats)
11471                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11472         else
11473                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11474 }
11475
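/* Locate and read the board's VPD block.  Devices whose NVRAM carries the
 * standard EEPROM magic may publish an extended-VPD directory entry; if
 * none is found, the fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window is used.
 * All other devices are read through the PCI VPD capability.
 */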
11476 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11477 {
11478         int i;
11479         __be32 *buf;
11480         u32 offset = 0, len = 0;
11481         u32 magic, val;
11482
11483         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11484                 return NULL;
11485
11486         if (magic == TG3_EEPROM_MAGIC) {
11487                 for (offset = TG3_NVM_DIR_START;
11488                      offset < TG3_NVM_DIR_END;
11489                      offset += TG3_NVM_DIRENT_SIZE) {
11490                         if (tg3_nvram_read(tp, offset, &val))
11491                                 return NULL;
11492
11493                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11494                             TG3_NVM_DIRTYPE_EXTVPD)
11495                                 break;
11496                 }
11497
11498                 if (offset != TG3_NVM_DIR_END) {
11499                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11500                         if (tg3_nvram_read(tp, offset + 4, &offset))
11501                                 return NULL;
11502
11503                         offset = tg3_nvram_logical_addr(tp, offset);
11504                 }
11505         }
11506
11507         if (!offset || !len) {
11508                 offset = TG3_NVM_VPD_OFF;
11509                 len = TG3_NVM_VPD_LEN;
11510         }
11511
11512         buf = kmalloc(len, GFP_KERNEL);
11513         if (buf == NULL)
11514                 return NULL;
11515
11516         if (magic == TG3_EEPROM_MAGIC) {
11517                 for (i = 0; i < len; i += 4) {
11518                         /* The data is in little-endian format in NVRAM.
11519                          * Use the big-endian read routines to preserve
11520                          * the byte order as it exists in NVRAM.
11521                          */
11522                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11523                                 goto error;
11524                 }
11525         } else {
11526                 u8 *ptr;
11527                 ssize_t cnt;
11528                 unsigned int pos = 0;
11529
11530                 ptr = (u8 *)&buf[0];
11531                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11532                         cnt = pci_read_vpd(tp->pdev, pos,
11533                                            len - pos, ptr);
11534                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11535                                 cnt = 0;
11536                         else if (cnt < 0)
11537                                 goto error;
11538                 }
11539                 if (pos != len)
11540                         goto error;
11541         }
11542
11543         *vpdlen = len;
11544
11545         return buf;
11546
11547 error:
11548         kfree(buf);
11549         return NULL;
11550 }
11551
11552 #define NVRAM_TEST_SIZE 0x100
11553 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11554 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11555 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11556 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11557 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11558 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11559 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11560 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11561
11562 static int tg3_test_nvram(struct tg3 *tp)
11563 {
11564         u32 csum, magic, len;
11565         __be32 *buf;
11566         int i, j, k, err = 0, size;
11567
11568         if (tg3_flag(tp, NO_NVRAM))
11569                 return 0;
11570
11571         if (tg3_nvram_read(tp, 0, &magic) != 0)
11572                 return -EIO;
11573
11574         if (magic == TG3_EEPROM_MAGIC)
11575                 size = NVRAM_TEST_SIZE;
11576         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11577                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11578                     TG3_EEPROM_SB_FORMAT_1) {
11579                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11580                         case TG3_EEPROM_SB_REVISION_0:
11581                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11582                                 break;
11583                         case TG3_EEPROM_SB_REVISION_2:
11584                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11585                                 break;
11586                         case TG3_EEPROM_SB_REVISION_3:
11587                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11588                                 break;
11589                         case TG3_EEPROM_SB_REVISION_4:
11590                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11591                                 break;
11592                         case TG3_EEPROM_SB_REVISION_5:
11593                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11594                                 break;
11595                         case TG3_EEPROM_SB_REVISION_6:
11596                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11597                                 break;
11598                         default:
11599                                 return -EIO;
11600                         }
11601                 } else
11602                         return 0;
11603         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11604                 size = NVRAM_SELFBOOT_HW_SIZE;
11605         else
11606                 return -EIO;
11607
11608         buf = kmalloc(size, GFP_KERNEL);
11609         if (buf == NULL)
11610                 return -ENOMEM;
11611
11612         err = -EIO;
11613         for (i = 0, j = 0; i < size; i += 4, j++) {
11614                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11615                 if (err)
11616                         break;
11617         }
11618         if (i < size)
11619                 goto out;
11620
11621         /* Selfboot format */
11622         magic = be32_to_cpu(buf[0]);
11623         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11624             TG3_EEPROM_MAGIC_FW) {
11625                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11626
11627                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11628                     TG3_EEPROM_SB_REVISION_2) {
11629                         /* For rev 2, the csum doesn't include the MBA (Multi-Boot Agent) area. */
11630                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11631                                 csum8 += buf8[i];
11632                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11633                                 csum8 += buf8[i];
11634                 } else {
11635                         for (i = 0; i < size; i++)
11636                                 csum8 += buf8[i];
11637                 }
11638
11639                 if (csum8 == 0) {
11640                         err = 0;
11641                         goto out;
11642                 }
11643
11644                 err = -EIO;
11645                 goto out;
11646         }
11647
11648         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11649             TG3_EEPROM_MAGIC_HW) {
11650                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11651                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11652                 u8 *buf8 = (u8 *) buf;
11653
11654                 /* Separate the parity bits and the data bytes.  */
11655                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11656                         if ((i == 0) || (i == 8)) {
11657                                 int l;
11658                                 u8 msk;
11659
11660                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11661                                         parity[k++] = buf8[i] & msk;
11662                                 i++;
11663                         } else if (i == 16) {
11664                                 int l;
11665                                 u8 msk;
11666
11667                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11668                                         parity[k++] = buf8[i] & msk;
11669                                 i++;
11670
11671                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11672                                         parity[k++] = buf8[i] & msk;
11673                                 i++;
11674                         }
11675                         data[j++] = buf8[i];
11676                 }
11677
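                /* Verify odd parity: a data byte together with its stored
                 * parity bit must contain an odd number of set bits, so an
                 * odd-weight byte must have a clear parity bit and an
                 * even-weight byte a set one.
                 */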
11678                 err = -EIO;
11679                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11680                         u8 hw8 = hweight8(data[i]);
11681
11682                         if ((hw8 & 0x1) && parity[i])
11683                                 goto out;
11684                         else if (!(hw8 & 0x1) && !parity[i])
11685                                 goto out;
11686                 }
11687                 err = 0;
11688                 goto out;
11689         }
11690
11691         err = -EIO;
11692
11693         /* Bootstrap checksum at offset 0x10 */
11694         csum = calc_crc((unsigned char *) buf, 0x10);
11695         if (csum != le32_to_cpu(buf[0x10/4]))
11696                 goto out;
11697
11698         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11699         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11700         if (csum != le32_to_cpu(buf[0xfc/4]))
11701                 goto out;
11702
11703         kfree(buf);
11704
11705         buf = tg3_vpd_readblock(tp, &len);
11706         if (!buf)
11707                 return -ENOMEM;
11708
11709         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11710         if (i > 0) {
11711                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11712                 if (j < 0)
11713                         goto out;
11714
11715                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11716                         goto out;
11717
11718                 i += PCI_VPD_LRDT_TAG_SIZE;
11719                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11720                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11721                 if (j > 0) {
11722                         u8 csum8 = 0;
11723
11724                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11725
11726                         for (i = 0; i <= j; i++)
11727                                 csum8 += ((u8 *)buf)[i];
11728
11729                         if (csum8)
11730                                 goto out;
11731                 }
11732         }
11733
11734         err = 0;
11735
11736 out:
11737         kfree(buf);
11738         return err;
11739 }
11740
11741 #define TG3_SERDES_TIMEOUT_SEC  2
11742 #define TG3_COPPER_TIMEOUT_SEC  6
11743
11744 static int tg3_test_link(struct tg3 *tp)
11745 {
11746         int i, max;
11747
11748         if (!netif_running(tp->dev))
11749                 return -ENODEV;
11750
11751         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11752                 max = TG3_SERDES_TIMEOUT_SEC;
11753         else
11754                 max = TG3_COPPER_TIMEOUT_SEC;
11755
11756         for (i = 0; i < max; i++) {
11757                 if (netif_carrier_ok(tp->dev))
11758                         return 0;
11759
11760                 if (msleep_interruptible(1000))
11761                         break;
11762         }
11763
11764         return -EIO;
11765 }
11766
11767 /* Only test the commonly used registers */
11768 static int tg3_test_registers(struct tg3 *tp)
11769 {
11770         int i, is_5705, is_5750;
11771         u32 offset, read_mask, write_mask, val, save_val, read_val;
11772         static struct {
11773                 u16 offset;
11774                 u16 flags;
11775 #define TG3_FL_5705     0x1
11776 #define TG3_FL_NOT_5705 0x2
11777 #define TG3_FL_NOT_5788 0x4
11778 #define TG3_FL_NOT_5750 0x8
11779                 u32 read_mask;
11780                 u32 write_mask;
11781         } reg_tbl[] = {
11782                 /* MAC Control Registers */
11783                 { MAC_MODE, TG3_FL_NOT_5705,
11784                         0x00000000, 0x00ef6f8c },
11785                 { MAC_MODE, TG3_FL_5705,
11786                         0x00000000, 0x01ef6b8c },
11787                 { MAC_STATUS, TG3_FL_NOT_5705,
11788                         0x03800107, 0x00000000 },
11789                 { MAC_STATUS, TG3_FL_5705,
11790                         0x03800100, 0x00000000 },
11791                 { MAC_ADDR_0_HIGH, 0x0000,
11792                         0x00000000, 0x0000ffff },
11793                 { MAC_ADDR_0_LOW, 0x0000,
11794                         0x00000000, 0xffffffff },
11795                 { MAC_RX_MTU_SIZE, 0x0000,
11796                         0x00000000, 0x0000ffff },
11797                 { MAC_TX_MODE, 0x0000,
11798                         0x00000000, 0x00000070 },
11799                 { MAC_TX_LENGTHS, 0x0000,
11800                         0x00000000, 0x00003fff },
11801                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11802                         0x00000000, 0x000007fc },
11803                 { MAC_RX_MODE, TG3_FL_5705,
11804                         0x00000000, 0x000007dc },
11805                 { MAC_HASH_REG_0, 0x0000,
11806                         0x00000000, 0xffffffff },
11807                 { MAC_HASH_REG_1, 0x0000,
11808                         0x00000000, 0xffffffff },
11809                 { MAC_HASH_REG_2, 0x0000,
11810                         0x00000000, 0xffffffff },
11811                 { MAC_HASH_REG_3, 0x0000,
11812                         0x00000000, 0xffffffff },
11813
11814                 /* Receive Data and Receive BD Initiator Control Registers. */
11815                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11816                         0x00000000, 0xffffffff },
11817                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11818                         0x00000000, 0xffffffff },
11819                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11820                         0x00000000, 0x00000003 },
11821                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11822                         0x00000000, 0xffffffff },
11823                 { RCVDBDI_STD_BD+0, 0x0000,
11824                         0x00000000, 0xffffffff },
11825                 { RCVDBDI_STD_BD+4, 0x0000,
11826                         0x00000000, 0xffffffff },
11827                 { RCVDBDI_STD_BD+8, 0x0000,
11828                         0x00000000, 0xffff0002 },
11829                 { RCVDBDI_STD_BD+0xc, 0x0000,
11830                         0x00000000, 0xffffffff },
11831
11832                 /* Receive BD Initiator Control Registers. */
11833                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11834                         0x00000000, 0xffffffff },
11835                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11836                         0x00000000, 0x000003ff },
11837                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11838                         0x00000000, 0xffffffff },
11839
11840                 /* Host Coalescing Control Registers. */
11841                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11842                         0x00000000, 0x00000004 },
11843                 { HOSTCC_MODE, TG3_FL_5705,
11844                         0x00000000, 0x000000f6 },
11845                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11846                         0x00000000, 0xffffffff },
11847                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11848                         0x00000000, 0x000003ff },
11849                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11850                         0x00000000, 0xffffffff },
11851                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11852                         0x00000000, 0x000003ff },
11853                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11854                         0x00000000, 0xffffffff },
11855                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11856                         0x00000000, 0x000000ff },
11857                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11858                         0x00000000, 0xffffffff },
11859                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11860                         0x00000000, 0x000000ff },
11861                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11862                         0x00000000, 0xffffffff },
11863                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11864                         0x00000000, 0xffffffff },
11865                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11866                         0x00000000, 0xffffffff },
11867                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11868                         0x00000000, 0x000000ff },
11869                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11870                         0x00000000, 0xffffffff },
11871                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11872                         0x00000000, 0x000000ff },
11873                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11874                         0x00000000, 0xffffffff },
11875                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11876                         0x00000000, 0xffffffff },
11877                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11878                         0x00000000, 0xffffffff },
11879                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11880                         0x00000000, 0xffffffff },
11881                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11882                         0x00000000, 0xffffffff },
11883                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11884                         0xffffffff, 0x00000000 },
11885                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11886                         0xffffffff, 0x00000000 },
11887
11888                 /* Buffer Manager Control Registers. */
11889                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11890                         0x00000000, 0x007fff80 },
11891                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11892                         0x00000000, 0x007fffff },
11893                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11894                         0x00000000, 0x0000003f },
11895                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11896                         0x00000000, 0x000001ff },
11897                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11898                         0x00000000, 0x000001ff },
11899                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11900                         0xffffffff, 0x00000000 },
11901                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11902                         0xffffffff, 0x00000000 },
11903
11904                 /* Mailbox Registers */
11905                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11906                         0x00000000, 0x000001ff },
11907                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11908                         0x00000000, 0x000001ff },
11909                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11910                         0x00000000, 0x000007ff },
11911                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11912                         0x00000000, 0x000001ff },
11913
11914                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11915         };
11916
11917         is_5705 = is_5750 = 0;
11918         if (tg3_flag(tp, 5705_PLUS)) {
11919                 is_5705 = 1;
11920                 if (tg3_flag(tp, 5750_PLUS))
11921                         is_5750 = 1;
11922         }
11923
11924         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11925                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11926                         continue;
11927
11928                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11929                         continue;
11930
11931                 if (tg3_flag(tp, IS_5788) &&
11932                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11933                         continue;
11934
11935                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11936                         continue;
11937
11938                 offset = (u32) reg_tbl[i].offset;
11939                 read_mask = reg_tbl[i].read_mask;
11940                 write_mask = reg_tbl[i].write_mask;
11941
11942                 /* Save the original register content */
11943                 save_val = tr32(offset);
11944
11945                 /* Determine the read-only value. */
11946                 read_val = save_val & read_mask;
11947
11948                 /* Write zero to the register, then make sure the read-only bits
11949                  * are not changed and the read/write bits are all zeros.
11950                  */
11951                 tw32(offset, 0);
11952
11953                 val = tr32(offset);
11954
11955                 /* Test the read-only and read/write bits. */
11956                 if (((val & read_mask) != read_val) || (val & write_mask))
11957                         goto out;
11958
11959                 /* Write ones to all the bits defined by RdMask and WrMask, then
11960                  * make sure the read-only bits are not changed and the
11961                  * read/write bits are all ones.
11962                  */
11963                 tw32(offset, read_mask | write_mask);
11964
11965                 val = tr32(offset);
11966
11967                 /* Test the read-only bits. */
11968                 if ((val & read_mask) != read_val)
11969                         goto out;
11970
11971                 /* Test the read/write bits. */
11972                 if ((val & write_mask) != write_mask)
11973                         goto out;
11974
11975                 tw32(offset, save_val);
11976         }
11977
11978         return 0;
11979
11980 out:
11981         if (netif_msg_hw(tp))
11982                 netdev_err(tp->dev,
11983                            "Register test failed at offset %x\n", offset);
11984         tw32(offset, save_val);
11985         return -EIO;
11986 }
11987
11988 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11989 {
11990         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11991         int i;
11992         u32 j;
11993
11994         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11995                 for (j = 0; j < len; j += 4) {
11996                         u32 val;
11997
11998                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11999                         tg3_read_mem(tp, offset + j, &val);
12000                         if (val != test_pattern[i])
12001                                 return -EIO;
12002                 }
12003         }
12004         return 0;
12005 }
12006
12007 static int tg3_test_memory(struct tg3 *tp)
12008 {
12009         static struct mem_entry {
12010                 u32 offset;
12011                 u32 len;
12012         } mem_tbl_570x[] = {
12013                 { 0x00000000, 0x00b50},
12014                 { 0x00002000, 0x1c000},
12015                 { 0xffffffff, 0x00000}
12016         }, mem_tbl_5705[] = {
12017                 { 0x00000100, 0x0000c},
12018                 { 0x00000200, 0x00008},
12019                 { 0x00004000, 0x00800},
12020                 { 0x00006000, 0x01000},
12021                 { 0x00008000, 0x02000},
12022                 { 0x00010000, 0x0e000},
12023                 { 0xffffffff, 0x00000}
12024         }, mem_tbl_5755[] = {
12025                 { 0x00000200, 0x00008},
12026                 { 0x00004000, 0x00800},
12027                 { 0x00006000, 0x00800},
12028                 { 0x00008000, 0x02000},
12029                 { 0x00010000, 0x0c000},
12030                 { 0xffffffff, 0x00000}
12031         }, mem_tbl_5906[] = {
12032                 { 0x00000200, 0x00008},
12033                 { 0x00004000, 0x00400},
12034                 { 0x00006000, 0x00400},
12035                 { 0x00008000, 0x01000},
12036                 { 0x00010000, 0x01000},
12037                 { 0xffffffff, 0x00000}
12038         }, mem_tbl_5717[] = {
12039                 { 0x00000200, 0x00008},
12040                 { 0x00010000, 0x0a000},
12041                 { 0x00020000, 0x13c00},
12042                 { 0xffffffff, 0x00000}
12043         }, mem_tbl_57765[] = {
12044                 { 0x00000200, 0x00008},
12045                 { 0x00004000, 0x00800},
12046                 { 0x00006000, 0x09800},
12047                 { 0x00010000, 0x0a000},
12048                 { 0xffffffff, 0x00000}
12049         };
12050         struct mem_entry *mem_tbl;
12051         int err = 0;
12052         int i;
12053
12054         if (tg3_flag(tp, 5717_PLUS))
12055                 mem_tbl = mem_tbl_5717;
12056         else if (tg3_flag(tp, 57765_CLASS))
12057                 mem_tbl = mem_tbl_57765;
12058         else if (tg3_flag(tp, 5755_PLUS))
12059                 mem_tbl = mem_tbl_5755;
12060         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12061                 mem_tbl = mem_tbl_5906;
12062         else if (tg3_flag(tp, 5705_PLUS))
12063                 mem_tbl = mem_tbl_5705;
12064         else
12065                 mem_tbl = mem_tbl_570x;
12066
12067         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12068                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12069                 if (err)
12070                         break;
12071         }
12072
12073         return err;
12074 }
12075
12076 #define TG3_TSO_MSS             500
12077
12078 #define TG3_TSO_IP_HDR_LEN      20
12079 #define TG3_TSO_TCP_HDR_LEN     20
12080 #define TG3_TSO_TCP_OPT_LEN     12
12081
12082 static const u8 tg3_tso_header[] = {
12083 0x08, 0x00,
12084 0x45, 0x00, 0x00, 0x00,
12085 0x00, 0x00, 0x40, 0x00,
12086 0x40, 0x06, 0x00, 0x00,
12087 0x0a, 0x00, 0x00, 0x01,
12088 0x0a, 0x00, 0x00, 0x02,
12089 0x0d, 0x00, 0xe0, 0x00,
12090 0x00, 0x00, 0x01, 0x00,
12091 0x00, 0x00, 0x02, 0x00,
12092 0x80, 0x10, 0x10, 0x00,
12093 0x14, 0x09, 0x00, 0x00,
12094 0x01, 0x01, 0x08, 0x0a,
12095 0x11, 0x11, 0x11, 0x11,
12096 0x11, 0x11, 0x11, 0x11,
12097 };
12098
12099 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12100 {
12101         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12102         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12103         u32 budget;
12104         struct sk_buff *skb;
12105         u8 *tx_data, *rx_data;
12106         dma_addr_t map;
12107         int num_pkts, tx_len, rx_len, i, err;
12108         struct tg3_rx_buffer_desc *desc;
12109         struct tg3_napi *tnapi, *rnapi;
12110         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12111
12112         tnapi = &tp->napi[0];
12113         rnapi = &tp->napi[0];
12114         if (tp->irq_cnt > 1) {
12115                 if (tg3_flag(tp, ENABLE_RSS))
12116                         rnapi = &tp->napi[1];
12117                 if (tg3_flag(tp, ENABLE_TSS))
12118                         tnapi = &tp->napi[1];
12119         }
12120         coal_now = tnapi->coal_now | rnapi->coal_now;
12121
12122         err = -EIO;
12123
12124         tx_len = pktsz;
12125         skb = netdev_alloc_skb(tp->dev, tx_len);
12126         if (!skb)
12127                 return -ENOMEM;
12128
12129         tx_data = skb_put(skb, tx_len);
12130         memcpy(tx_data, tp->dev->dev_addr, 6);
12131         memset(tx_data + 6, 0x0, 8);
12132
12133         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12134
12135         if (tso_loopback) {
12136                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12137
12138                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12139                               TG3_TSO_TCP_OPT_LEN;
12140
12141                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12142                        sizeof(tg3_tso_header));
12143                 mss = TG3_TSO_MSS;
12144
12145                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12146                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12147
12148                 /* Set the total length field in the IP header */
12149                 iph->tot_len = htons((u16)(mss + hdr_len));
12150
12151                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12152                               TXD_FLAG_CPU_POST_DMA);
12153
12154                 if (tg3_flag(tp, HW_TSO_1) ||
12155                     tg3_flag(tp, HW_TSO_2) ||
12156                     tg3_flag(tp, HW_TSO_3)) {
12157                         struct tcphdr *th;
12158                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12159                         th = (struct tcphdr *)&tx_data[val];
12160                         th->check = 0;
12161                 } else
12162                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12163
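                /* Each hardware TSO generation wants the header length
                 * encoded differently: HW_TSO_3 scatters it across spare
                 * MSS bits and base_flags, HW_TSO_2 packs it into the
                 * upper MSS bits, and the older engines take only the
                 * TCP option length.
                 */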
12164                 if (tg3_flag(tp, HW_TSO_3)) {
12165                         mss |= (hdr_len & 0xc) << 12;
12166                         if (hdr_len & 0x10)
12167                                 base_flags |= 0x00000010;
12168                         base_flags |= (hdr_len & 0x3e0) << 5;
12169                 } else if (tg3_flag(tp, HW_TSO_2))
12170                         mss |= hdr_len << 9;
12171                 else if (tg3_flag(tp, HW_TSO_1) ||
12172                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12173                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12174                 } else {
12175                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12176                 }
12177
12178                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12179         } else {
12180                 num_pkts = 1;
12181                 data_off = ETH_HLEN;
12182
12183                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12184                     tx_len > VLAN_ETH_FRAME_LEN)
12185                         base_flags |= TXD_FLAG_JMB_PKT;
12186         }
12187
12188         for (i = data_off; i < tx_len; i++)
12189                 tx_data[i] = (u8) (i & 0xff);
12190
12191         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12192         if (pci_dma_mapping_error(tp->pdev, map)) {
12193                 dev_kfree_skb(skb);
12194                 return -EIO;
12195         }
12196
12197         val = tnapi->tx_prod;
12198         tnapi->tx_buffers[val].skb = skb;
12199         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12200
12201         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12202                rnapi->coal_now);
12203
12204         udelay(10);
12205
12206         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12207
12208         budget = tg3_tx_avail(tnapi);
12209         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12210                             base_flags | TXD_FLAG_END, mss, 0)) {
12211                 tnapi->tx_buffers[val].skb = NULL;
12212                 dev_kfree_skb(skb);
12213                 return -EIO;
12214         }
12215
12216         tnapi->tx_prod++;
12217
12218         /* Sync BD data before updating mailbox */
12219         wmb();
12220
12221         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12222         tr32_mailbox(tnapi->prodmbox);
12223
12224         udelay(10);
12225
12226         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12227         for (i = 0; i < 35; i++) {
12228                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12229                        coal_now);
12230
12231                 udelay(10);
12232
12233                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12234                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12235                 if ((tx_idx == tnapi->tx_prod) &&
12236                     (rx_idx == (rx_start_idx + num_pkts)))
12237                         break;
12238         }
12239
12240         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12241         dev_kfree_skb(skb);
12242
12243         if (tx_idx != tnapi->tx_prod)
12244                 goto out;
12245
12246         if (rx_idx != rx_start_idx + num_pkts)
12247                 goto out;
12248
12249         val = data_off;
12250         while (rx_idx != rx_start_idx) {
12251                 desc = &rnapi->rx_rcb[rx_start_idx++];
12252                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12253                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12254
12255                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12256                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12257                         goto out;
12258
12259                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12260                          - ETH_FCS_LEN;
12261
12262                 if (!tso_loopback) {
12263                         if (rx_len != tx_len)
12264                                 goto out;
12265
12266                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12267                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12268                                         goto out;
12269                         } else {
12270                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12271                                         goto out;
12272                         }
12273                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12274                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12275                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12276                         goto out;
12277                 }
12278
12279                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12280                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12281                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12282                                              mapping);
12283                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12284                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12285                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12286                                              mapping);
12287                 } else
12288                         goto out;
12289
12290                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12291                                             PCI_DMA_FROMDEVICE);
12292
12293                 rx_data += TG3_RX_OFFSET(tp);
12294                 for (i = data_off; i < rx_len; i++, val++) {
12295                         if (*(rx_data + i) != (u8) (val & 0xff))
12296                                 goto out;
12297                 }
12298         }
12299
12300         err = 0;
12301
12302         /* tg3_free_rings will unmap and free the rx_data */
12303 out:
12304         return err;
12305 }
12306
12307 #define TG3_STD_LOOPBACK_FAILED         1
12308 #define TG3_JMB_LOOPBACK_FAILED         2
12309 #define TG3_TSO_LOOPBACK_FAILED         4
12310 #define TG3_LOOPBACK_FAILED \
12311         (TG3_STD_LOOPBACK_FAILED | \
12312          TG3_JMB_LOOPBACK_FAILED | \
12313          TG3_TSO_LOOPBACK_FAILED)
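/* tg3_test_loopback() reports one failure mask per mode: data[0] covers
 * MAC loopback, data[1] internal PHY loopback, and data[2] external
 * loopback (only when requested).
 */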
12314
12315 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12316 {
12317         int err = -EIO;
12318         u32 eee_cap;
12319         u32 jmb_pkt_sz = 9000;
12320
12321         if (tp->dma_limit)
12322                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12323
12324         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12325         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12326
12327         if (!netif_running(tp->dev)) {
12328                 data[0] = TG3_LOOPBACK_FAILED;
12329                 data[1] = TG3_LOOPBACK_FAILED;
12330                 if (do_extlpbk)
12331                         data[2] = TG3_LOOPBACK_FAILED;
12332                 goto done;
12333         }
12334
12335         err = tg3_reset_hw(tp, 1);
12336         if (err) {
12337                 data[0] = TG3_LOOPBACK_FAILED;
12338                 data[1] = TG3_LOOPBACK_FAILED;
12339                 if (do_extlpbk)
12340                         data[2] = TG3_LOOPBACK_FAILED;
12341                 goto done;
12342         }
12343
12344         if (tg3_flag(tp, ENABLE_RSS)) {
12345                 int i;
12346
12347                 /* Reroute all rx packets to the 1st queue */
12348                 for (i = MAC_RSS_INDIR_TBL_0;
12349                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12350                         tw32(i, 0x0);
12351         }
12352
12353         /* HW errata - MAC loopback fails in some cases on 5780.
12354          * Normal traffic and PHY loopback are not affected by the
12355          * errata.  Also, the MAC loopback test is deprecated for
12356          * all newer ASIC revisions.
12357          */
12358         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12359             !tg3_flag(tp, CPMU_PRESENT)) {
12360                 tg3_mac_loopback(tp, true);
12361
12362                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12363                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12364
12365                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12366                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12367                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12368
12369                 tg3_mac_loopback(tp, false);
12370         }
12371
12372         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12373             !tg3_flag(tp, USE_PHYLIB)) {
12374                 int i;
12375
12376                 tg3_phy_lpbk_set(tp, 0, false);
12377
12378                 /* Wait for link */
12379                 for (i = 0; i < 100; i++) {
12380                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12381                                 break;
12382                         mdelay(1);
12383                 }
12384
12385                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12386                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12387                 if (tg3_flag(tp, TSO_CAPABLE) &&
12388                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12389                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12390                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12391                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12392                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12393
12394                 if (do_extlpbk) {
12395                         tg3_phy_lpbk_set(tp, 0, true);
12396
12397                         /* All link indications report up, but the hardware
12398                          * isn't really ready for about 20 msec.  Double it
12399                          * to be sure.
12400                          */
12401                         mdelay(40);
12402
12403                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12404                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12405                         if (tg3_flag(tp, TSO_CAPABLE) &&
12406                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12407                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12408                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12409                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12410                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12411                 }
12412
12413                 /* Re-enable gphy autopowerdown. */
12414                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12415                         tg3_phy_toggle_apd(tp, true);
12416         }
12417
12418         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12419
12420 done:
12421         tp->phy_flags |= eee_cap;
12422
12423         return err;
12424 }
12425
12426 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12427                           u64 *data)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12431
12432         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12433             tg3_power_up(tp)) {
12434                 etest->flags |= ETH_TEST_FL_FAILED;
12435                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12436                 return;
12437         }
12438
12439         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12440
12441         if (tg3_test_nvram(tp) != 0) {
12442                 etest->flags |= ETH_TEST_FL_FAILED;
12443                 data[0] = 1;
12444         }
12445         if (!doextlpbk && tg3_test_link(tp)) {
12446                 etest->flags |= ETH_TEST_FL_FAILED;
12447                 data[1] = 1;
12448         }
12449         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12450                 int err, err2 = 0, irq_sync = 0;
12451
12452                 if (netif_running(dev)) {
12453                         tg3_phy_stop(tp);
12454                         tg3_netif_stop(tp);
12455                         irq_sync = 1;
12456                 }
12457
12458                 tg3_full_lock(tp, irq_sync);
12459
12460                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12461                 err = tg3_nvram_lock(tp);
12462                 tg3_halt_cpu(tp, RX_CPU_BASE);
12463                 if (!tg3_flag(tp, 5705_PLUS))
12464                         tg3_halt_cpu(tp, TX_CPU_BASE);
12465                 if (!err)
12466                         tg3_nvram_unlock(tp);
12467
12468                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12469                         tg3_phy_reset(tp);
12470
12471                 if (tg3_test_registers(tp) != 0) {
12472                         etest->flags |= ETH_TEST_FL_FAILED;
12473                         data[2] = 1;
12474                 }
12475
12476                 if (tg3_test_memory(tp) != 0) {
12477                         etest->flags |= ETH_TEST_FL_FAILED;
12478                         data[3] = 1;
12479                 }
12480
12481                 if (doextlpbk)
12482                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12483
12484                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12485                         etest->flags |= ETH_TEST_FL_FAILED;
12486
12487                 tg3_full_unlock(tp);
12488
12489                 if (tg3_test_interrupt(tp) != 0) {
12490                         etest->flags |= ETH_TEST_FL_FAILED;
12491                         data[7] = 1;
12492                 }
12493
12494                 tg3_full_lock(tp, 0);
12495
12496                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12497                 if (netif_running(dev)) {
12498                         tg3_flag_set(tp, INIT_COMPLETE);
12499                         err2 = tg3_restart_hw(tp, 1);
12500                         if (!err2)
12501                                 tg3_netif_start(tp);
12502                 }
12503
12504                 tg3_full_unlock(tp);
12505
12506                 if (irq_sync && !err2)
12507                         tg3_phy_start(tp);
12508         }
12509         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12510                 tg3_power_down(tp);
12511
12512 }
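/* Exercised via "ethtool -t" (interface name illustrative only):
 *   ethtool -t eth0 offline
 * runs the full destructive suite (registers, memory, loopback,
 * interrupt); online mode is limited to the NVRAM and link checks.
 */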
12513
12514 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12515 {
12516         struct mii_ioctl_data *data = if_mii(ifr);
12517         struct tg3 *tp = netdev_priv(dev);
12518         int err;
12519
12520         if (tg3_flag(tp, USE_PHYLIB)) {
12521                 struct phy_device *phydev;
12522                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12523                         return -EAGAIN;
12524                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12525                 return phy_mii_ioctl(phydev, ifr, cmd);
12526         }
12527
12528         switch (cmd) {
12529         case SIOCGMIIPHY:
12530                 data->phy_id = tp->phy_addr;
12531
12532                 /* fallthru */
12533         case SIOCGMIIREG: {
12534                 u32 mii_regval;
12535
12536                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12537                         break;                  /* We have no PHY */
12538
12539                 if (!netif_running(dev))
12540                         return -EAGAIN;
12541
12542                 spin_lock_bh(&tp->lock);
12543                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12544                 spin_unlock_bh(&tp->lock);
12545
12546                 data->val_out = mii_regval;
12547
12548                 return err;
12549         }
12550
12551         case SIOCSMIIREG:
12552                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12553                         break;                  /* We have no PHY */
12554
12555                 if (!netif_running(dev))
12556                         return -EAGAIN;
12557
12558                 spin_lock_bh(&tp->lock);
12559                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12560                 spin_unlock_bh(&tp->lock);
12561
12562                 return err;
12563
12564         default:
12565                 /* do nothing */
12566                 break;
12567         }
12568         return -EOPNOTSUPP;
12569 }
12570
12571 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12572 {
12573         struct tg3 *tp = netdev_priv(dev);
12574
12575         memcpy(ec, &tp->coal, sizeof(*ec));
12576         return 0;
12577 }
12578
12579 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12580 {
12581         struct tg3 *tp = netdev_priv(dev);
12582         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12583         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12584
12585         if (!tg3_flag(tp, 5705_PLUS)) {
12586                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12587                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12588                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12589                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12590         }
12591
12592         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12593             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12594             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12595             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12596             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12597             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12598             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12599             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12600             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12601             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12602                 return -EINVAL;
12603
12604         /* No rx interrupts will be generated if both are zero */
12605         if ((ec->rx_coalesce_usecs == 0) &&
12606             (ec->rx_max_coalesced_frames == 0))
12607                 return -EINVAL;
12608
12609         /* No tx interrupts will be generated if both are zero */
12610         if ((ec->tx_coalesce_usecs == 0) &&
12611             (ec->tx_max_coalesced_frames == 0))
12612                 return -EINVAL;
12613
12614         /* Only copy relevant parameters, ignore all others. */
12615         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12616         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12617         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12618         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12619         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12620         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12621         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12622         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12623         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12624
12625         if (netif_running(dev)) {
12626                 tg3_full_lock(tp, 0);
12627                 __tg3_set_coalesce(tp, &tp->coal);
12628                 tg3_full_unlock(tp);
12629         }
12630         return 0;
12631 }
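/* Example tuning (interface name illustrative only):
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5
 * Note that rx-usecs/rx-frames (and likewise tx-usecs/tx-frames) may not
 * both be zero, since that would disable the corresponding interrupts
 * entirely.
 */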
12632
12633 static const struct ethtool_ops tg3_ethtool_ops = {
12634         .get_settings           = tg3_get_settings,
12635         .set_settings           = tg3_set_settings,
12636         .get_drvinfo            = tg3_get_drvinfo,
12637         .get_regs_len           = tg3_get_regs_len,
12638         .get_regs               = tg3_get_regs,
12639         .get_wol                = tg3_get_wol,
12640         .set_wol                = tg3_set_wol,
12641         .get_msglevel           = tg3_get_msglevel,
12642         .set_msglevel           = tg3_set_msglevel,
12643         .nway_reset             = tg3_nway_reset,
12644         .get_link               = ethtool_op_get_link,
12645         .get_eeprom_len         = tg3_get_eeprom_len,
12646         .get_eeprom             = tg3_get_eeprom,
12647         .set_eeprom             = tg3_set_eeprom,
12648         .get_ringparam          = tg3_get_ringparam,
12649         .set_ringparam          = tg3_set_ringparam,
12650         .get_pauseparam         = tg3_get_pauseparam,
12651         .set_pauseparam         = tg3_set_pauseparam,
12652         .self_test              = tg3_self_test,
12653         .get_strings            = tg3_get_strings,
12654         .set_phys_id            = tg3_set_phys_id,
12655         .get_ethtool_stats      = tg3_get_ethtool_stats,
12656         .get_coalesce           = tg3_get_coalesce,
12657         .set_coalesce           = tg3_set_coalesce,
12658         .get_sset_count         = tg3_get_sset_count,
12659         .get_rxnfc              = tg3_get_rxnfc,
12660         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12661         .get_rxfh_indir         = tg3_get_rxfh_indir,
12662         .set_rxfh_indir         = tg3_set_rxfh_indir,
12663         .get_channels           = tg3_get_channels,
12664         .set_channels           = tg3_set_channels,
12665         .get_ts_info            = ethtool_op_get_ts_info,
12666 };
12667
12668 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12669                                                 struct rtnl_link_stats64 *stats)
12670 {
12671         struct tg3 *tp = netdev_priv(dev);
12672
12673         spin_lock_bh(&tp->lock);
12674         if (!tp->hw_stats) {
12675                 spin_unlock_bh(&tp->lock);
12676                 return &tp->net_stats_prev;
12677         }
12678
12679         tg3_get_nstats(tp, stats);
12680         spin_unlock_bh(&tp->lock);
12681
12682         return stats;
12683 }
12684
12685 static void tg3_set_rx_mode(struct net_device *dev)
12686 {
12687         struct tg3 *tp = netdev_priv(dev);
12688
12689         if (!netif_running(dev))
12690                 return;
12691
12692         tg3_full_lock(tp, 0);
12693         __tg3_set_rx_mode(dev);
12694         tg3_full_unlock(tp);
12695 }
12696
12697 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12698                                int new_mtu)
12699 {
12700         dev->mtu = new_mtu;
12701
12702         if (new_mtu > ETH_DATA_LEN) {
12703                 if (tg3_flag(tp, 5780_CLASS)) {
12704                         netdev_update_features(dev);
12705                         tg3_flag_clear(tp, TSO_CAPABLE);
12706                 } else {
12707                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12708                 }
12709         } else {
12710                 if (tg3_flag(tp, 5780_CLASS)) {
12711                         tg3_flag_set(tp, TSO_CAPABLE);
12712                         netdev_update_features(dev);
12713                 }
12714                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12715         }
12716 }
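
      /* Note on the 5780-class handling above: on those chips jumbo frames
       * and TSO cannot be used together, so TSO_CAPABLE is dropped when the
       * MTU grows past ETH_DATA_LEN and restored when it shrinks back, with
       * netdev_update_features() re-evaluating the feature set in both
       * directions.  (Descriptive note, inferred from the code above.)
       */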
12717
12718 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12719 {
12720         struct tg3 *tp = netdev_priv(dev);
12721         int err, reset_phy = 0;
12722
12723         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12724                 return -EINVAL;
12725
12726         if (!netif_running(dev)) {
12727                 /* We'll just catch it later when the
12728                  * device is brought up.
12729                  */
12730                 tg3_set_mtu(dev, tp, new_mtu);
12731                 return 0;
12732         }
12733
12734         tg3_phy_stop(tp);
12735
12736         tg3_netif_stop(tp);
12737
12738         tg3_full_lock(tp, 1);
12739
12740         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12741
12742         tg3_set_mtu(dev, tp, new_mtu);
12743
12744         /* Reset the PHY, otherwise the read DMA engine will be left in a
12745          * mode that breaks all requests down to 256 bytes.
12746          */
12747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12748                 reset_phy = 1;
12749
12750         err = tg3_restart_hw(tp, reset_phy);
12751
12752         if (!err)
12753                 tg3_netif_start(tp);
12754
12755         tg3_full_unlock(tp);
12756
12757         if (!err)
12758                 tg3_phy_start(tp);
12759
12760         return err;
12761 }
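
      /* Ordering note for the MTU change above: the PHY and the data path
       * are quiesced first (tg3_phy_stop()/tg3_netif_stop()), the chip is
       * halted and reprogrammed under the full lock, and the queues and PHY
       * are only restarted if tg3_restart_hw() succeeds, so a failed
       * restart never leaves the stack transmitting into a dead device.
       */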
12762
12763 static const struct net_device_ops tg3_netdev_ops = {
12764         .ndo_open               = tg3_open,
12765         .ndo_stop               = tg3_close,
12766         .ndo_start_xmit         = tg3_start_xmit,
12767         .ndo_get_stats64        = tg3_get_stats64,
12768         .ndo_validate_addr      = eth_validate_addr,
12769         .ndo_set_rx_mode        = tg3_set_rx_mode,
12770         .ndo_set_mac_address    = tg3_set_mac_addr,
12771         .ndo_do_ioctl           = tg3_ioctl,
12772         .ndo_tx_timeout         = tg3_tx_timeout,
12773         .ndo_change_mtu         = tg3_change_mtu,
12774         .ndo_fix_features       = tg3_fix_features,
12775         .ndo_set_features       = tg3_set_features,
12776 #ifdef CONFIG_NET_POLL_CONTROLLER
12777         .ndo_poll_controller    = tg3_poll_controller,
12778 #endif
12779 };
12780
12781 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12782 {
12783         u32 cursize, val, magic;
12784
12785         tp->nvram_size = EEPROM_CHIP_SIZE;
12786
12787         if (tg3_nvram_read(tp, 0, &magic) != 0)
12788                 return;
12789
12790         if ((magic != TG3_EEPROM_MAGIC) &&
12791             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12792             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12793                 return;
12794
12795         /*
12796          * Size the chip by reading offsets at increasing powers of two.
12797          * When we encounter our validation signature, we know the addressing
12798          * has wrapped around, and thus have our chip size.
12799          */
12800         cursize = 0x10;
12801
12802         while (cursize < tp->nvram_size) {
12803                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12804                         return;
12805
12806                 if (val == magic)
12807                         break;
12808
12809                 cursize <<= 1;
12810         }
12811
12812         tp->nvram_size = cursize;
12813 }
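
      /* Worked example of the wrap-around sizing above (sizes illustrative):
       * for a 512-byte part, reads at 0x10, 0x20, 0x40, ... return ordinary
       * data until cursize reaches 0x200.  That address wraps back to offset
       * 0, the read returns the magic again, and the loop exits leaving
       * tp->nvram_size = 0x200.
       */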
12814
12815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12816 {
12817         u32 val;
12818
12819         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12820                 return;
12821
12822         /* Selfboot format */
12823         if (val != TG3_EEPROM_MAGIC) {
12824                 tg3_get_eeprom_size(tp);
12825                 return;
12826         }
12827
12828         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12829                 if (val != 0) {
12830                         /* This is confusing.  We want to operate on the
12831                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12832                          * call will read from NVRAM and byteswap the data
12833                          * according to the byteswapping settings for all
12834                          * other register accesses.  This ensures the data we
12835                          * want will always reside in the lower 16-bits.
12836                          * However, the data in NVRAM is in LE format, which
12837                          * means the data from the NVRAM read will always be
12838                          * opposite the endianness of the CPU.  The 16-bit
12839                          * byteswap then brings the data to CPU endianness.
12840                          */
12841                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12842                         return;
12843                 }
12844         }
12845         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12846 }
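
      /* Worked example for the swab16() above (size illustrative): a 512 KB
       * part stores its size in KB, 0x0200, as little-endian bytes 00 02 at
       * offset 0xf2.  Per the explanation above the halfword arrives
       * byte-swapped relative to the CPU, so (val & 0xffff) reads 0x0002,
       * and swab16(0x0002) = 0x0200, giving 0x0200 * 1024 = 512 KB.
       */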
12847
12848 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12849 {
12850         u32 nvcfg1;
12851
12852         nvcfg1 = tr32(NVRAM_CFG1);
12853         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12854                 tg3_flag_set(tp, FLASH);
12855         } else {
12856                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12857                 tw32(NVRAM_CFG1, nvcfg1);
12858         }
12859
12860         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12861             tg3_flag(tp, 5780_CLASS)) {
12862                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12863                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12864                         tp->nvram_jedecnum = JEDEC_ATMEL;
12865                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12866                         tg3_flag_set(tp, NVRAM_BUFFERED);
12867                         break;
12868                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12869                         tp->nvram_jedecnum = JEDEC_ATMEL;
12870                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12871                         break;
12872                 case FLASH_VENDOR_ATMEL_EEPROM:
12873                         tp->nvram_jedecnum = JEDEC_ATMEL;
12874                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12875                         tg3_flag_set(tp, NVRAM_BUFFERED);
12876                         break;
12877                 case FLASH_VENDOR_ST:
12878                         tp->nvram_jedecnum = JEDEC_ST;
12879                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12880                         tg3_flag_set(tp, NVRAM_BUFFERED);
12881                         break;
12882                 case FLASH_VENDOR_SAIFUN:
12883                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12884                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12885                         break;
12886                 case FLASH_VENDOR_SST_SMALL:
12887                 case FLASH_VENDOR_SST_LARGE:
12888                         tp->nvram_jedecnum = JEDEC_SST;
12889                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12890                         break;
12891                 }
12892         } else {
12893                 tp->nvram_jedecnum = JEDEC_ATMEL;
12894                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12895                 tg3_flag_set(tp, NVRAM_BUFFERED);
12896         }
12897 }
12898
12899 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12900 {
12901         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12902         case FLASH_5752PAGE_SIZE_256:
12903                 tp->nvram_pagesize = 256;
12904                 break;
12905         case FLASH_5752PAGE_SIZE_512:
12906                 tp->nvram_pagesize = 512;
12907                 break;
12908         case FLASH_5752PAGE_SIZE_1K:
12909                 tp->nvram_pagesize = 1024;
12910                 break;
12911         case FLASH_5752PAGE_SIZE_2K:
12912                 tp->nvram_pagesize = 2048;
12913                 break;
12914         case FLASH_5752PAGE_SIZE_4K:
12915                 tp->nvram_pagesize = 4096;
12916                 break;
12917         case FLASH_5752PAGE_SIZE_264:
12918                 tp->nvram_pagesize = 264;
12919                 break;
12920         case FLASH_5752PAGE_SIZE_528:
12921                 tp->nvram_pagesize = 528;
12922                 break;
12923         }
12924 }
12925
12926 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12927 {
12928         u32 nvcfg1;
12929
12930         nvcfg1 = tr32(NVRAM_CFG1);
12931
12932         /* NVRAM protection for TPM */
12933         if (nvcfg1 & (1 << 27))
12934                 tg3_flag_set(tp, PROTECTED_NVRAM);
12935
12936         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12937         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12938         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12939                 tp->nvram_jedecnum = JEDEC_ATMEL;
12940                 tg3_flag_set(tp, NVRAM_BUFFERED);
12941                 break;
12942         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12943                 tp->nvram_jedecnum = JEDEC_ATMEL;
12944                 tg3_flag_set(tp, NVRAM_BUFFERED);
12945                 tg3_flag_set(tp, FLASH);
12946                 break;
12947         case FLASH_5752VENDOR_ST_M45PE10:
12948         case FLASH_5752VENDOR_ST_M45PE20:
12949         case FLASH_5752VENDOR_ST_M45PE40:
12950                 tp->nvram_jedecnum = JEDEC_ST;
12951                 tg3_flag_set(tp, NVRAM_BUFFERED);
12952                 tg3_flag_set(tp, FLASH);
12953                 break;
12954         }
12955
12956         if (tg3_flag(tp, FLASH)) {
12957                 tg3_nvram_get_pagesize(tp, nvcfg1);
12958         } else {
12959                 /* For eeprom, set pagesize to maximum eeprom size */
12960                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12961
12962                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12963                 tw32(NVRAM_CFG1, nvcfg1);
12964         }
12965 }
12966
12967 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12968 {
12969         u32 nvcfg1, protect = 0;
12970
12971         nvcfg1 = tr32(NVRAM_CFG1);
12972
12973         /* NVRAM protection for TPM */
12974         if (nvcfg1 & (1 << 27)) {
12975                 tg3_flag_set(tp, PROTECTED_NVRAM);
12976                 protect = 1;
12977         }
12978
12979         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12980         switch (nvcfg1) {
12981         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12982         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12983         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12984         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12985                 tp->nvram_jedecnum = JEDEC_ATMEL;
12986                 tg3_flag_set(tp, NVRAM_BUFFERED);
12987                 tg3_flag_set(tp, FLASH);
12988                 tp->nvram_pagesize = 264;
12989                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12990                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12991                         tp->nvram_size = (protect ? 0x3e200 :
12992                                           TG3_NVRAM_SIZE_512KB);
12993                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12994                         tp->nvram_size = (protect ? 0x1f200 :
12995                                           TG3_NVRAM_SIZE_256KB);
12996                 else
12997                         tp->nvram_size = (protect ? 0x1f200 :
12998                                           TG3_NVRAM_SIZE_128KB);
12999                 break;
13000         case FLASH_5752VENDOR_ST_M45PE10:
13001         case FLASH_5752VENDOR_ST_M45PE20:
13002         case FLASH_5752VENDOR_ST_M45PE40:
13003                 tp->nvram_jedecnum = JEDEC_ST;
13004                 tg3_flag_set(tp, NVRAM_BUFFERED);
13005                 tg3_flag_set(tp, FLASH);
13006                 tp->nvram_pagesize = 256;
13007                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13008                         tp->nvram_size = (protect ?
13009                                           TG3_NVRAM_SIZE_64KB :
13010                                           TG3_NVRAM_SIZE_128KB);
13011                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13012                         tp->nvram_size = (protect ?
13013                                           TG3_NVRAM_SIZE_64KB :
13014                                           TG3_NVRAM_SIZE_256KB);
13015                 else
13016                         tp->nvram_size = (protect ?
13017                                           TG3_NVRAM_SIZE_128KB :
13018                                           TG3_NVRAM_SIZE_512KB);
13019                 break;
13020         }
13021 }
13022
13023 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13024 {
13025         u32 nvcfg1;
13026
13027         nvcfg1 = tr32(NVRAM_CFG1);
13028
13029         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13030         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13031         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13032         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13033         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13034                 tp->nvram_jedecnum = JEDEC_ATMEL;
13035                 tg3_flag_set(tp, NVRAM_BUFFERED);
13036                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13037
13038                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13039                 tw32(NVRAM_CFG1, nvcfg1);
13040                 break;
13041         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13042         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13043         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13044         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13045                 tp->nvram_jedecnum = JEDEC_ATMEL;
13046                 tg3_flag_set(tp, NVRAM_BUFFERED);
13047                 tg3_flag_set(tp, FLASH);
13048                 tp->nvram_pagesize = 264;
13049                 break;
13050         case FLASH_5752VENDOR_ST_M45PE10:
13051         case FLASH_5752VENDOR_ST_M45PE20:
13052         case FLASH_5752VENDOR_ST_M45PE40:
13053                 tp->nvram_jedecnum = JEDEC_ST;
13054                 tg3_flag_set(tp, NVRAM_BUFFERED);
13055                 tg3_flag_set(tp, FLASH);
13056                 tp->nvram_pagesize = 256;
13057                 break;
13058         }
13059 }
13060
13061 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13062 {
13063         u32 nvcfg1, protect = 0;
13064
13065         nvcfg1 = tr32(NVRAM_CFG1);
13066
13067         /* NVRAM protection for TPM */
13068         if (nvcfg1 & (1 << 27)) {
13069                 tg3_flag_set(tp, PROTECTED_NVRAM);
13070                 protect = 1;
13071         }
13072
13073         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13074         switch (nvcfg1) {
13075         case FLASH_5761VENDOR_ATMEL_ADB021D:
13076         case FLASH_5761VENDOR_ATMEL_ADB041D:
13077         case FLASH_5761VENDOR_ATMEL_ADB081D:
13078         case FLASH_5761VENDOR_ATMEL_ADB161D:
13079         case FLASH_5761VENDOR_ATMEL_MDB021D:
13080         case FLASH_5761VENDOR_ATMEL_MDB041D:
13081         case FLASH_5761VENDOR_ATMEL_MDB081D:
13082         case FLASH_5761VENDOR_ATMEL_MDB161D:
13083                 tp->nvram_jedecnum = JEDEC_ATMEL;
13084                 tg3_flag_set(tp, NVRAM_BUFFERED);
13085                 tg3_flag_set(tp, FLASH);
13086                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13087                 tp->nvram_pagesize = 256;
13088                 break;
13089         case FLASH_5761VENDOR_ST_A_M45PE20:
13090         case FLASH_5761VENDOR_ST_A_M45PE40:
13091         case FLASH_5761VENDOR_ST_A_M45PE80:
13092         case FLASH_5761VENDOR_ST_A_M45PE16:
13093         case FLASH_5761VENDOR_ST_M_M45PE20:
13094         case FLASH_5761VENDOR_ST_M_M45PE40:
13095         case FLASH_5761VENDOR_ST_M_M45PE80:
13096         case FLASH_5761VENDOR_ST_M_M45PE16:
13097                 tp->nvram_jedecnum = JEDEC_ST;
13098                 tg3_flag_set(tp, NVRAM_BUFFERED);
13099                 tg3_flag_set(tp, FLASH);
13100                 tp->nvram_pagesize = 256;
13101                 break;
13102         }
13103
13104         if (protect) {
13105                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13106         } else {
13107                 switch (nvcfg1) {
13108                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13109                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13110                 case FLASH_5761VENDOR_ST_A_M45PE16:
13111                 case FLASH_5761VENDOR_ST_M_M45PE16:
13112                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13113                         break;
13114                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13115                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13116                 case FLASH_5761VENDOR_ST_A_M45PE80:
13117                 case FLASH_5761VENDOR_ST_M_M45PE80:
13118                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13119                         break;
13120                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13121                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13122                 case FLASH_5761VENDOR_ST_A_M45PE40:
13123                 case FLASH_5761VENDOR_ST_M_M45PE40:
13124                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13125                         break;
13126                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13127                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13128                 case FLASH_5761VENDOR_ST_A_M45PE20:
13129                 case FLASH_5761VENDOR_ST_M_M45PE20:
13130                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13131                         break;
13132                 }
13133         }
13134 }
13135
13136 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13137 {
13138         tp->nvram_jedecnum = JEDEC_ATMEL;
13139         tg3_flag_set(tp, NVRAM_BUFFERED);
13140         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13141 }
13142
13143 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13144 {
13145         u32 nvcfg1;
13146
13147         nvcfg1 = tr32(NVRAM_CFG1);
13148
13149         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13150         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13151         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13152                 tp->nvram_jedecnum = JEDEC_ATMEL;
13153                 tg3_flag_set(tp, NVRAM_BUFFERED);
13154                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13155
13156                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13157                 tw32(NVRAM_CFG1, nvcfg1);
13158                 return;
13159         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13160         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13161         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13162         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13163         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13164         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13165         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13166                 tp->nvram_jedecnum = JEDEC_ATMEL;
13167                 tg3_flag_set(tp, NVRAM_BUFFERED);
13168                 tg3_flag_set(tp, FLASH);
13169
13170                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13171                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13172                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13173                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13174                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13175                         break;
13176                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13177                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13178                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13179                         break;
13180                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13181                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13182                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13183                         break;
13184                 }
13185                 break;
13186         case FLASH_5752VENDOR_ST_M45PE10:
13187         case FLASH_5752VENDOR_ST_M45PE20:
13188         case FLASH_5752VENDOR_ST_M45PE40:
13189                 tp->nvram_jedecnum = JEDEC_ST;
13190                 tg3_flag_set(tp, NVRAM_BUFFERED);
13191                 tg3_flag_set(tp, FLASH);
13192
13193                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13194                 case FLASH_5752VENDOR_ST_M45PE10:
13195                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13196                         break;
13197                 case FLASH_5752VENDOR_ST_M45PE20:
13198                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13199                         break;
13200                 case FLASH_5752VENDOR_ST_M45PE40:
13201                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13202                         break;
13203                 }
13204                 break;
13205         default:
13206                 tg3_flag_set(tp, NO_NVRAM);
13207                 return;
13208         }
13209
13210         tg3_nvram_get_pagesize(tp, nvcfg1);
13211         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13212                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13213 }
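
      /* Note on the pagesize test above (and its twins below): 264- and
       * 528-byte pages are the native page sizes of Atmel AT45DB-style
       * DataFlash, whose linear addresses must be translated into
       * page/byte-offset form; power-of-two page sizes need no translation,
       * hence NO_NVRAM_ADDR_TRANS.  (Explanatory note, inferred from the
       * flag name and the flash families handled here.)
       */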
13214
13215
13216 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13217 {
13218         u32 nvcfg1;
13219
13220         nvcfg1 = tr32(NVRAM_CFG1);
13221
13222         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13223         case FLASH_5717VENDOR_ATMEL_EEPROM:
13224         case FLASH_5717VENDOR_MICRO_EEPROM:
13225                 tp->nvram_jedecnum = JEDEC_ATMEL;
13226                 tg3_flag_set(tp, NVRAM_BUFFERED);
13227                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13228
13229                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13230                 tw32(NVRAM_CFG1, nvcfg1);
13231                 return;
13232         case FLASH_5717VENDOR_ATMEL_MDB011D:
13233         case FLASH_5717VENDOR_ATMEL_ADB011B:
13234         case FLASH_5717VENDOR_ATMEL_ADB011D:
13235         case FLASH_5717VENDOR_ATMEL_MDB021D:
13236         case FLASH_5717VENDOR_ATMEL_ADB021B:
13237         case FLASH_5717VENDOR_ATMEL_ADB021D:
13238         case FLASH_5717VENDOR_ATMEL_45USPT:
13239                 tp->nvram_jedecnum = JEDEC_ATMEL;
13240                 tg3_flag_set(tp, NVRAM_BUFFERED);
13241                 tg3_flag_set(tp, FLASH);
13242
13243                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13244                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13245                         /* Detect size with tg3_get_nvram_size() */
13246                         break;
13247                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13248                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13249                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13250                         break;
13251                 default:
13252                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13253                         break;
13254                 }
13255                 break;
13256         case FLASH_5717VENDOR_ST_M_M25PE10:
13257         case FLASH_5717VENDOR_ST_A_M25PE10:
13258         case FLASH_5717VENDOR_ST_M_M45PE10:
13259         case FLASH_5717VENDOR_ST_A_M45PE10:
13260         case FLASH_5717VENDOR_ST_M_M25PE20:
13261         case FLASH_5717VENDOR_ST_A_M25PE20:
13262         case FLASH_5717VENDOR_ST_M_M45PE20:
13263         case FLASH_5717VENDOR_ST_A_M45PE20:
13264         case FLASH_5717VENDOR_ST_25USPT:
13265         case FLASH_5717VENDOR_ST_45USPT:
13266                 tp->nvram_jedecnum = JEDEC_ST;
13267                 tg3_flag_set(tp, NVRAM_BUFFERED);
13268                 tg3_flag_set(tp, FLASH);
13269
13270                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13271                 case FLASH_5717VENDOR_ST_M_M25PE20:
13272                 case FLASH_5717VENDOR_ST_M_M45PE20:
13273                         /* Detect size with tg3_get_nvram_size() */
13274                         break;
13275                 case FLASH_5717VENDOR_ST_A_M25PE20:
13276                 case FLASH_5717VENDOR_ST_A_M45PE20:
13277                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13278                         break;
13279                 default:
13280                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13281                         break;
13282                 }
13283                 break;
13284         default:
13285                 tg3_flag_set(tp, NO_NVRAM);
13286                 return;
13287         }
13288
13289         tg3_nvram_get_pagesize(tp, nvcfg1);
13290         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13291                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13292 }
13293
13294 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13295 {
13296         u32 nvcfg1, nvmpinstrp;
13297
13298         nvcfg1 = tr32(NVRAM_CFG1);
13299         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13300
13301         switch (nvmpinstrp) {
13302         case FLASH_5720_EEPROM_HD:
13303         case FLASH_5720_EEPROM_LD:
13304                 tp->nvram_jedecnum = JEDEC_ATMEL;
13305                 tg3_flag_set(tp, NVRAM_BUFFERED);
13306
13307                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13308                 tw32(NVRAM_CFG1, nvcfg1);
13309                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13310                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13311                 else
13312                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13313                 return;
13314         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13315         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13316         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13317         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13318         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13319         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13320         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13321         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13322         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13323         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13324         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13325         case FLASH_5720VENDOR_ATMEL_45USPT:
13326                 tp->nvram_jedecnum = JEDEC_ATMEL;
13327                 tg3_flag_set(tp, NVRAM_BUFFERED);
13328                 tg3_flag_set(tp, FLASH);
13329
13330                 switch (nvmpinstrp) {
13331                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13332                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13333                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13334                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13335                         break;
13336                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13337                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13338                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13339                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13340                         break;
13341                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13342                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13343                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13344                         break;
13345                 default:
13346                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13347                         break;
13348                 }
13349                 break;
13350         case FLASH_5720VENDOR_M_ST_M25PE10:
13351         case FLASH_5720VENDOR_M_ST_M45PE10:
13352         case FLASH_5720VENDOR_A_ST_M25PE10:
13353         case FLASH_5720VENDOR_A_ST_M45PE10:
13354         case FLASH_5720VENDOR_M_ST_M25PE20:
13355         case FLASH_5720VENDOR_M_ST_M45PE20:
13356         case FLASH_5720VENDOR_A_ST_M25PE20:
13357         case FLASH_5720VENDOR_A_ST_M45PE20:
13358         case FLASH_5720VENDOR_M_ST_M25PE40:
13359         case FLASH_5720VENDOR_M_ST_M45PE40:
13360         case FLASH_5720VENDOR_A_ST_M25PE40:
13361         case FLASH_5720VENDOR_A_ST_M45PE40:
13362         case FLASH_5720VENDOR_M_ST_M25PE80:
13363         case FLASH_5720VENDOR_M_ST_M45PE80:
13364         case FLASH_5720VENDOR_A_ST_M25PE80:
13365         case FLASH_5720VENDOR_A_ST_M45PE80:
13366         case FLASH_5720VENDOR_ST_25USPT:
13367         case FLASH_5720VENDOR_ST_45USPT:
13368                 tp->nvram_jedecnum = JEDEC_ST;
13369                 tg3_flag_set(tp, NVRAM_BUFFERED);
13370                 tg3_flag_set(tp, FLASH);
13371
13372                 switch (nvmpinstrp) {
13373                 case FLASH_5720VENDOR_M_ST_M25PE20:
13374                 case FLASH_5720VENDOR_M_ST_M45PE20:
13375                 case FLASH_5720VENDOR_A_ST_M25PE20:
13376                 case FLASH_5720VENDOR_A_ST_M45PE20:
13377                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13378                         break;
13379                 case FLASH_5720VENDOR_M_ST_M25PE40:
13380                 case FLASH_5720VENDOR_M_ST_M45PE40:
13381                 case FLASH_5720VENDOR_A_ST_M25PE40:
13382                 case FLASH_5720VENDOR_A_ST_M45PE40:
13383                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13384                         break;
13385                 case FLASH_5720VENDOR_M_ST_M25PE80:
13386                 case FLASH_5720VENDOR_M_ST_M45PE80:
13387                 case FLASH_5720VENDOR_A_ST_M25PE80:
13388                 case FLASH_5720VENDOR_A_ST_M45PE80:
13389                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13390                         break;
13391                 default:
13392                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13393                         break;
13394                 }
13395                 break;
13396         default:
13397                 tg3_flag_set(tp, NO_NVRAM);
13398                 return;
13399         }
13400
13401         tg3_nvram_get_pagesize(tp, nvcfg1);
13402         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13403                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13404 }
13405
13406 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13407 static void __devinit tg3_nvram_init(struct tg3 *tp)
13408 {
13409         tw32_f(GRC_EEPROM_ADDR,
13410              (EEPROM_ADDR_FSM_RESET |
13411               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13412                EEPROM_ADDR_CLKPERD_SHIFT)));
13413
13414         msleep(1);
13415
13416         /* Enable serial EEPROM accesses. */
13417         tw32_f(GRC_LOCAL_CTRL,
13418              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13419         udelay(100);
13420
13421         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13422             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13423                 tg3_flag_set(tp, NVRAM);
13424
13425                 if (tg3_nvram_lock(tp)) {
13426                         netdev_warn(tp->dev,
13427                                     "Cannot get nvram lock, %s failed\n",
13428                                     __func__);
13429                         return;
13430                 }
13431                 tg3_enable_nvram_access(tp);
13432
13433                 tp->nvram_size = 0;
13434
13435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13436                         tg3_get_5752_nvram_info(tp);
13437                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13438                         tg3_get_5755_nvram_info(tp);
13439                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13440                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13441                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13442                         tg3_get_5787_nvram_info(tp);
13443                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13444                         tg3_get_5761_nvram_info(tp);
13445                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13446                         tg3_get_5906_nvram_info(tp);
13447                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13448                          tg3_flag(tp, 57765_CLASS))
13449                         tg3_get_57780_nvram_info(tp);
13450                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13451                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13452                         tg3_get_5717_nvram_info(tp);
13453                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13454                         tg3_get_5720_nvram_info(tp);
13455                 else
13456                         tg3_get_nvram_info(tp);
13457
13458                 if (tp->nvram_size == 0)
13459                         tg3_get_nvram_size(tp);
13460
13461                 tg3_disable_nvram_access(tp);
13462                 tg3_nvram_unlock(tp);
13463
13464         } else {
13465                 tg3_flag_clear(tp, NVRAM);
13466                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13467
13468                 tg3_get_eeprom_size(tp);
13469         }
13470 }
13471
13472 struct subsys_tbl_ent {
13473         u16 subsys_vendor, subsys_devid;
13474         u32 phy_id;
13475 };
13476
13477 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13478         /* Broadcom boards. */
13479         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13480           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13481         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13482           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13483         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13484           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13485         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13486           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13487         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13488           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13489         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13490           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13491         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13492           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13493         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13494           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13495         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13496           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13497         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13498           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13499         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13500           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13501
13502         /* 3com boards. */
13503         { TG3PCI_SUBVENDOR_ID_3COM,
13504           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13505         { TG3PCI_SUBVENDOR_ID_3COM,
13506           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13507         { TG3PCI_SUBVENDOR_ID_3COM,
13508           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13509         { TG3PCI_SUBVENDOR_ID_3COM,
13510           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13511         { TG3PCI_SUBVENDOR_ID_3COM,
13512           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13513
13514         /* DELL boards. */
13515         { TG3PCI_SUBVENDOR_ID_DELL,
13516           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13517         { TG3PCI_SUBVENDOR_ID_DELL,
13518           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13519         { TG3PCI_SUBVENDOR_ID_DELL,
13520           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13521         { TG3PCI_SUBVENDOR_ID_DELL,
13522           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13523
13524         /* Compaq boards. */
13525         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13526           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13527         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13528           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13529         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13530           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13531         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13532           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13533         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13534           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13535
13536         /* IBM boards. */
13537         { TG3PCI_SUBVENDOR_ID_IBM,
13538           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13539 };
13540
13541 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13542 {
13543         int i;
13544
13545         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13546                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13547                      tp->pdev->subsystem_vendor) &&
13548                     (subsys_id_to_phy_id[i].subsys_devid ==
13549                      tp->pdev->subsystem_device))
13550                         return &subsys_id_to_phy_id[i];
13551         }
13552         return NULL;
13553 }
13554
13555 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13556 {
13557         u32 val;
13558
13559         tp->phy_id = TG3_PHY_ID_INVALID;
13560         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13561
13562         /* Assume an onboard device and WOL capability by default. */
13563         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13564         tg3_flag_set(tp, WOL_CAP);
13565
13566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13567                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13568                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13569                         tg3_flag_set(tp, IS_NIC);
13570                 }
13571                 val = tr32(VCPU_CFGSHDW);
13572                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13573                         tg3_flag_set(tp, ASPM_WORKAROUND);
13574                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13575                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13576                         tg3_flag_set(tp, WOL_ENABLE);
13577                         device_set_wakeup_enable(&tp->pdev->dev, true);
13578                 }
13579                 goto done;
13580         }
13581
13582         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13583         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13584                 u32 nic_cfg, led_cfg;
13585                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13586                 int eeprom_phy_serdes = 0;
13587
13588                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13589                 tp->nic_sram_data_cfg = nic_cfg;
13590
13591                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13592                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13593                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13594                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13595                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13596                     (ver > 0) && (ver < 0x100))
13597                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13598
13599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13600                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13601
13602                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13603                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13604                         eeprom_phy_serdes = 1;
13605
13606                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13607                 if (nic_phy_id != 0) {
13608                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13609                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13610
13611                         eeprom_phy_id  = (id1 >> 16) << 10;
13612                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13613                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13614                 } else
13615                         eeprom_phy_id = 0;
13616
13617                 tp->phy_id = eeprom_phy_id;
13618                 if (eeprom_phy_serdes) {
13619                         if (!tg3_flag(tp, 5705_PLUS))
13620                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13621                         else
13622                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13623                 }
13624
13625                 if (tg3_flag(tp, 5750_PLUS))
13626                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13627                                     SHASTA_EXT_LED_MODE_MASK);
13628                 else
13629                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13630
13631                 switch (led_cfg) {
13632                 default:
13633                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13634                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13635                         break;
13636
13637                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13638                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13639                         break;
13640
13641                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13642                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13643
13644                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13645                          * read back from some older 5700/5701 bootcode.
13646                          */
13647                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13648                             ASIC_REV_5700 ||
13649                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13650                             ASIC_REV_5701)
13651                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13652
13653                         break;
13654
13655                 case SHASTA_EXT_LED_SHARED:
13656                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13657                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13658                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13659                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13660                                                  LED_CTRL_MODE_PHY_2);
13661                         break;
13662
13663                 case SHASTA_EXT_LED_MAC:
13664                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13665                         break;
13666
13667                 case SHASTA_EXT_LED_COMBO:
13668                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13669                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13670                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13671                                                  LED_CTRL_MODE_PHY_2);
13672                         break;
13673
13674                 }
13675
13676                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13677                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13678                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13679                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13680
13681                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13682                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13683
13684                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13685                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13686                         if ((tp->pdev->subsystem_vendor ==
13687                              PCI_VENDOR_ID_ARIMA) &&
13688                             (tp->pdev->subsystem_device == 0x205a ||
13689                              tp->pdev->subsystem_device == 0x2063))
13690                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13691                 } else {
13692                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13693                         tg3_flag_set(tp, IS_NIC);
13694                 }
13695
13696                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13697                         tg3_flag_set(tp, ENABLE_ASF);
13698                         if (tg3_flag(tp, 5750_PLUS))
13699                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13700                 }
13701
13702                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13703                     tg3_flag(tp, 5750_PLUS))
13704                         tg3_flag_set(tp, ENABLE_APE);
13705
13706                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13707                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13708                         tg3_flag_clear(tp, WOL_CAP);
13709
13710                 if (tg3_flag(tp, WOL_CAP) &&
13711                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13712                         tg3_flag_set(tp, WOL_ENABLE);
13713                         device_set_wakeup_enable(&tp->pdev->dev, true);
13714                 }
13715
13716                 if (cfg2 & (1 << 17))
13717                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13718
13719                 /* Serdes signal pre-emphasis in register 0x590 is set by
13720                  * the bootcode if bit 18 is set. */
13721                 if (cfg2 & (1 << 18))
13722                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13723
13724                 if ((tg3_flag(tp, 57765_PLUS) ||
13725                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13726                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13727                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13728                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13729
13730                 if (tg3_flag(tp, PCI_EXPRESS) &&
13731                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13732                     !tg3_flag(tp, 57765_PLUS)) {
13733                         u32 cfg3;
13734
13735                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13736                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13737                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13738                 }
13739
13740                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13741                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13742                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13743                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13744                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13745                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13746         }
13747 done:
13748         if (tg3_flag(tp, WOL_CAP))
13749                 device_set_wakeup_enable(&tp->pdev->dev,
13750                                          tg3_flag(tp, WOL_ENABLE));
13751         else
13752                 device_set_wakeup_capable(&tp->pdev->dev, false);
13753 }
13754
13755 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13756 {
13757         int i;
13758         u32 val;
13759
13760         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13761         tw32(OTP_CTRL, cmd);
13762
13763         /* Wait for up to 1 ms for command to execute. */
13764         for (i = 0; i < 100; i++) {
13765                 val = tr32(OTP_STATUS);
13766                 if (val & OTP_STATUS_CMD_DONE)
13767                         break;
13768                 udelay(10);
13769         }
13770
13771         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13772 }
13773
13774 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13775  * configuration is a 32-bit value that straddles the alignment boundary.
13776  * We do two 32-bit reads and then shift and merge the results.
13777  */
13778 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13779 {
13780         u32 bhalf_otp, thalf_otp;
13781
13782         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13783
13784         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13785                 return 0;
13786
13787         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13788
13789         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13790                 return 0;
13791
13792         thalf_otp = tr32(OTP_READ_DATA);
13793
13794         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13795
13796         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13797                 return 0;
13798
13799         bhalf_otp = tr32(OTP_READ_DATA);
13800
13801         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13802 }
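
      /* Worked example of the merge above (values illustrative): with
       * thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD the result is
       * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC -- the low half of the first
       * word supplies the top 16 bits and the high half of the second word
       * the bottom 16 bits, reassembling the value that straddles the
       * 32-bit alignment boundary.
       */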
13803
13804 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13805 {
13806         u32 adv = ADVERTISED_Autoneg;
13807
13808         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13809                 adv |= ADVERTISED_1000baseT_Half |
13810                        ADVERTISED_1000baseT_Full;
13811
13812         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13813                 adv |= ADVERTISED_100baseT_Half |
13814                        ADVERTISED_100baseT_Full |
13815                        ADVERTISED_10baseT_Half |
13816                        ADVERTISED_10baseT_Full |
13817                        ADVERTISED_TP;
13818         else
13819                 adv |= ADVERTISED_FIBRE;
13820
13821         tp->link_config.advertising = adv;
13822         tp->link_config.speed = SPEED_UNKNOWN;
13823         tp->link_config.duplex = DUPLEX_UNKNOWN;
13824         tp->link_config.autoneg = AUTONEG_ENABLE;
13825         tp->link_config.active_speed = SPEED_UNKNOWN;
13826         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13827
13828         tp->old_link = -1;
13829 }
13830
13831 static int __devinit tg3_phy_probe(struct tg3 *tp)
13832 {
13833         u32 hw_phy_id_1, hw_phy_id_2;
13834         u32 hw_phy_id, hw_phy_id_masked;
13835         int err;
13836
13837         /* flow control autonegotiation is default behavior */
13838         tg3_flag_set(tp, PAUSE_AUTONEG);
13839         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13840
13841         if (tg3_flag(tp, ENABLE_APE)) {
13842                 switch (tp->pci_fn) {
13843                 case 0:
13844                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13845                         break;
13846                 case 1:
13847                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13848                         break;
13849                 case 2:
13850                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13851                         break;
13852                 case 3:
13853                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13854                         break;
13855                 }
13856         }
13857
13858         if (tg3_flag(tp, USE_PHYLIB))
13859                 return tg3_phy_init(tp);
13860
13861         /* Reading the PHY ID register can conflict with ASF
13862          * firmware access to the PHY hardware.
13863          */
13864         err = 0;
13865         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13866                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13867         } else {
13868                 /* Now read the physical PHY_ID from the chip and verify
13869                  * that it is sane.  If it doesn't look good, we fall back
13870                  * to the PHY_ID value found in the eeprom area and,
13871                  * failing that, to the hard-coded subsystem-ID table.
13872                  */
13873                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13874                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13875
13876                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13877                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13878                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13879
13880                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13881         }
13882
13883         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13884                 tp->phy_id = hw_phy_id;
13885                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13886                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13887                 else
13888                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13889         } else {
13890                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13891                         /* Do nothing, phy ID already set up in
13892                          * tg3_get_eeprom_hw_cfg().
13893                          */
13894                 } else {
13895                         struct subsys_tbl_ent *p;
13896
13897                         /* No eeprom signature?  Try the hardcoded
13898                          * subsys device table.
13899                          */
13900                         p = tg3_lookup_by_subsys(tp);
13901                         if (!p)
13902                                 return -ENODEV;
13903
13904                         tp->phy_id = p->phy_id;
13905                         if (!tp->phy_id ||
13906                             tp->phy_id == TG3_PHY_ID_BCM8002)
13907                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13908                 }
13909         }
13910
13911         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13912             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13913              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13914              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13915               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13916              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13917               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13918                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13919
13920         tg3_phy_init_link_config(tp);
13921
13922         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13923             !tg3_flag(tp, ENABLE_APE) &&
13924             !tg3_flag(tp, ENABLE_ASF)) {
13925                 u32 bmsr, dummy;
13926
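                /* The BMSR link status bit is latched low; read the
                 * register twice so the second read reflects the current
                 * link state.
                 */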
13927                 tg3_readphy(tp, MII_BMSR, &bmsr);
13928                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13929                     (bmsr & BMSR_LSTATUS))
13930                         goto skip_phy_reset;
13931
13932                 err = tg3_phy_reset(tp);
13933                 if (err)
13934                         return err;
13935
13936                 tg3_phy_set_wirespeed(tp);
13937
13938                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13939                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13940                                             tp->link_config.flowctrl);
13941
13942                         tg3_writephy(tp, MII_BMCR,
13943                                      BMCR_ANENABLE | BMCR_ANRESTART);
13944                 }
13945         }
13946
13947 skip_phy_reset:
13948         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13949                 err = tg3_init_5401phy_dsp(tp);
13950                 if (err)
13951                         return err;
13952
13953                 err = tg3_init_5401phy_dsp(tp);
13954         }
13955
13956         return err;
13957 }
13958
13959 static void __devinit tg3_read_vpd(struct tg3 *tp)
13960 {
13961         u8 *vpd_data;
13962         unsigned int block_end, rosize, len;
13963         u32 vpdlen;
13964         int j, i = 0;
13965
13966         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13967         if (!vpd_data)
13968                 goto out_no_vpd;
13969
13970         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13971         if (i < 0)
13972                 goto out_not_found;
13973
13974         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13975         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13976         i += PCI_VPD_LRDT_TAG_SIZE;
13977
13978         if (block_end > vpdlen)
13979                 goto out_not_found;
13980
13981         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13982                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13983         if (j > 0) {
13984                 len = pci_vpd_info_field_size(&vpd_data[j]);
13985
13986                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
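                /* "1028" is Dell's PCI vendor ID in ASCII; such boards
                 * carry a boot code version string under VENDOR0.
                 */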
13987                 if (j + len > block_end || len != 4 ||
13988                     memcmp(&vpd_data[j], "1028", 4))
13989                         goto partno;
13990
13991                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13992                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13993                 if (j < 0)
13994                         goto partno;
13995
13996                 len = pci_vpd_info_field_size(&vpd_data[j]);
13997
13998                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13999                 if (j + len > block_end)
14000                         goto partno;
14001
14002                 memcpy(tp->fw_ver, &vpd_data[j], len);
14003                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14004         }
14005
14006 partno:
14007         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14008                                       PCI_VPD_RO_KEYWORD_PARTNO);
14009         if (i < 0)
14010                 goto out_not_found;
14011
14012         len = pci_vpd_info_field_size(&vpd_data[i]);
14013
14014         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14015         if (len > TG3_BPN_SIZE ||
14016             (len + i) > vpdlen)
14017                 goto out_not_found;
14018
14019         memcpy(tp->board_part_number, &vpd_data[i], len);
14020
14021 out_not_found:
14022         kfree(vpd_data);
14023         if (tp->board_part_number[0])
14024                 return;
14025
14026 out_no_vpd:
14027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14028                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
14029                         strcpy(tp->board_part_number, "BCM5717");
14030                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14031                         strcpy(tp->board_part_number, "BCM5718");
14032                 else
14033                         goto nomatch;
14034         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14035                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14036                         strcpy(tp->board_part_number, "BCM57780");
14037                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14038                         strcpy(tp->board_part_number, "BCM57760");
14039                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14040                         strcpy(tp->board_part_number, "BCM57790");
14041                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14042                         strcpy(tp->board_part_number, "BCM57788");
14043                 else
14044                         goto nomatch;
14045         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14046                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14047                         strcpy(tp->board_part_number, "BCM57761");
14048                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14049                         strcpy(tp->board_part_number, "BCM57765");
14050                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14051                         strcpy(tp->board_part_number, "BCM57781");
14052                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14053                         strcpy(tp->board_part_number, "BCM57785");
14054                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14055                         strcpy(tp->board_part_number, "BCM57791");
14056                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14057                         strcpy(tp->board_part_number, "BCM57795");
14058                 else
14059                         goto nomatch;
14060         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14061                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14062                         strcpy(tp->board_part_number, "BCM57762");
14063                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14064                         strcpy(tp->board_part_number, "BCM57766");
14065                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14066                         strcpy(tp->board_part_number, "BCM57782");
14067                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14068                         strcpy(tp->board_part_number, "BCM57786");
14069                 else
14070                         goto nomatch;
14071         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14072                 strcpy(tp->board_part_number, "BCM95906");
14073         } else {
14074 nomatch:
14075                 strcpy(tp->board_part_number, "none");
14076         }
14077 }
14078
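/* A firmware image is considered valid if its first NVRAM word carries
 * the 0x0c000000 signature in its upper bits and the following word is
 * zero.
 */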
14079 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14080 {
14081         u32 val;
14082
14083         if (tg3_nvram_read(tp, offset, &val) ||
14084             (val & 0xfc000000) != 0x0c000000 ||
14085             tg3_nvram_read(tp, offset + 4, &val) ||
14086             val != 0)
14087                 return 0;
14088
14089         return 1;
14090 }
14091
14092 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14093 {
14094         u32 val, offset, start, ver_offset;
14095         int i, dst_off;
14096         bool newver = false;
14097
14098         if (tg3_nvram_read(tp, 0xc, &offset) ||
14099             tg3_nvram_read(tp, 0x4, &start))
14100                 return;
14101
14102         offset = tg3_nvram_logical_addr(tp, offset);
14103
14104         if (tg3_nvram_read(tp, offset, &val))
14105                 return;
14106
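        /* A 0x0c000000-class header word followed by a zero word marks
         * the newer bootcode format, which embeds a version string.
         */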
14107         if ((val & 0xfc000000) == 0x0c000000) {
14108                 if (tg3_nvram_read(tp, offset + 4, &val))
14109                         return;
14110
14111                 if (val == 0)
14112                         newver = true;
14113         }
14114
14115         dst_off = strlen(tp->fw_ver);
14116
14117         if (newver) {
14118                 if (TG3_VER_SIZE - dst_off < 16 ||
14119                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14120                         return;
14121
14122                 offset = offset + ver_offset - start;
14123                 for (i = 0; i < 16; i += 4) {
14124                         __be32 v;
14125                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14126                                 return;
14127
14128                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14129                 }
14130         } else {
14131                 u32 major, minor;
14132
14133                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14134                         return;
14135
14136                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14137                         TG3_NVM_BCVER_MAJSFT;
14138                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14139                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14140                          "v%d.%02d", major, minor);
14141         }
14142 }
14143
14144 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14145 {
14146         u32 val, major, minor;
14147
14148         /* Use native endian representation */
14149         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14150                 return;
14151
14152         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14153                 TG3_NVM_HWSB_CFG1_MAJSFT;
14154         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14155                 TG3_NVM_HWSB_CFG1_MINSFT;
14156
14157         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14158 }
14159
14160 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14161 {
14162         u32 offset, major, minor, build;
14163
14164         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14165
14166         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14167                 return;
14168
14169         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14170         case TG3_EEPROM_SB_REVISION_0:
14171                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14172                 break;
14173         case TG3_EEPROM_SB_REVISION_2:
14174                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14175                 break;
14176         case TG3_EEPROM_SB_REVISION_3:
14177                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14178                 break;
14179         case TG3_EEPROM_SB_REVISION_4:
14180                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14181                 break;
14182         case TG3_EEPROM_SB_REVISION_5:
14183                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14184                 break;
14185         case TG3_EEPROM_SB_REVISION_6:
14186                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14187                 break;
14188         default:
14189                 return;
14190         }
14191
14192         if (tg3_nvram_read(tp, offset, &val))
14193                 return;
14194
14195         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14196                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14197         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14198                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14199         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14200
14201         if (minor > 99 || build > 26)
14202                 return;
14203
14204         offset = strlen(tp->fw_ver);
14205         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14206                  " v%d.%02d", major, minor);
14207
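        /* Encode a nonzero build number as a letter suffix ('a' = 1). */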
14208         if (build > 0) {
14209                 offset = strlen(tp->fw_ver);
14210                 if (offset < TG3_VER_SIZE - 1)
14211                         tp->fw_ver[offset] = 'a' + build - 1;
14212         }
14213 }
14214
14215 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14216 {
14217         u32 val, offset, start;
14218         int i, vlen;
14219
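        /* Walk the NVRAM directory looking for the ASF init entry. */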
14220         for (offset = TG3_NVM_DIR_START;
14221              offset < TG3_NVM_DIR_END;
14222              offset += TG3_NVM_DIRENT_SIZE) {
14223                 if (tg3_nvram_read(tp, offset, &val))
14224                         return;
14225
14226                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14227                         break;
14228         }
14229
14230         if (offset == TG3_NVM_DIR_END)
14231                 return;
14232
14233         if (!tg3_flag(tp, 5705_PLUS))
14234                 start = 0x08000000;
14235         else if (tg3_nvram_read(tp, offset - 4, &start))
14236                 return;
14237
14238         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14239             !tg3_fw_img_is_valid(tp, offset) ||
14240             tg3_nvram_read(tp, offset + 8, &val))
14241                 return;
14242
14243         offset += val - start;
14244
14245         vlen = strlen(tp->fw_ver);
14246
14247         tp->fw_ver[vlen++] = ',';
14248         tp->fw_ver[vlen++] = ' ';
14249
14250         for (i = 0; i < 4; i++) {
14251                 __be32 v;
14252                 if (tg3_nvram_read_be32(tp, offset, &v))
14253                         return;
14254
14255                 offset += sizeof(v);
14256
14257                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14258                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14259                         break;
14260                 }
14261
14262                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14263                 vlen += sizeof(v);
14264         }
14265 }
14266
14267 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14268 {
14269         u32 apedata;
14270
14271         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14272         if (apedata != APE_SEG_SIG_MAGIC)
14273                 return;
14274
14275         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14276         if (!(apedata & APE_FW_STATUS_READY))
14277                 return;
14278
14279         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14280                 tg3_flag_set(tp, APE_HAS_NCSI);
14281 }
14282
14283 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14284 {
14285         int vlen;
14286         u32 apedata;
14287         char *fwtype;
14288
14289         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14290
14291         if (tg3_flag(tp, APE_HAS_NCSI))
14292                 fwtype = "NCSI";
14293         else
14294                 fwtype = "DASH";
14295
14296         vlen = strlen(tp->fw_ver);
14297
14298         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14299                  fwtype,
14300                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14301                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14302                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14303                  (apedata & APE_FW_VERSION_BLDMSK));
14304 }
14305
14306 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14307 {
14308         u32 val;
14309         bool vpd_vers = false;
14310
14311         if (tp->fw_ver[0] != 0)
14312                 vpd_vers = true;
14313
14314         if (tg3_flag(tp, NO_NVRAM)) {
14315                 strcat(tp->fw_ver, "sb");
14316                 return;
14317         }
14318
14319         if (tg3_nvram_read(tp, 0, &val))
14320                 return;
14321
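        /* Word 0 of NVRAM identifies the image format: TG3_EEPROM_MAGIC
         * for full bootcode, or the FW/HW magic values for the two
         * selfboot layouts.
         */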
14322         if (val == TG3_EEPROM_MAGIC)
14323                 tg3_read_bc_ver(tp);
14324         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14325                 tg3_read_sb_ver(tp, val);
14326         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14327                 tg3_read_hwsb_ver(tp);
14328
14329         if (tg3_flag(tp, ENABLE_ASF)) {
14330                 if (tg3_flag(tp, ENABLE_APE)) {
14331                         tg3_probe_ncsi(tp);
14332                         if (!vpd_vers)
14333                                 tg3_read_dash_ver(tp);
14334                 } else if (!vpd_vers) {
14335                         tg3_read_mgmtfw_ver(tp);
14336                 }
14337         }
14338
14339         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14340 }
14341
14342 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14343 {
14344         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14345                 return TG3_RX_RET_MAX_SIZE_5717;
14346         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14347                 return TG3_RX_RET_MAX_SIZE_5700;
14348         else
14349                 return TG3_RX_RET_MAX_SIZE_5705;
14350 }
14351
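/* Host bridges known to reorder posted writes to the mailbox registers.
 * See the MBOX_WRITE_REORDER handling in tg3_get_invariants().
 */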
14352 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14353         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14354         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14355         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14356         { },
14357 };
14358
14359 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14360 {
14361         struct pci_dev *peer;
14362         unsigned int func, devnr = tp->pdev->devfn & ~7;
14363
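        /* Scan the other functions of this slot for the mate port. */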
14364         for (func = 0; func < 8; func++) {
14365                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14366                 if (peer && peer != tp->pdev)
14367                         break;
14368                 pci_dev_put(peer);
14369         }
14370         /* The 5704 can be configured in single-port mode; set peer to
14371          * tp->pdev in that case.
14372          */
14373         if (!peer) {
14374                 peer = tp->pdev;
14375                 return peer;
14376         }
14377
14378         /*
14379          * We don't need to keep the refcount elevated; there's no way
14380          * to remove one half of this device without removing the other.
14381          */
14382         pci_dev_put(peer);
14383
14384         return peer;
14385 }
14386
14387 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14388 {
14389         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14391                 u32 reg;
14392
14393                 /* All devices that use the alternate
14394                  * ASIC REV location have a CPMU.
14395                  */
14396                 tg3_flag_set(tp, CPMU_PRESENT);
14397
14398                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14399                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14400                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14401                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14402                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14403                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14404                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14405                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14406                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14407                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14408                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14409                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14410                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14411                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14412                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14413                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14414                 else
14415                         reg = TG3PCI_PRODID_ASICREV;
14416
14417                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14418         }
14419
14420         /* Wrong chip ID in 5752 A0. This code can be removed later
14421          * as A0 is not in production.
14422          */
14423         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14424                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14425
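        /* The chip-class flags nest: 5717_PLUS and 57765_CLASS imply
         * 57765_PLUS, which implies 5755_PLUS, then 5750_PLUS, and
         * finally 5705_PLUS.
         */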
14426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14427             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14428             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14429                 tg3_flag_set(tp, 5717_PLUS);
14430
14431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14432             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14433                 tg3_flag_set(tp, 57765_CLASS);
14434
14435         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14436                 tg3_flag_set(tp, 57765_PLUS);
14437
14438         /* Intentionally exclude ASIC_REV_5906 */
14439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14440             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14445             tg3_flag(tp, 57765_PLUS))
14446                 tg3_flag_set(tp, 5755_PLUS);
14447
14448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14450                 tg3_flag_set(tp, 5780_CLASS);
14451
14452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14453             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14454             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14455             tg3_flag(tp, 5755_PLUS) ||
14456             tg3_flag(tp, 5780_CLASS))
14457                 tg3_flag_set(tp, 5750_PLUS);
14458
14459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14460             tg3_flag(tp, 5750_PLUS))
14461                 tg3_flag_set(tp, 5705_PLUS);
14462 }
14463
14464 static int __devinit tg3_get_invariants(struct tg3 *tp)
14465 {
14466         u32 misc_ctrl_reg;
14467         u32 pci_state_reg, grc_misc_cfg;
14468         u32 val;
14469         u16 pci_cmd;
14470         int err;
14471
14472         /* Force memory write invalidate off.  If we leave it on,
14473          * then on 5700_BX chips we have to enable a workaround.
14474          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14475          * to match the cacheline size.  The Broadcom driver has this
14476          * workaround but turns MWI off all the time, so it never uses
14477          * it.  This suggests that the workaround is insufficient.
14478          */
14479         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14480         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14481         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14482
14483         /* Important! -- Make sure register accesses are byteswapped
14484          * correctly.  Also, for those chips that require it, make
14485          * sure that indirect register accesses are enabled before
14486          * the first operation.
14487          */
14488         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14489                               &misc_ctrl_reg);
14490         tp->misc_host_ctrl |= (misc_ctrl_reg &
14491                                MISC_HOST_CTRL_CHIPREV);
14492         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14493                                tp->misc_host_ctrl);
14494
14495         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14496
14497         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14498          * we need to disable memory and use config. cycles
14499          * only to access all registers. The 5702/03 chips
14500          * can mistakenly decode the special cycles from the
14501          * ICH chipsets as memory write cycles, causing corruption
14502          * of register and memory space. Only certain ICH bridges
14503          * will drive special cycles with non-zero data during the
14504          * address phase which can fall within the 5703's address
14505          * range. This is not an ICH bug as the PCI spec allows
14506          * non-zero address during special cycles. However, only
14507          * these ICH bridges are known to drive non-zero addresses
14508          * during special cycles.
14509          *
14510          * Since special cycles do not cross PCI bridges, we only
14511          * enable this workaround if the 5703 is on the secondary
14512          * bus of these ICH bridges.
14513          */
14514         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14515             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14516                 static struct tg3_dev_id {
14517                         u32     vendor;
14518                         u32     device;
14519                         u32     rev;
14520                 } ich_chipsets[] = {
14521                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14522                           PCI_ANY_ID },
14523                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14524                           PCI_ANY_ID },
14525                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14526                           0xa },
14527                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14528                           PCI_ANY_ID },
14529                         { },
14530                 };
14531                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14532                 struct pci_dev *bridge = NULL;
14533
14534                 while (pci_id->vendor != 0) {
14535                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14536                                                 bridge);
14537                         if (!bridge) {
14538                                 pci_id++;
14539                                 continue;
14540                         }
14541                         if (pci_id->rev != PCI_ANY_ID) {
14542                                 if (bridge->revision > pci_id->rev)
14543                                         continue;
14544                         }
14545                         if (bridge->subordinate &&
14546                             (bridge->subordinate->number ==
14547                              tp->pdev->bus->number)) {
14548                                 tg3_flag_set(tp, ICH_WORKAROUND);
14549                                 pci_dev_put(bridge);
14550                                 break;
14551                         }
14552                 }
14553         }
14554
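        /* A 5701 behind an Intel PXH bridge needs the 5701_DMA_BUG
         * workaround.
         */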
14555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14556                 static struct tg3_dev_id {
14557                         u32     vendor;
14558                         u32     device;
14559                 } bridge_chipsets[] = {
14560                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14561                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14562                         { },
14563                 };
14564                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14565                 struct pci_dev *bridge = NULL;
14566
14567                 while (pci_id->vendor != 0) {
14568                         bridge = pci_get_device(pci_id->vendor,
14569                                                 pci_id->device,
14570                                                 bridge);
14571                         if (!bridge) {
14572                                 pci_id++;
14573                                 continue;
14574                         }
14575                         if (bridge->subordinate &&
14576                             (bridge->subordinate->number <=
14577                              tp->pdev->bus->number) &&
14578                             (bridge->subordinate->busn_res.end >=
14579                              tp->pdev->bus->number)) {
14580                                 tg3_flag_set(tp, 5701_DMA_BUG);
14581                                 pci_dev_put(bridge);
14582                                 break;
14583                         }
14584                 }
14585         }
14586
14587         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14588          * DMA addresses > 40-bit. This bridge may have other additional
14589          * 57xx devices behind it in some 4-port NIC designs for example.
14590          * Any tg3 device found behind the bridge will also need the 40-bit
14591          * DMA workaround.
14592          */
14593         if (tg3_flag(tp, 5780_CLASS)) {
14594                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14595                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14596         } else {
14597                 struct pci_dev *bridge = NULL;
14598
14599                 do {
14600                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14601                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14602                                                 bridge);
14603                         if (bridge && bridge->subordinate &&
14604                             (bridge->subordinate->number <=
14605                              tp->pdev->bus->number) &&
14606                             (bridge->subordinate->busn_res.end >=
14607                              tp->pdev->bus->number)) {
14608                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14609                                 pci_dev_put(bridge);
14610                                 break;
14611                         }
14612                 } while (bridge);
14613         }
14614
14615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14617                 tp->pdev_peer = tg3_find_peer(tp);
14618
14619         /* Determine TSO capabilities */
14620         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14621                 ; /* Do nothing. HW bug. */
14622         else if (tg3_flag(tp, 57765_PLUS))
14623                 tg3_flag_set(tp, HW_TSO_3);
14624         else if (tg3_flag(tp, 5755_PLUS) ||
14625                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14626                 tg3_flag_set(tp, HW_TSO_2);
14627         else if (tg3_flag(tp, 5750_PLUS)) {
14628                 tg3_flag_set(tp, HW_TSO_1);
14629                 tg3_flag_set(tp, TSO_BUG);
14630                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14631                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14632                         tg3_flag_clear(tp, TSO_BUG);
14633         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14634                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14635                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14636                 tg3_flag_set(tp, TSO_BUG);
14637                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14638                         tp->fw_needed = FIRMWARE_TG3TSO5;
14639                 else
14640                         tp->fw_needed = FIRMWARE_TG3TSO;
14641         }
14642
14643         /* Selectively allow TSO based on operating conditions */
14644         if (tg3_flag(tp, HW_TSO_1) ||
14645             tg3_flag(tp, HW_TSO_2) ||
14646             tg3_flag(tp, HW_TSO_3) ||
14647             tp->fw_needed) {
14648                 /* For firmware TSO, assume ASF is disabled.
14649                  * We'll disable TSO later if we discover ASF
14650                  * is enabled in tg3_get_eeprom_hw_cfg().
14651                  */
14652                 tg3_flag_set(tp, TSO_CAPABLE);
14653         } else {
14654                 tg3_flag_clear(tp, TSO_CAPABLE);
14655                 tg3_flag_clear(tp, TSO_BUG);
14656                 tp->fw_needed = NULL;
14657         }
14658
14659         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14660                 tp->fw_needed = FIRMWARE_TG3;
14661
14662         tp->irq_max = 1;
14663
14664         if (tg3_flag(tp, 5750_PLUS)) {
14665                 tg3_flag_set(tp, SUPPORT_MSI);
14666                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14667                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14668                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14669                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14670                      tp->pdev_peer == tp->pdev))
14671                         tg3_flag_clear(tp, SUPPORT_MSI);
14672
14673                 if (tg3_flag(tp, 5755_PLUS) ||
14674                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14675                         tg3_flag_set(tp, 1SHOT_MSI);
14676                 }
14677
14678                 if (tg3_flag(tp, 57765_PLUS)) {
14679                         tg3_flag_set(tp, SUPPORT_MSIX);
14680                         tp->irq_max = TG3_IRQ_MAX_VECS;
14681                 }
14682         }
14683
14684         tp->txq_max = 1;
14685         tp->rxq_max = 1;
14686         if (tp->irq_max > 1) {
14687                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14688                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14689
14690                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14691                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14692                         tp->txq_max = tp->irq_max - 1;
14693         }
14694
14695         if (tg3_flag(tp, 5755_PLUS) ||
14696             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14697                 tg3_flag_set(tp, SHORT_DMA_BUG);
14698
14699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14700                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14701
14702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14705                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14706
14707         if (tg3_flag(tp, 57765_PLUS) &&
14708             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14709                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14710
14711         if (!tg3_flag(tp, 5705_PLUS) ||
14712             tg3_flag(tp, 5780_CLASS) ||
14713             tg3_flag(tp, USE_JUMBO_BDFLAG))
14714                 tg3_flag_set(tp, JUMBO_CAPABLE);
14715
14716         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14717                               &pci_state_reg);
14718
14719         if (pci_is_pcie(tp->pdev)) {
14720                 u16 lnkctl;
14721
14722                 tg3_flag_set(tp, PCI_EXPRESS);
14723
14724                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14725                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14726                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14727                             ASIC_REV_5906) {
14728                                 tg3_flag_clear(tp, HW_TSO_2);
14729                                 tg3_flag_clear(tp, TSO_CAPABLE);
14730                         }
14731                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14732                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14733                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14734                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14735                                 tg3_flag_set(tp, CLKREQ_BUG);
14736                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14737                         tg3_flag_set(tp, L1PLLPD_EN);
14738                 }
14739         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14740                 /* BCM5785 devices are effectively PCIe devices, and should
14741                  * follow PCIe codepaths, but do not have a PCIe capabilities
14742                  * section.
14743                  */
14744                 tg3_flag_set(tp, PCI_EXPRESS);
14745         } else if (!tg3_flag(tp, 5705_PLUS) ||
14746                    tg3_flag(tp, 5780_CLASS)) {
14747                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14748                 if (!tp->pcix_cap) {
14749                         dev_err(&tp->pdev->dev,
14750                                 "Cannot find PCI-X capability, aborting\n");
14751                         return -EIO;
14752                 }
14753
14754                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14755                         tg3_flag_set(tp, PCIX_MODE);
14756         }
14757
14758         /* If we have an AMD 762 or VIA K8T800 chipset, write
14759          * reordering to the mailbox registers done by the host
14760          * controller can cause major trouble.  We read back from
14761          * every mailbox register write to force the writes to be
14762          * posted to the chip in order.
14763          */
14764         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14765             !tg3_flag(tp, PCI_EXPRESS))
14766                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14767
14768         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14769                              &tp->pci_cacheline_sz);
14770         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14771                              &tp->pci_lat_timer);
14772         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14773             tp->pci_lat_timer < 64) {
14774                 tp->pci_lat_timer = 64;
14775                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14776                                       tp->pci_lat_timer);
14777         }
14778
14779         /* Important! -- It is critical that the PCI-X hw workaround
14780          * situation is decided before the first MMIO register access.
14781          */
14782         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14783                 /* 5700 BX chips need to have their TX producer index
14784                  * mailboxes written twice to workaround a bug.
14785                  */
14786                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14787
14788                 /* If we are in PCI-X mode, enable register write workaround.
14789                  *
14790                  * The workaround is to use indirect register accesses
14791                  * for all chip writes not to mailbox registers.
14792                  */
14793                 if (tg3_flag(tp, PCIX_MODE)) {
14794                         u32 pm_reg;
14795
14796                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14797
14798                         /* The chip can have its power management PCI config
14799                          * space registers clobbered due to this bug.
14800                          * So explicitly force the chip into D0 here.
14801                          */
14802                         pci_read_config_dword(tp->pdev,
14803                                               tp->pm_cap + PCI_PM_CTRL,
14804                                               &pm_reg);
14805                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14806                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14807                         pci_write_config_dword(tp->pdev,
14808                                                tp->pm_cap + PCI_PM_CTRL,
14809                                                pm_reg);
14810
14811                         /* Also, force SERR#/PERR# in PCI command. */
14812                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14813                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14814                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14815                 }
14816         }
14817
14818         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14819                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14820         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14821                 tg3_flag_set(tp, PCI_32BIT);
14822
14823         /* Chip-specific fixup from Broadcom driver */
14824         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14825             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14826                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14827                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14828         }
14829
14830         /* Default fast path register access methods */
14831         tp->read32 = tg3_read32;
14832         tp->write32 = tg3_write32;
14833         tp->read32_mbox = tg3_read32;
14834         tp->write32_mbox = tg3_write32;
14835         tp->write32_tx_mbox = tg3_write32;
14836         tp->write32_rx_mbox = tg3_write32;
14837
14838         /* Various workaround register access methods */
14839         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14840                 tp->write32 = tg3_write_indirect_reg32;
14841         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14842                  (tg3_flag(tp, PCI_EXPRESS) &&
14843                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14844                 /*
14845                  * Back-to-back register writes can cause problems on these
14846                  * chips; the workaround is to read back all reg writes
14847                  * except those to mailbox regs.
14848                  *
14849                  * See tg3_write_indirect_reg32().
14850                  */
14851                 tp->write32 = tg3_write_flush_reg32;
14852         }
14853
14854         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14855                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14856                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14857                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14858         }
14859
14860         if (tg3_flag(tp, ICH_WORKAROUND)) {
14861                 tp->read32 = tg3_read_indirect_reg32;
14862                 tp->write32 = tg3_write_indirect_reg32;
14863                 tp->read32_mbox = tg3_read_indirect_mbox;
14864                 tp->write32_mbox = tg3_write_indirect_mbox;
14865                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14866                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14867
14868                 iounmap(tp->regs);
14869                 tp->regs = NULL;
14870
14871                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14872                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14873                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14874         }
14875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14876                 tp->read32_mbox = tg3_read32_mbox_5906;
14877                 tp->write32_mbox = tg3_write32_mbox_5906;
14878                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14879                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14880         }
14881
14882         if (tp->write32 == tg3_write_indirect_reg32 ||
14883             (tg3_flag(tp, PCIX_MODE) &&
14884              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14885               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14886                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14887
14888         /* The memory arbiter has to be enabled in order for SRAM accesses
14889          * to succeed.  Normally on powerup the tg3 chip firmware will make
14890          * sure it is enabled, but other entities such as system netboot
14891          * code might disable it.
14892          */
14893         val = tr32(MEMARB_MODE);
14894         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14895
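        /* Determine which hardware function this is.  On some chips the
         * devfn-derived value is overridden by the PCI-X status register
         * or by the function bits in the NIC SRAM CPMU status word.
         */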
14896         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14897         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14898             tg3_flag(tp, 5780_CLASS)) {
14899                 if (tg3_flag(tp, PCIX_MODE)) {
14900                         pci_read_config_dword(tp->pdev,
14901                                               tp->pcix_cap + PCI_X_STATUS,
14902                                               &val);
14903                         tp->pci_fn = val & 0x7;
14904                 }
14905         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14906                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14907                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14908                     NIC_SRAM_CPMUSTAT_SIG) {
14909                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14910                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14911                 }
14912         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14913                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14914                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14915                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14916                     NIC_SRAM_CPMUSTAT_SIG) {
14917                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14918                                      TG3_CPMU_STATUS_FSHFT_5719;
14919                 }
14920         }
14921
14922         /* Get eeprom hw config before calling tg3_set_power_state().
14923          * In particular, the TG3_FLAG_IS_NIC flag must be
14924          * determined before calling tg3_set_power_state() so that
14925          * we know whether or not to switch out of Vaux power.
14926          * When the flag is set, it means that GPIO1 is used for eeprom
14927          * write protect and also implies that it is a LOM where GPIOs
14928          * are not used to switch power.
14929          */
14930         tg3_get_eeprom_hw_cfg(tp);
14931
14932         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14933                 tg3_flag_clear(tp, TSO_CAPABLE);
14934                 tg3_flag_clear(tp, TSO_BUG);
14935                 tp->fw_needed = NULL;
14936         }
14937
14938         if (tg3_flag(tp, ENABLE_APE)) {
14939                 /* Allow reads and writes to the
14940                  * APE register and memory space.
14941                  */
14942                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14943                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14944                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14945                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14946                                        pci_state_reg);
14947
14948                 tg3_ape_lock_init(tp);
14949         }
14950
14951         /* Set up tp->grc_local_ctrl before calling
14952          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14953          * will bring 5700's external PHY out of reset.
14954          * It is also used as eeprom write protect on LOMs.
14955          */
14956         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14958             tg3_flag(tp, EEPROM_WRITE_PROT))
14959                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14960                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14961         /* Unused GPIO3 must be driven as output on 5752 because there
14962          * are no pull-up resistors on unused GPIO pins.
14963          */
14964         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14965                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14966
14967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14969             tg3_flag(tp, 57765_CLASS))
14970                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14971
14972         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14973             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14974                 /* Turn off the debug UART. */
14975                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14976                 if (tg3_flag(tp, IS_NIC))
14977                         /* Keep VMain power. */
14978                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14979                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14980         }
14981
14982         /* Switch out of Vaux if it is a NIC */
14983         tg3_pwrsrc_switch_to_vmain(tp);
14984
14985         /* Derive initial jumbo mode from MTU assigned in
14986          * ether_setup() via the alloc_etherdev() call.
14987          */
14988         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14989                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14990
14991         /* Determine WakeOnLan speed to use. */
14992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14993             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14994             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14995             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14996                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14997         } else {
14998                 tg3_flag_set(tp, WOL_SPEED_100MB);
14999         }
15000
15001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15002                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15003
15004         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15006             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15007              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15008              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15009             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15010             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15011                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15012
15013         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15014             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15015                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15016         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15017                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15018
15019         if (tg3_flag(tp, 5705_PLUS) &&
15020             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15021             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15022             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15023             !tg3_flag(tp, 57765_PLUS)) {
15024                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15025                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15026                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15027                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15028                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15029                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15030                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15031                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15032                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15033                 } else
15034                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15035         }
15036
15037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15038             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15039                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15040                 if (tp->phy_otp == 0)
15041                         tp->phy_otp = TG3_OTP_DEFAULT;
15042         }
15043
15044         if (tg3_flag(tp, CPMU_PRESENT))
15045                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15046         else
15047                 tp->mi_mode = MAC_MI_MODE_BASE;
15048
15049         tp->coalesce_mode = 0;
15050         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15051             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15052                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15053
15054         /* Set these bits to enable statistics workaround. */
15055         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15056             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15057             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15058                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15059                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15060         }
15061
15062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15063             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15064                 tg3_flag_set(tp, USE_PHYLIB);
15065
15066         err = tg3_mdio_init(tp);
15067         if (err)
15068                 return err;
15069
15070         /* Initialize data/descriptor byte/word swapping. */
15071         val = tr32(GRC_MODE);
15072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15073                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15074                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15075                         GRC_MODE_B2HRX_ENABLE |
15076                         GRC_MODE_HTX2B_ENABLE |
15077                         GRC_MODE_HOST_STACKUP);
15078         else
15079                 val &= GRC_MODE_HOST_STACKUP;
15080
15081         tw32(GRC_MODE, val | tp->grc_mode);
15082
15083         tg3_switch_clocks(tp);
15084
15085         /* Clear this out for sanity. */
15086         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15087
15088         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15089                               &pci_state_reg);
15090         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15091             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15092                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15093
15094                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15095                     chiprevid == CHIPREV_ID_5701_B0 ||
15096                     chiprevid == CHIPREV_ID_5701_B2 ||
15097                     chiprevid == CHIPREV_ID_5701_B5) {
15098                         void __iomem *sram_base;
15099
15100                         /* Write some dummy words into the SRAM status block
15101                          * area and see if they read back correctly.  If the return
15102                          * value is bad, force enable the PCIX workaround.
15103                          */
15104                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15105
15106                         writel(0x00000000, sram_base);
15107                         writel(0x00000000, sram_base + 4);
15108                         writel(0xffffffff, sram_base + 4);
15109                         if (readl(sram_base) != 0x00000000)
15110                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15111                 }
15112         }
15113
15114         udelay(50);
15115         tg3_nvram_init(tp);
15116
15117         grc_misc_cfg = tr32(GRC_MISC_CFG);
15118         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15119
15120         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15121             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15122              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15123                 tg3_flag_set(tp, IS_5788);
15124
15125         if (!tg3_flag(tp, IS_5788) &&
15126             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15127                 tg3_flag_set(tp, TAGGED_STATUS);
15128         if (tg3_flag(tp, TAGGED_STATUS)) {
15129                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15130                                       HOSTCC_MODE_CLRTICK_TXBD);
15131
15132                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15133                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15134                                        tp->misc_host_ctrl);
15135         }
15136
15137         /* Preserve the APE MAC_MODE bits */
15138         if (tg3_flag(tp, ENABLE_APE))
15139                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15140         else
15141                 tp->mac_mode = 0;
15142
15143         /* these are limited to 10/100 only */
15144         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15145              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15146             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15147              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15148              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15149               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15150               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15151             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15152              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15153               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15154               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15155             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15156             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15157             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15158             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15159                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15160
15161         err = tg3_phy_probe(tp);
15162         if (err) {
15163                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15164                 /* ... but do not return immediately ... */
15165                 tg3_mdio_fini(tp);
15166         }
15167
15168         tg3_read_vpd(tp);
15169         tg3_read_fw_ver(tp);
15170
15171         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15172                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15173         } else {
15174                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15175                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15176                 else
15177                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15178         }
15179
15180         /* 5700 {AX,BX} chips have a broken status block link
15181          * change bit implementation, so we must use the
15182          * status register in those cases.
15183          */
15184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15185                 tg3_flag_set(tp, USE_LINKCHG_REG);
15186         else
15187                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15188
15189         /* The led_ctrl is set during tg3_phy_probe; here we might
15190          * have to force the link status polling mechanism based
15191          * upon subsystem IDs.
15192          */
15193         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15194             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15195             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15196                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15197                 tg3_flag_set(tp, USE_LINKCHG_REG);
15198         }
15199
15200         /* For all SERDES we poll the MAC status register. */
15201         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15202                 tg3_flag_set(tp, POLL_SERDES);
15203         else
15204                 tg3_flag_clear(tp, POLL_SERDES);
15205
15206         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15207         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
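        /* For 5701 in PCI-X mode, skip the usual NET_IP_ALIGN receive
         * offset (the DMA engine presumably needs dword-aligned
         * buffers) and, on architectures without efficient unaligned
         * access, copy every received packet instead.
         */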
15208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15209             tg3_flag(tp, PCIX_MODE)) {
15210                 tp->rx_offset = NET_SKB_PAD;
15211 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15212                 tp->rx_copy_thresh = ~(u16)0;
15213 #endif
15214         }
15215
15216         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15217         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15218         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15219
15220         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15221
15222         /* Increment the rx prod index on the rx std ring by at most
15223          * 8 for these chips to work around hw errata.
15224          */
15225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15226             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15228                 tp->rx_std_max_post = 8;
15229
15230         if (tg3_flag(tp, ASPM_WORKAROUND))
15231                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15232                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15233
15234         return err;
15235 }
15236
15237 #ifdef CONFIG_SPARC
15238 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15239 {
15240         struct net_device *dev = tp->dev;
15241         struct pci_dev *pdev = tp->pdev;
15242         struct device_node *dp = pci_device_to_OF_node(pdev);
15243         const unsigned char *addr;
15244         int len;
15245
15246         addr = of_get_property(dp, "local-mac-address", &len);
15247         if (addr && len == 6) {
15248                 memcpy(dev->dev_addr, addr, 6);
15249                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15250                 return 0;
15251         }
15252         return -ENODEV;
15253 }
15254
15255 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15256 {
15257         struct net_device *dev = tp->dev;
15258
15259         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15260         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15261         return 0;
15262 }
15263 #endif
15264
15265 static int __devinit tg3_get_device_address(struct tg3 *tp)
15266 {
15267         struct net_device *dev = tp->dev;
15268         u32 hi, lo, mac_offset;
15269         int addr_ok = 0;
15270
15271 #ifdef CONFIG_SPARC
15272         if (!tg3_get_macaddr_sparc(tp))
15273                 return 0;
15274 #endif
15275
15276         mac_offset = 0x7c;
15277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15278             tg3_flag(tp, 5780_CLASS)) {
15279                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15280                         mac_offset = 0xcc;
15281                 if (tg3_nvram_lock(tp))
15282                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15283                 else
15284                         tg3_nvram_unlock(tp);
15285         } else if (tg3_flag(tp, 5717_PLUS)) {
15286                 if (tp->pci_fn & 1)
15287                         mac_offset = 0xcc;
15288                 if (tp->pci_fn > 1)
15289                         mac_offset += 0x18c;
15290         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15291                 mac_offset = 0x10;
15292
15293         /* First, try to get it from the MAC address mailbox. */
15294         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
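        /* The mailbox contents are considered valid only when the
         * upper 16 bits of the high word carry the 0x484b signature.
         */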
15295         if ((hi >> 16) == 0x484b) {
15296                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15297                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15298
15299                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15300                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15301                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15302                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15303                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15304
15305                 /* Some old bootcode may report a 0 MAC address in SRAM */
15306                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15307         }
15308         if (!addr_ok) {
15309                 /* Next, try NVRAM. */
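                /* The address is stored as two big-endian words: its
                 * first two bytes sit in the low half of the first
                 * word, the remaining four fill the second word.
                 */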
15310                 if (!tg3_flag(tp, NO_NVRAM) &&
15311                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15312                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15313                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15314                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15315                 }
15316                 /* Finally just fetch it out of the MAC control regs. */
15317                 else {
15318                         hi = tr32(MAC_ADDR_0_HIGH);
15319                         lo = tr32(MAC_ADDR_0_LOW);
15320
15321                         dev->dev_addr[5] = lo & 0xff;
15322                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15323                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15324                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15325                         dev->dev_addr[1] = hi & 0xff;
15326                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15327                 }
15328         }
15329
15330         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15331 #ifdef CONFIG_SPARC
15332                 if (!tg3_get_default_macaddr_sparc(tp))
15333                         return 0;
15334 #endif
15335                 return -EINVAL;
15336         }
15337         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15338         return 0;
15339 }
15340
15341 #define BOUNDARY_SINGLE_CACHELINE       1
15342 #define BOUNDARY_MULTI_CACHELINE        2
15343
15344 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15345 {
15346         int cacheline_size;
15347         u8 byte;
15348         int goal;
15349
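        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; treat a
         * value of zero (never configured) as a 1024-byte cache line.
         */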
15350         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15351         if (byte == 0)
15352                 cacheline_size = 1024;
15353         else
15354                 cacheline_size = (int) byte * 4;
15355
15356         /* On 5703 and later chips, the boundary bits have no
15357          * effect.
15358          */
15359         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15360             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15361             !tg3_flag(tp, PCI_EXPRESS))
15362                 goto out;
15363
15364 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15365         goal = BOUNDARY_MULTI_CACHELINE;
15366 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15367         goal = BOUNDARY_SINGLE_CACHELINE;
15368 #else
15369         goal = 0;
15370 #endif
15373
15374         if (tg3_flag(tp, 57765_PLUS)) {
15375                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15376                 goto out;
15377         }
15378
15379         if (!goal)
15380                 goto out;
15381
15382         /* PCI controllers on most RISC systems tend to disconnect
15383          * when a device tries to burst across a cache-line boundary.
15384          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15385          *
15386          * Unfortunately, for PCI-E there are only limited
15387          * write-side controls for this, and thus for reads
15388          * we will still get the disconnects.  We'll also waste
15389          * these PCI cycles for both read and write for chips
15390          * other than 5700 and 5701 which do not implement the
15391          * boundary bits.
15392          */
15393         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15394                 switch (cacheline_size) {
15395                 case 16:
15396                 case 32:
15397                 case 64:
15398                 case 128:
15399                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15400                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15401                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15402                         } else {
15403                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15404                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15405                         }
15406                         break;
15407
15408                 case 256:
15409                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15410                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15411                         break;
15412
15413                 default:
15414                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15415                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15416                         break;
15417                 }
15418         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15419                 switch (cacheline_size) {
15420                 case 16:
15421                 case 32:
15422                 case 64:
15423                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15424                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15425                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15426                                 break;
15427                         }
15428                         /* fallthrough */
15429                 case 128:
15430                 default:
15431                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15432                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15433                         break;
15434                 }
15435         } else {
15436                 switch (cacheline_size) {
15437                 case 16:
15438                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15439                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15440                                         DMA_RWCTRL_WRITE_BNDRY_16);
15441                                 break;
15442                         }
15443                         /* fallthrough */
15444                 case 32:
15445                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15446                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15447                                         DMA_RWCTRL_WRITE_BNDRY_32);
15448                                 break;
15449                         }
15450                         /* fallthrough */
15451                 case 64:
15452                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15453                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15454                                         DMA_RWCTRL_WRITE_BNDRY_64);
15455                                 break;
15456                         }
15457                         /* fallthrough */
15458                 case 128:
15459                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15460                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15461                                         DMA_RWCTRL_WRITE_BNDRY_128);
15462                                 break;
15463                         }
15464                         /* fallthrough */
15465                 case 256:
15466                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15467                                 DMA_RWCTRL_WRITE_BNDRY_256);
15468                         break;
15469                 case 512:
15470                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15471                                 DMA_RWCTRL_WRITE_BNDRY_512);
15472                         break;
15473                 case 1024:
15474                 default:
15475                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15476                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15477                         break;
15478                 }
15479         }
15480
15481 out:
15482         return val;
15483 }
15484
15485 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
                                           dma_addr_t buf_dma, int size,
                                           int to_device)
15486 {
15487         struct tg3_internal_buffer_desc test_desc;
15488         u32 sram_dma_descs;
15489         int i, ret;
15490
15491         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15492
15493         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15494         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15495         tw32(RDMAC_STATUS, 0);
15496         tw32(WDMAC_STATUS, 0);
15497
15498         tw32(BUFMGR_MODE, 0);
15499         tw32(FTQ_RESET, 0);
15500
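        /* Hand-build a single internal buffer descriptor: split the
         * 64-bit bus address into high/low words and point nic_mbuf
         * at NIC SRAM offset 0x2100.
         */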
15501         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15502         test_desc.addr_lo = buf_dma & 0xffffffff;
15503         test_desc.nic_mbuf = 0x00002100;
15504         test_desc.len = size;
15505
15506         /*
15507          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15508          * the *second* time the tg3 driver was getting loaded after an
15509          * initial scan.
15510          *
15511          * Broadcom tells me:
15512          *   ...the DMA engine is connected to the GRC block and a DMA
15513          *   reset may affect the GRC block in some unpredictable way...
15514          *   The behavior of resets to individual blocks has not been tested.
15515          *
15516          * Broadcom noted the GRC reset will also reset all sub-components.
15517          */
15518         if (to_device) {
15519                 test_desc.cqid_sqid = (13 << 8) | 2;
15520
15521                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15522                 udelay(40);
15523         } else {
15524                 test_desc.cqid_sqid = (16 << 8) | 7;
15525
15526                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15527                 udelay(40);
15528         }
15529         test_desc.flags = 0x00000005;
15530
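        /* Copy the descriptor into NIC SRAM one word at a time via the
         * PCI memory window: set the window base, then write the data
         * register.
         */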
15531         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15532                 u32 val;
15533
15534                 val = *(((u32 *)&test_desc) + i);
15535                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15536                                        sram_dma_descs + (i * sizeof(u32)));
15537                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15538         }
15539         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15540
15541         if (to_device)
15542                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15543         else
15544                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15545
15546         ret = -ENODEV;
15547         for (i = 0; i < 40; i++) {
15548                 u32 val;
15549
15550                 if (to_device)
15551                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15552                 else
15553                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15554                 if ((val & 0xffff) == sram_dma_descs) {
15555                         ret = 0;
15556                         break;
15557                 }
15558
15559                 udelay(100);
15560         }
15561
15562         return ret;
15563 }
15564
15565 #define TEST_BUFFER_SIZE        0x2000
15566
15567 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15568         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15569         { },
15570 };
15571
15572 static int __devinit tg3_test_dma(struct tg3 *tp)
15573 {
15574         dma_addr_t buf_dma;
15575         u32 *buf, saved_dma_rwctrl;
15576         int ret = 0;
15577
15578         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15579                                  &buf_dma, GFP_KERNEL);
15580         if (!buf) {
15581                 ret = -ENOMEM;
15582                 goto out_nofree;
15583         }
15584
15585         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15586                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15587
15588         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15589
15590         if (tg3_flag(tp, 57765_PLUS))
15591                 goto out;
15592
15593         if (tg3_flag(tp, PCI_EXPRESS)) {
15594                 /* DMA read watermark not used on PCIE */
15595                 tp->dma_rwctrl |= 0x00180000;
15596         } else if (!tg3_flag(tp, PCIX_MODE)) {
15597                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15598                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15599                         tp->dma_rwctrl |= 0x003f0000;
15600                 else
15601                         tp->dma_rwctrl |= 0x003f000f;
15602         } else {
15603                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15604                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15605                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15606                         u32 read_water = 0x7;
15607
15608                         /* If the 5704 is behind the EPB bridge, we can
15609                          * do the less restrictive ONE_DMA workaround for
15610                          * better performance.
15611                          */
15612                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15613                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15614                                 tp->dma_rwctrl |= 0x8000;
15615                         else if (ccval == 0x6 || ccval == 0x7)
15616                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15617
15618                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15619                                 read_water = 4;
15620                         /* Set bit 23 to enable PCIX hw bug fix */
15621                         tp->dma_rwctrl |=
15622                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15623                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15624                                 (1 << 23);
15625                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15626                         /* 5780 always in PCIX mode */
15627                         tp->dma_rwctrl |= 0x00144000;
15628                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15629                         /* 5714 always in PCIX mode */
15630                         tp->dma_rwctrl |= 0x00148000;
15631                 } else {
15632                         tp->dma_rwctrl |= 0x001b000f;
15633                 }
15634         }
15635
15636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15638                 tp->dma_rwctrl &= 0xfffffff0;
15639
15640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15641             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15642                 /* Remove this if it causes problems for some boards. */
15643                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15644
15645                 /* On 5700/5701 chips, we need to set this bit.
15646                  * Otherwise the chip will issue cacheline transactions
15647                  * to streamable DMA memory without all of the byte
15648                  * enables turned on.  This is an error on several
15649                  * RISC PCI controllers, in particular sparc64.
15650                  *
15651                  * On 5703/5704 chips, this bit has been reassigned
15652                  * a different meaning.  In particular, it is used
15653                  * on those chips to enable a PCI-X workaround.
15654                  */
15655                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15656         }
15657
15658         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15659
15660 #if 0
15661         /* Unneeded, already done by tg3_get_invariants.  */
15662         tg3_switch_clocks(tp);
15663 #endif
15664
15665         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15666             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15667                 goto out;
15668
15669         /* It is best to perform the DMA test with the maximum write
15670          * burst size to expose the 5700/5701 write DMA bug.
15671          */
15672         saved_dma_rwctrl = tp->dma_rwctrl;
15673         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15674         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15675
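        /* DMA the test pattern to the chip and read it back.  On the
         * first corrupted readback, drop to a 16-byte write boundary
         * and retry; corruption with the 16-byte boundary already in
         * place is a hard failure.
         */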
15676         while (1) {
15677                 u32 *p = buf, i;
15678
15679                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15680                         p[i] = i;
15681
15682                 /* Send the buffer to the chip. */
15683                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15684                 if (ret) {
15685                         dev_err(&tp->pdev->dev,
15686                                 "%s: Buffer write failed. err = %d\n",
15687                                 __func__, ret);
15688                         break;
15689                 }
15690
15691 #if 0
15692                 /* validate data reached card RAM correctly. */
15693                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15694                         u32 val;
15695                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15696                         if (le32_to_cpu(val) != p[i]) {
15697                                 dev_err(&tp->pdev->dev,
15698                                         "%s: Buffer corrupted on device! "
15699                                         "(%d != %d)\n", __func__, val, i);
15700                                 /* ret = -ENODEV here? */
15701                         }
15702                         p[i] = 0;
15703                 }
15704 #endif
15705                 /* Now read it back. */
15706                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15707                 if (ret) {
15708                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15709                                 "err = %d\n", __func__, ret);
15710                         break;
15711                 }
15712
15713                 /* Verify it. */
15714                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15715                         if (p[i] == i)
15716                                 continue;
15717
15718                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15719                             DMA_RWCTRL_WRITE_BNDRY_16) {
15720                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15721                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15722                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15723                                 break;
15724                         } else {
15725                                 dev_err(&tp->pdev->dev,
15726                                         "%s: Buffer corrupted on read back! "
15727                                         "(%d != %d)\n", __func__, p[i], i);
15728                                 ret = -ENODEV;
15729                                 goto out;
15730                         }
15731                 }
15732
15733                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15734                         /* Success. */
15735                         ret = 0;
15736                         break;
15737                 }
15738         }
15739         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15740             DMA_RWCTRL_WRITE_BNDRY_16) {
15741                 /* DMA test passed without adjusting DMA boundary,
15742                  * now look for chipsets that are known to expose the
15743                  * DMA bug without failing the test.
15744                  */
15745                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15746                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15747                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15748                 } else {
15749                         /* Safe to use the calculated DMA boundary. */
15750                         tp->dma_rwctrl = saved_dma_rwctrl;
15751                 }
15752
15753                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15754         }
15755
15756 out:
15757         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15758 out_nofree:
15759         return ret;
15760 }
15761
15762 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15763 {
15764         if (tg3_flag(tp, 57765_PLUS)) {
15765                 tp->bufmgr_config.mbuf_read_dma_low_water =
15766                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15767                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15768                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15769                 tp->bufmgr_config.mbuf_high_water =
15770                         DEFAULT_MB_HIGH_WATER_57765;
15771
15772                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15773                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15774                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15775                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15776                 tp->bufmgr_config.mbuf_high_water_jumbo =
15777                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15778         } else if (tg3_flag(tp, 5705_PLUS)) {
15779                 tp->bufmgr_config.mbuf_read_dma_low_water =
15780                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15781                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15782                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15783                 tp->bufmgr_config.mbuf_high_water =
15784                         DEFAULT_MB_HIGH_WATER_5705;
15785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15786                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15787                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15788                         tp->bufmgr_config.mbuf_high_water =
15789                                 DEFAULT_MB_HIGH_WATER_5906;
15790                 }
15791
15792                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15793                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15794                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15795                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15796                 tp->bufmgr_config.mbuf_high_water_jumbo =
15797                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15798         } else {
15799                 tp->bufmgr_config.mbuf_read_dma_low_water =
15800                         DEFAULT_MB_RDMA_LOW_WATER;
15801                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15802                         DEFAULT_MB_MACRX_LOW_WATER;
15803                 tp->bufmgr_config.mbuf_high_water =
15804                         DEFAULT_MB_HIGH_WATER;
15805
15806                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15807                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15808                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15809                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15810                 tp->bufmgr_config.mbuf_high_water_jumbo =
15811                         DEFAULT_MB_HIGH_WATER_JUMBO;
15812         }
15813
15814         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15815         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15816 }
15817
15818 static char * __devinit tg3_phy_string(struct tg3 *tp)
15819 {
15820         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15821         case TG3_PHY_ID_BCM5400:        return "5400";
15822         case TG3_PHY_ID_BCM5401:        return "5401";
15823         case TG3_PHY_ID_BCM5411:        return "5411";
15824         case TG3_PHY_ID_BCM5701:        return "5701";
15825         case TG3_PHY_ID_BCM5703:        return "5703";
15826         case TG3_PHY_ID_BCM5704:        return "5704";
15827         case TG3_PHY_ID_BCM5705:        return "5705";
15828         case TG3_PHY_ID_BCM5750:        return "5750";
15829         case TG3_PHY_ID_BCM5752:        return "5752";
15830         case TG3_PHY_ID_BCM5714:        return "5714";
15831         case TG3_PHY_ID_BCM5780:        return "5780";
15832         case TG3_PHY_ID_BCM5755:        return "5755";
15833         case TG3_PHY_ID_BCM5787:        return "5787";
15834         case TG3_PHY_ID_BCM5784:        return "5784";
15835         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15836         case TG3_PHY_ID_BCM5906:        return "5906";
15837         case TG3_PHY_ID_BCM5761:        return "5761";
15838         case TG3_PHY_ID_BCM5718C:       return "5718C";
15839         case TG3_PHY_ID_BCM5718S:       return "5718S";
15840         case TG3_PHY_ID_BCM57765:       return "57765";
15841         case TG3_PHY_ID_BCM5719C:       return "5719C";
15842         case TG3_PHY_ID_BCM5720C:       return "5720C";
15843         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15844         case 0:                 return "serdes";
15845         default:                return "unknown";
15846         }
15847 }
15848
15849 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15850 {
15851         if (tg3_flag(tp, PCI_EXPRESS)) {
15852                 strcpy(str, "PCI Express");
15853                 return str;
15854         } else if (tg3_flag(tp, PCIX_MODE)) {
15855                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15856
15857                 strcpy(str, "PCIX:");
15858
15859                 if ((clock_ctrl == 7) ||
15860                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15861                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15862                         strcat(str, "133MHz");
15863                 else if (clock_ctrl == 0)
15864                         strcat(str, "33MHz");
15865                 else if (clock_ctrl == 2)
15866                         strcat(str, "50MHz");
15867                 else if (clock_ctrl == 4)
15868                         strcat(str, "66MHz");
15869                 else if (clock_ctrl == 6)
15870                         strcat(str, "100MHz");
15871         } else {
15872                 strcpy(str, "PCI:");
15873                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15874                         strcat(str, "66MHz");
15875                 else
15876                         strcat(str, "33MHz");
15877         }
15878         if (tg3_flag(tp, PCI_32BIT))
15879                 strcat(str, ":32-bit");
15880         else
15881                 strcat(str, ":64-bit");
15882         return str;
15883 }
15884
15885 static void __devinit tg3_init_coal(struct tg3 *tp)
15886 {
15887         struct ethtool_coalesce *ec = &tp->coal;
15888
15889         memset(ec, 0, sizeof(*ec));
15890         ec->cmd = ETHTOOL_GCOALESCE;
15891         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15892         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15893         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15894         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15895         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15896         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15897         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15898         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15899         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15900
15901         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15902                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15903                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15904                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15905                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15906                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15907         }
15908
15909         if (tg3_flag(tp, 5705_PLUS)) {
15910                 ec->rx_coalesce_usecs_irq = 0;
15911                 ec->tx_coalesce_usecs_irq = 0;
15912                 ec->stats_block_coalesce_usecs = 0;
15913         }
15914 }
15915
15916 static int __devinit tg3_init_one(struct pci_dev *pdev,
15917                                   const struct pci_device_id *ent)
15918 {
15919         struct net_device *dev;
15920         struct tg3 *tp;
15921         int i, err, pm_cap;
15922         u32 sndmbx, rcvmbx, intmbx;
15923         char str[40];
15924         u64 dma_mask, persist_dma_mask;
15925         netdev_features_t features = 0;
15926
15927         printk_once(KERN_INFO "%s\n", version);
15928
15929         err = pci_enable_device(pdev);
15930         if (err) {
15931                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15932                 return err;
15933         }
15934
15935         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15936         if (err) {
15937                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15938                 goto err_out_disable_pdev;
15939         }
15940
15941         pci_set_master(pdev);
15942
15943         /* Find power-management capability. */
15944         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15945         if (pm_cap == 0) {
15946                 dev_err(&pdev->dev,
15947                         "Cannot find Power Management capability, aborting\n");
15948                 err = -EIO;
15949                 goto err_out_free_res;
15950         }
15951
15952         err = pci_set_power_state(pdev, PCI_D0);
15953         if (err) {
15954                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15955                 goto err_out_free_res;
15956         }
15957
15958         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15959         if (!dev) {
15960                 err = -ENOMEM;
15961                 goto err_out_power_down;
15962         }
15963
15964         SET_NETDEV_DEV(dev, &pdev->dev);
15965
15966         tp = netdev_priv(dev);
15967         tp->pdev = pdev;
15968         tp->dev = dev;
15969         tp->pm_cap = pm_cap;
15970         tp->rx_mode = TG3_DEF_RX_MODE;
15971         tp->tx_mode = TG3_DEF_TX_MODE;
15972
15973         if (tg3_debug > 0)
15974                 tp->msg_enable = tg3_debug;
15975         else
15976                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15977
15978         /* The word/byte swap controls here govern register access byte
15979          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15980          * setting below.
15981          */
15982         tp->misc_host_ctrl =
15983                 MISC_HOST_CTRL_MASK_PCI_INT |
15984                 MISC_HOST_CTRL_WORD_SWAP |
15985                 MISC_HOST_CTRL_INDIR_ACCESS |
15986                 MISC_HOST_CTRL_PCISTATE_RW;
15987
15988         /* The NONFRM (non-frame) byte/word swap controls take effect
15989          * on descriptor entries, i.e. anything which isn't packet data.
15990          *
15991          * The StrongARM chips on the board (one for tx, one for rx)
15992          * are running in big-endian mode.
15993          */
15994         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15995                         GRC_MODE_WSWAP_NONFRM_DATA);
15996 #ifdef __BIG_ENDIAN
15997         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15998 #endif
15999         spin_lock_init(&tp->lock);
16000         spin_lock_init(&tp->indirect_lock);
16001         INIT_WORK(&tp->reset_task, tg3_reset_task);
16002
16003         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16004         if (!tp->regs) {
16005                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16006                 err = -ENOMEM;
16007                 goto err_out_free_dev;
16008         }
16009
16010         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16011             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16012             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16013             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16014             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16015             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16016             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16017             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16018                 tg3_flag_set(tp, ENABLE_APE);
16019                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16020                 if (!tp->aperegs) {
16021                         dev_err(&pdev->dev,
16022                                 "Cannot map APE registers, aborting\n");
16023                         err = -ENOMEM;
16024                         goto err_out_iounmap;
16025                 }
16026         }
16027
16028         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16029         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16030
16031         dev->ethtool_ops = &tg3_ethtool_ops;
16032         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16033         dev->netdev_ops = &tg3_netdev_ops;
16034         dev->irq = pdev->irq;
16035
16036         err = tg3_get_invariants(tp);
16037         if (err) {
16038                 dev_err(&pdev->dev,
16039                         "Problem fetching invariants of chip, aborting\n");
16040                 goto err_out_apeunmap;
16041         }
16042
16043         /* The EPB bridge inside 5714, 5715, and 5780 and any
16044          * device behind the EPB cannot support DMA addresses > 40-bit.
16045          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16046          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16047          * do DMA address check in tg3_start_xmit().
16048          */
16049         if (tg3_flag(tp, IS_5788))
16050                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16051         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16052                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16053 #ifdef CONFIG_HIGHMEM
16054                 dma_mask = DMA_BIT_MASK(64);
16055 #endif
16056         } else
16057                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16058
16059         /* Configure DMA attributes. */
16060         if (dma_mask > DMA_BIT_MASK(32)) {
16061                 err = pci_set_dma_mask(pdev, dma_mask);
16062                 if (!err) {
16063                         features |= NETIF_F_HIGHDMA;
16064                         err = pci_set_consistent_dma_mask(pdev,
16065                                                           persist_dma_mask);
16066                         if (err < 0) {
16067                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16068                                         "DMA for consistent allocations\n");
16069                                 goto err_out_apeunmap;
16070                         }
16071                 }
16072         }
16073         if (err || dma_mask == DMA_BIT_MASK(32)) {
16074                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16075                 if (err) {
16076                         dev_err(&pdev->dev,
16077                                 "No usable DMA configuration, aborting\n");
16078                         goto err_out_apeunmap;
16079                 }
16080         }
16081
16082         tg3_init_bufmgr_config(tp);
16083
16084         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16085
16086         /* 5700 B0 chips do not support checksumming correctly due
16087          * to hardware bugs.
16088          */
16089         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16090                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16091
16092                 if (tg3_flag(tp, 5755_PLUS))
16093                         features |= NETIF_F_IPV6_CSUM;
16094         }
16095
16096         /* TSO is on by default on chips that support hardware TSO.
16097          * Firmware TSO on older chips gives lower performance, so it
16098          * is off by default, but can be enabled using ethtool.
16099          */
16100         if ((tg3_flag(tp, HW_TSO_1) ||
16101              tg3_flag(tp, HW_TSO_2) ||
16102              tg3_flag(tp, HW_TSO_3)) &&
16103             (features & NETIF_F_IP_CSUM))
16104                 features |= NETIF_F_TSO;
16105         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16106                 if (features & NETIF_F_IPV6_CSUM)
16107                         features |= NETIF_F_TSO6;
16108                 if (tg3_flag(tp, HW_TSO_3) ||
16109                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16110                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16111                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16113                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16114                         features |= NETIF_F_TSO_ECN;
16115         }
16116
16117         dev->features |= features;
16118         dev->vlan_features |= features;
16119
16120         /*
16121          * Add loopback capability only for a subset of devices that support
16122          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16123          * loopback for the remaining devices.
16124          */
16125         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16126             !tg3_flag(tp, CPMU_PRESENT))
16127                 /* Add the loopback capability */
16128                 features |= NETIF_F_LOOPBACK;
16129
16130         dev->hw_features |= features;
16131
16132         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16133             !tg3_flag(tp, TSO_CAPABLE) &&
16134             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16135                 tg3_flag_set(tp, MAX_RXPEND_64);
16136                 tp->rx_pending = 63;
16137         }
16138
16139         err = tg3_get_device_address(tp);
16140         if (err) {
16141                 dev_err(&pdev->dev,
16142                         "Could not obtain valid ethernet address, aborting\n");
16143                 goto err_out_apeunmap;
16144         }
16145
16146         /*
16147          * Reset the chip in case a UNDI or EFI driver did not shut it
16148          * down; otherwise the DMA self test will enable WDMAC and we'll
16149          * see (spurious) pending DMA on the PCI bus at that point.
16150          */
16151         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16152             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16153                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16154                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16155         }
16156
16157         err = tg3_test_dma(tp);
16158         if (err) {
16159                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16160                 goto err_out_apeunmap;
16161         }
16162
16163         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16164         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16165         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16166         for (i = 0; i < tp->irq_max; i++) {
16167                 struct tg3_napi *tnapi = &tp->napi[i];
16168
16169                 tnapi->tp = tp;
16170                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16171
16172                 tnapi->int_mbox = intmbx;
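                /* Interrupt mailboxes for the first five vectors are
                 * spaced 8 bytes apart; later vectors are packed at
                 * 4-byte intervals.
                 */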
16173                 if (i <= 4)
16174                         intmbx += 0x8;
16175                 else
16176                         intmbx += 0x4;
16177
16178                 tnapi->consmbox = rcvmbx;
16179                 tnapi->prodmbox = sndmbx;
16180
16181                 if (i)
16182                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16183                 else
16184                         tnapi->coal_now = HOSTCC_MODE_NOW;
16185
16186                 if (!tg3_flag(tp, SUPPORT_MSIX))
16187                         break;
16188
16189                 /*
16190                  * If we support MSIX, we'll be using RSS.  If we're using
16191                  * RSS, the first vector only handles link interrupts and the
16192                  * remaining vectors handle rx and tx interrupts.  Reuse the
16193                  * mailbox values for the next iteration.  The values we setup
16194                  * mailbox values for the next iteration.  The values we set up
16195                  */
16196                 if (!i)
16197                         continue;
16198
16199                 rcvmbx += 0x8;
16200
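                /* The send producer mailboxes are not laid out at
                 * consecutive offsets; the -0x4/+0xc zig-zag below
                 * steps to the next one.
                 */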
16201                 if (sndmbx & 0x4)
16202                         sndmbx -= 0x4;
16203                 else
16204                         sndmbx += 0xc;
16205         }
16206
16207         tg3_init_coal(tp);
16208
16209         pci_set_drvdata(pdev, dev);
16210
16211         if (tg3_flag(tp, 5717_PLUS)) {
16212                 /* Resume a low-power mode */
16213                 tg3_frob_aux_power(tp, false);
16214         }
16215
16216         tg3_timer_init(tp);
16217
16218         err = register_netdev(dev);
16219         if (err) {
16220                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16221                 goto err_out_apeunmap;
16222         }
16223
16224         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16225                     tp->board_part_number,
16226                     tp->pci_chip_rev_id,
16227                     tg3_bus_string(tp, str),
16228                     dev->dev_addr);
16229
16230         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16231                 struct phy_device *phydev;
16232                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16233                 netdev_info(dev,
16234                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16235                             phydev->drv->name, dev_name(&phydev->dev));
16236         } else {
16237                 char *ethtype;
16238
16239                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16240                         ethtype = "10/100Base-TX";
16241                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16242                         ethtype = "1000Base-SX";
16243                 else
16244                         ethtype = "10/100/1000Base-T";
16245
16246                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16247                             "(WireSpeed[%d], EEE[%d])\n",
16248                             tg3_phy_string(tp), ethtype,
16249                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16250                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16251         }
16252
16253         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16254                     (dev->features & NETIF_F_RXCSUM) != 0,
16255                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16256                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16257                     tg3_flag(tp, ENABLE_ASF) != 0,
16258                     tg3_flag(tp, TSO_CAPABLE) != 0);
16259         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16260                     tp->dma_rwctrl,
16261                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16262                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16263
16264         pci_save_state(pdev);
16265
16266         return 0;
16267
16268 err_out_apeunmap:
16269         if (tp->aperegs) {
16270                 iounmap(tp->aperegs);
16271                 tp->aperegs = NULL;
16272         }
16273
16274 err_out_iounmap:
16275         if (tp->regs) {
16276                 iounmap(tp->regs);
16277                 tp->regs = NULL;
16278         }
16279
16280 err_out_free_dev:
16281         free_netdev(dev);
16282
16283 err_out_power_down:
16284         pci_set_power_state(pdev, PCI_D3hot);
16285
16286 err_out_free_res:
16287         pci_release_regions(pdev);
16288
16289 err_out_disable_pdev:
16290         pci_disable_device(pdev);
16291         pci_set_drvdata(pdev, NULL);
16292         return err;
16293 }
16294
16295 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16296 {
16297         struct net_device *dev = pci_get_drvdata(pdev);
16298
16299         if (dev) {
16300                 struct tg3 *tp = netdev_priv(dev);
16301
16302                 release_firmware(tp->fw);
16303
16304                 tg3_reset_task_cancel(tp);
16305
16306                 if (tg3_flag(tp, USE_PHYLIB)) {
16307                         tg3_phy_fini(tp);
16308                         tg3_mdio_fini(tp);
16309                 }
16310
16311                 unregister_netdev(dev);
16312                 if (tp->aperegs) {
16313                         iounmap(tp->aperegs);
16314                         tp->aperegs = NULL;
16315                 }
16316                 if (tp->regs) {
16317                         iounmap(tp->regs);
16318                         tp->regs = NULL;
16319                 }
16320                 free_netdev(dev);
16321                 pci_release_regions(pdev);
16322                 pci_disable_device(pdev);
16323                 pci_set_drvdata(pdev, NULL);
16324         }
16325 }
16326
16327 #ifdef CONFIG_PM_SLEEP
16328 static int tg3_suspend(struct device *device)
16329 {
16330         struct pci_dev *pdev = to_pci_dev(device);
16331         struct net_device *dev = pci_get_drvdata(pdev);
16332         struct tg3 *tp = netdev_priv(dev);
16333         int err;
16334
16335         if (!netif_running(dev))
16336                 return 0;
16337
16338         tg3_reset_task_cancel(tp);
16339         tg3_phy_stop(tp);
16340         tg3_netif_stop(tp);
16341
16342         tg3_timer_stop(tp);
16343
16344         tg3_full_lock(tp, 1);
16345         tg3_disable_ints(tp);
16346         tg3_full_unlock(tp);
16347
16348         netif_device_detach(dev);
16349
16350         tg3_full_lock(tp, 0);
16351         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16352         tg3_flag_clear(tp, INIT_COMPLETE);
16353         tg3_full_unlock(tp);
16354
16355         err = tg3_power_down_prepare(tp);
16356         if (err) {
16357                 int err2;
16358
16359                 tg3_full_lock(tp, 0);
16360
16361                 tg3_flag_set(tp, INIT_COMPLETE);
16362                 err2 = tg3_restart_hw(tp, 1);
16363                 if (err2)
16364                         goto out;
16365
16366                 tg3_timer_start(tp);
16367
16368                 netif_device_attach(dev);
16369                 tg3_netif_start(tp);
16370
16371 out:
16372                 tg3_full_unlock(tp);
16373
16374                 if (!err2)
16375                         tg3_phy_start(tp);
16376         }
16377
16378         return err;
16379 }
16380
16381 static int tg3_resume(struct device *device)
16382 {
16383         struct pci_dev *pdev = to_pci_dev(device);
16384         struct net_device *dev = pci_get_drvdata(pdev);
16385         struct tg3 *tp = netdev_priv(dev);
16386         int err;
16387
16388         if (!netif_running(dev))
16389                 return 0;
16390
16391         netif_device_attach(dev);
16392
16393         tg3_full_lock(tp, 0);
16394
16395         tg3_flag_set(tp, INIT_COMPLETE);
16396         err = tg3_restart_hw(tp, 1);
16397         if (err)
16398                 goto out;
16399
16400         tg3_timer_start(tp);
16401
16402         tg3_netif_start(tp);
16403
16404 out:
16405         tg3_full_unlock(tp);
16406
16407         if (!err)
16408                 tg3_phy_start(tp);
16409
16410         return err;
16411 }
16412
16413 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16414 #define TG3_PM_OPS (&tg3_pm_ops)
16415
16416 #else
16417
16418 #define TG3_PM_OPS NULL
16419
16420 #endif /* CONFIG_PM_SLEEP */
16421
16422 /**
16423  * tg3_io_error_detected - called when PCI error is detected
16424  * @pdev: Pointer to PCI device
16425  * @state: The current pci connection state
16426  *
16427  * This function is called after a PCI bus error affecting
16428  * this device has been detected.
16429  */
16430 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16431                                               pci_channel_state_t state)
16432 {
16433         struct net_device *netdev = pci_get_drvdata(pdev);
16434         struct tg3 *tp = netdev_priv(netdev);
16435         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16436
16437         netdev_info(netdev, "PCI I/O error detected\n");
16438
16439         rtnl_lock();
16440
16441         if (!netif_running(netdev))
16442                 goto done;
16443
16444         tg3_phy_stop(tp);
16445
16446         tg3_netif_stop(tp);
16447
16448         tg3_timer_stop(tp);
16449
16450         /* We want to make sure that the reset task doesn't run */
16451         tg3_reset_task_cancel(tp);
16452
16453         netif_device_detach(netdev);
16454
16455         /* Clean up software state, even if MMIO is blocked */
16456         tg3_full_lock(tp, 0);
16457         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16458         tg3_full_unlock(tp);
16459
16460 done:
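        /* A permanently failed channel cannot be recovered; otherwise
         * disable the device and ask the core for a slot reset.
         */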
16461         if (state == pci_channel_io_perm_failure)
16462                 err = PCI_ERS_RESULT_DISCONNECT;
16463         else
16464                 pci_disable_device(pdev);
16465
16466         rtnl_unlock();
16467
16468         return err;
16469 }
16470
16471 /**
16472  * tg3_io_slot_reset - called after the PCI bus has been reset.
16473  * @pdev: Pointer to PCI device
16474  *
16475  * Restart the card from scratch, as if from a cold-boot.
16476  * At this point, the card has experienced a hard reset,
16477  * followed by fixups by the BIOS, and has its config space
16478  * set up identically to what it was at cold boot.
16479  */
16480 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16481 {
16482         struct net_device *netdev = pci_get_drvdata(pdev);
16483         struct tg3 *tp = netdev_priv(netdev);
16484         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16485         int err;
16486
16487         rtnl_lock();
16488
16489         if (pci_enable_device(pdev)) {
16490                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16491                 goto done;
16492         }
16493
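        /* Re-enable bus mastering, restore the previously saved config
         * space, and save it again so a later pci_restore_state()
         * starts from this known-good state.
         */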
16494         pci_set_master(pdev);
16495         pci_restore_state(pdev);
16496         pci_save_state(pdev);
16497
16498         if (!netif_running(netdev)) {
16499                 rc = PCI_ERS_RESULT_RECOVERED;
16500                 goto done;
16501         }
16502
16503         err = tg3_power_up(tp);
16504         if (err)
16505                 goto done;
16506
16507         rc = PCI_ERS_RESULT_RECOVERED;
16508
16509 done:
16510         rtnl_unlock();
16511
16512         return rc;
16513 }
16514
16515 /**
16516  * tg3_io_resume - called when traffic can start flowing again.
16517  * @pdev: Pointer to PCI device
16518  *
16519  * This callback is called when the error recovery driver tells
16520  * us that it's OK to resume normal operation.
16521  */
16522 static void tg3_io_resume(struct pci_dev *pdev)
16523 {
16524         struct net_device *netdev = pci_get_drvdata(pdev);
16525         struct tg3 *tp = netdev_priv(netdev);
16526         int err;
16527
16528         rtnl_lock();
16529
16530         if (!netif_running(netdev))
16531                 goto done;
16532
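        /* Reinitialize the hardware from scratch under the full lock */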
16533         tg3_full_lock(tp, 0);
16534         tg3_flag_set(tp, INIT_COMPLETE);
16535         err = tg3_restart_hw(tp, 1);
16536         tg3_full_unlock(tp);
16537         if (err) {
16538                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16539                 goto done;
16540         }
16541
16542         netif_device_attach(netdev);
16543
16544         tg3_timer_start(tp);
16545
16546         tg3_netif_start(tp);
16547
16548         tg3_phy_start(tp);
16549
16550 done:
16551         rtnl_unlock();
16552 }
16553
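/*
 * PCI error recovery callbacks: the PCI core invokes .error_detected
 * when a bus error is reported, .slot_reset after the link/slot has
 * been reset, and .resume once normal traffic may flow again.
 */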
16554 static const struct pci_error_handlers tg3_err_handler = {
16555         .error_detected = tg3_io_error_detected,
16556         .slot_reset     = tg3_io_slot_reset,
16557         .resume         = tg3_io_resume
16558 };
16559
16560 static struct pci_driver tg3_driver = {
16561         .name           = DRV_MODULE_NAME,
16562         .id_table       = tg3_pci_tbl,
16563         .probe          = tg3_init_one,
16564         .remove         = __devexit_p(tg3_remove_one),
16565         .err_handler    = &tg3_err_handler,
16566         .driver.pm      = TG3_PM_OPS,
16567 };
16568
16569 static int __init tg3_init(void)
16570 {
16571         return pci_register_driver(&tg3_driver);
16572 }
16573
16574 static void __exit tg3_cleanup(void)
16575 {
16576         pci_unregister_driver(&tg3_driver);
16577 }
16578
16579 module_init(tg3_init);
16580 module_exit(tg3_cleanup);