/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

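/* Illustrative usage of the flag helpers above (example only): feature
 * bits are tested and updated through these wrappers rather than by
 * touching tp->tg3_flags directly, e.g.
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * The TG3_FLAG_##flag token pasting maps the short names used at call
 * sites onto the enum TG3_FLAGS values declared in tg3.h.
 */
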
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
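/* NEXT_TX above is an instance of the '& (foo - 1)' trick described in
 * the preceding comment: TG3_TX_RING_SIZE is a power of two, so masking
 * with (TG3_TX_RING_SIZE - 1) is equivalent to '% TG3_TX_RING_SIZE',
 * e.g. NEXT_TX(511) == (512 & 511) == 0, with no hardware modulo needed.
 */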

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
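
/* Sketch of how the threshold is consulted on the receive path (an
 * illustration, not a verbatim excerpt from the rx code): frames at or
 * below the threshold are copied into a fresh skb so the mapped ring
 * buffer can be recycled immediately, larger frames donate the buffer:
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ...unmap the ring buffer and build the skb around it...
 *      else
 *              ...allocate a small skb and copy the frame into it...
 *
 * When TG3_RX_COPY_THRESH() expands to the constant above, the
 * (tp)->rx_copy_thresh dereference is avoided entirely.
 */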

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
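/* With the default TG3_DEF_TX_RING_PENDING of 511, for example, the TX
 * queue is not woken until at least 511 / 4 = 127 descriptors are free.
 */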
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
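
/* Illustrative use of the flushed-write helper through the tw32_wait_f()
 * macro defined below, as in the GPIO power switch sequencing elsewhere
 * in the driver (example only):
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * This guarantees TG3_GRC_LCLCTL_PWRSW_DELAY usec of settling time
 * whether the write is posted or performed non-posted.
 */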

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Check if the heartbeat interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

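/* Clause-45 MMD register access tunneled through the clause-22 MMD
 * control/address register pair: select the device address, latch the
 * register address, switch the control register into no-increment data
 * mode, then transfer the data word itself.
 */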
1240 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1241 {
1242         int err;
1243
1244         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1245         if (err)
1246                 goto done;
1247
1248         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1249         if (err)
1250                 goto done;
1251
1252         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1253                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1254         if (err)
1255                 goto done;
1256
1257         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1258
1259 done:
1260         return err;
1261 }
1262
1263 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1264 {
1265         int err;
1266
1267         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1268         if (err)
1269                 goto done;
1270
1271         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1272         if (err)
1273                 goto done;
1274
1275         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1276                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1277         if (err)
1278                 goto done;
1279
1280         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1281
1282 done:
1283         return err;
1284 }
1285
1286 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1287 {
1288         int err;
1289
1290         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1291         if (!err)
1292                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1293
1294         return err;
1295 }
1296
1297 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1298 {
1299         int err;
1300
1301         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1302         if (!err)
1303                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1304
1305         return err;
1306 }
1307
1308 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1309 {
1310         int err;
1311
1312         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1313                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1314                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1315         if (!err)
1316                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1317
1318         return err;
1319 }
1320
1321 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1322 {
1323         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1324                 set |= MII_TG3_AUXCTL_MISC_WREN;
1325
1326         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1327 }
1328
1329 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1330 {
1331         u32 val;
1332         int err;
1333
1334         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1335
1336         if (err)
1337                 return err;
1338
1339         if (enable)
1340                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1341         else
1342                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1343
1344         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1345                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1346
1347         return err;
1348 }
1349
1350 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1351 {
1352         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1353                             reg | val | MII_TG3_MISC_SHDW_WREN);
1354 }
1355
1356 static int tg3_bmcr_reset(struct tg3 *tp)
1357 {
1358         u32 phy_control;
1359         int limit, err;
1360
1361         /* OK, reset it, and poll the BMCR_RESET bit until it
1362          * clears or we time out.
1363          */
1364         phy_control = BMCR_RESET;
1365         err = tg3_writephy(tp, MII_BMCR, phy_control);
1366         if (err != 0)
1367                 return -EBUSY;
1368
1369         limit = 5000;
1370         while (limit--) {
1371                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1372                 if (err != 0)
1373                         return -EBUSY;
1374
1375                 if ((phy_control & BMCR_RESET) == 0) {
1376                         udelay(40);
1377                         break;
1378                 }
1379                 udelay(10);
1380         }
1381         if (limit < 0)
1382                 return -EBUSY;
1383
1384         return 0;
1385 }
1386
1387 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1388 {
1389         struct tg3 *tp = bp->priv;
1390         u32 val;
1391
1392         spin_lock_bh(&tp->lock);
1393
1394         if (__tg3_readphy(tp, mii_id, reg, &val))
1395                 val = -EIO;
1396
1397         spin_unlock_bh(&tp->lock);
1398
1399         return val;
1400 }
1401
1402 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1403 {
1404         struct tg3 *tp = bp->priv;
1405         u32 ret = 0;
1406
1407         spin_lock_bh(&tp->lock);
1408
1409         if (__tg3_writephy(tp, mii_id, reg, val))
1410                 ret = -EIO;
1411
1412         spin_unlock_bh(&tp->lock);
1413
1414         return ret;
1415 }
1416
1417 static void tg3_mdio_config_5785(struct tg3 *tp)
1418 {
1419         u32 val;
1420         struct phy_device *phydev;
1421
1422         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1423         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1424         case PHY_ID_BCM50610:
1425         case PHY_ID_BCM50610M:
1426                 val = MAC_PHYCFG2_50610_LED_MODES;
1427                 break;
1428         case PHY_ID_BCMAC131:
1429                 val = MAC_PHYCFG2_AC131_LED_MODES;
1430                 break;
1431         case PHY_ID_RTL8211C:
1432                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1433                 break;
1434         case PHY_ID_RTL8201E:
1435                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1436                 break;
1437         default:
1438                 return;
1439         }
1440
1441         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1442                 tw32(MAC_PHYCFG2, val);
1443
1444                 val = tr32(MAC_PHYCFG1);
1445                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1446                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1447                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1448                 tw32(MAC_PHYCFG1, val);
1449
1450                 return;
1451         }
1452
1453         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1454                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1455                        MAC_PHYCFG2_FMODE_MASK_MASK |
1456                        MAC_PHYCFG2_GMODE_MASK_MASK |
1457                        MAC_PHYCFG2_ACT_MASK_MASK   |
1458                        MAC_PHYCFG2_QUAL_MASK_MASK |
1459                        MAC_PHYCFG2_INBAND_ENABLE;
1460
1461         tw32(MAC_PHYCFG2, val);
1462
1463         val = tr32(MAC_PHYCFG1);
1464         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1465                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1466         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1467                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1468                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1469                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1470                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1471         }
1472         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1473                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1474         tw32(MAC_PHYCFG1, val);
1475
1476         val = tr32(MAC_EXT_RGMII_MODE);
1477         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1478                  MAC_RGMII_MODE_RX_QUALITY |
1479                  MAC_RGMII_MODE_RX_ACTIVITY |
1480                  MAC_RGMII_MODE_RX_ENG_DET |
1481                  MAC_RGMII_MODE_TX_ENABLE |
1482                  MAC_RGMII_MODE_TX_LOWPWR |
1483                  MAC_RGMII_MODE_TX_RESET);
1484         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1485                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1486                         val |= MAC_RGMII_MODE_RX_INT_B |
1487                                MAC_RGMII_MODE_RX_QUALITY |
1488                                MAC_RGMII_MODE_RX_ACTIVITY |
1489                                MAC_RGMII_MODE_RX_ENG_DET;
1490                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1491                         val |= MAC_RGMII_MODE_TX_ENABLE |
1492                                MAC_RGMII_MODE_TX_LOWPWR |
1493                                MAC_RGMII_MODE_TX_RESET;
1494         }
1495         tw32(MAC_EXT_RGMII_MODE, val);
1496 }
1497
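/* Take the MDIO interface out of auto-poll mode so reads and writes are
 * driver-initiated, then reapply the 5785 configuration if the mdio bus
 * has already been registered.
 */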
1498 static void tg3_mdio_start(struct tg3 *tp)
1499 {
1500         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1501         tw32_f(MAC_MI_MODE, tp->mi_mode);
1502         udelay(80);
1503
1504         if (tg3_flag(tp, MDIOBUS_INITED) &&
1505             tg3_asic_rev(tp) == ASIC_REV_5785)
1506                 tg3_mdio_config_5785(tp);
1507 }
1508
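/* Work out the PHY address and, when phylib is in use, allocate and
 * register the mdio bus.  On 5717-plus parts the address is derived
 * from the PCI function number, with serdes devices offset by 7;
 * SSB/roboswitch parts ask the ssb layer instead.
 */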
1509 static int tg3_mdio_init(struct tg3 *tp)
1510 {
1511         int i;
1512         u32 reg;
1513         struct phy_device *phydev;
1514
1515         if (tg3_flag(tp, 5717_PLUS)) {
1516                 u32 is_serdes;
1517
1518                 tp->phy_addr = tp->pci_fn + 1;
1519
1520                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1521                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1522                 else
1523                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1524                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1525                 if (is_serdes)
1526                         tp->phy_addr += 7;
1527         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1528                 int addr;
1529
1530                 addr = ssb_gige_get_phyaddr(tp->pdev);
1531                 if (addr < 0)
1532                         return addr;
1533                 tp->phy_addr = addr;
1534         } else
1535                 tp->phy_addr = TG3_PHY_MII_ADDR;
1536
1537         tg3_mdio_start(tp);
1538
1539         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1540                 return 0;
1541
1542         tp->mdio_bus = mdiobus_alloc();
1543         if (tp->mdio_bus == NULL)
1544                 return -ENOMEM;
1545
1546         tp->mdio_bus->name     = "tg3 mdio bus";
1547         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1548                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1549         tp->mdio_bus->priv     = tp;
1550         tp->mdio_bus->parent   = &tp->pdev->dev;
1551         tp->mdio_bus->read     = &tg3_mdio_read;
1552         tp->mdio_bus->write    = &tg3_mdio_write;
1553         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1554
1555         /* The bus registration will look for all the PHYs on the mdio bus.
1556          * Unfortunately, it does not ensure the PHY is powered up before
1557          * accessing the PHY ID registers.  A chip reset is the
1558          * quickest way to bring the device back to an operational state.
1559          */
1560         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1561                 tg3_bmcr_reset(tp);
1562
1563         i = mdiobus_register(tp->mdio_bus);
1564         if (i) {
1565                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1566                 mdiobus_free(tp->mdio_bus);
1567                 return i;
1568         }
1569
1570         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1571
1572         if (!phydev || !phydev->drv) {
1573                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1574                 mdiobus_unregister(tp->mdio_bus);
1575                 mdiobus_free(tp->mdio_bus);
1576                 return -ENODEV;
1577         }
1578
1579         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1580         case PHY_ID_BCM57780:
1581                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1582                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 break;
1584         case PHY_ID_BCM50610:
1585         case PHY_ID_BCM50610M:
1586                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1587                                      PHY_BRCM_RX_REFCLK_UNUSED |
1588                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1589                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1590                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1591                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1592                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1593                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1594                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1595                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1596                 /* fall through */
1597         case PHY_ID_RTL8211C:
1598                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1599                 break;
1600         case PHY_ID_RTL8201E:
1601         case PHY_ID_BCMAC131:
1602                 phydev->interface = PHY_INTERFACE_MODE_MII;
1603                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1604                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1605                 break;
1606         }
1607
1608         tg3_flag_set(tp, MDIOBUS_INITED);
1609
1610         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1611                 tg3_mdio_config_5785(tp);
1612
1613         return 0;
1614 }
1615
1616 static void tg3_mdio_fini(struct tg3 *tp)
1617 {
1618         if (tg3_flag(tp, MDIOBUS_INITED)) {
1619                 tg3_flag_clear(tp, MDIOBUS_INITED);
1620                 mdiobus_unregister(tp->mdio_bus);
1621                 mdiobus_free(tp->mdio_bus);
1622         }
1623 }
1624
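/* Driver-to-firmware events: commands are written into the
 * NIC_SRAM_FW_CMD_* mailboxes and then signaled by pulsing
 * GRC_RX_CPU_DRIVER_EVENT.  last_event_jiffies records when the
 * previous event was raised so the next sender can bound its wait for
 * the firmware's ACK.
 */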
1625 /* tp->lock is held. */
1626 static inline void tg3_generate_fw_event(struct tg3 *tp)
1627 {
1628         u32 val;
1629
1630         val = tr32(GRC_RX_CPU_EVENT);
1631         val |= GRC_RX_CPU_DRIVER_EVENT;
1632         tw32_f(GRC_RX_CPU_EVENT, val);
1633
1634         tp->last_event_jiffies = jiffies;
1635 }
1636
1637 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1638
1639 /* tp->lock is held. */
1640 static void tg3_wait_for_event_ack(struct tg3 *tp)
1641 {
1642         int i;
1643         unsigned int delay_cnt;
1644         long time_remain;
1645
1646         /* If enough time has passed, no wait is necessary. */
1647         time_remain = (long)(tp->last_event_jiffies + 1 +
1648                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1649                       (long)jiffies;
1650         if (time_remain < 0)
1651                 return;
1652
1653         /* Check if we can shorten the wait time. */
1654         delay_cnt = jiffies_to_usecs(time_remain);
1655         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1656                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
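        /* Poll in ~8 usec steps (matching the udelay(8) below), adding
         * one so we always check at least once.
         */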
1657         delay_cnt = (delay_cnt >> 3) + 1;
1658
1659         for (i = 0; i < delay_cnt; i++) {
1660                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1661                         break;
1662                 if (pci_channel_offline(tp->pdev))
1663                         break;
1664
1665                 udelay(8);
1666         }
1667 }
1668
1669 /* tp->lock is held. */
1670 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1671 {
1672         u32 reg, val;
1673
1674         val = 0;
1675         if (!tg3_readphy(tp, MII_BMCR, &reg))
1676                 val = reg << 16;
1677         if (!tg3_readphy(tp, MII_BMSR, &reg))
1678                 val |= (reg & 0xffff);
1679         *data++ = val;
1680
1681         val = 0;
1682         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1683                 val = reg << 16;
1684         if (!tg3_readphy(tp, MII_LPA, &reg))
1685                 val |= (reg & 0xffff);
1686         *data++ = val;
1687
1688         val = 0;
1689         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1690                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1691                         val = reg << 16;
1692                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1693                         val |= (reg & 0xffff);
1694         }
1695         *data++ = val;
1696
1697         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1698                 val = reg << 16;
1699         else
1700                 val = 0;
1701         *data++ = val;
1702 }
1703
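/* Push a link update to the management firmware: the four words
 * gathered above (BMCR/BMSR, advertisement/LPA, 1000T control/status,
 * PHY address) go into the command data mailbox, followed by a driver
 * event.
 */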
1704 /* tp->lock is held. */
1705 static void tg3_ump_link_report(struct tg3 *tp)
1706 {
1707         u32 data[4];
1708
1709         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1710                 return;
1711
1712         tg3_phy_gather_ump_data(tp, data);
1713
1714         tg3_wait_for_event_ack(tp);
1715
1716         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1717         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1718         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1719         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1720         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1721         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1722
1723         tg3_generate_fw_event(tp);
1724 }
1725
1726 /* tp->lock is held. */
1727 static void tg3_stop_fw(struct tg3 *tp)
1728 {
1729         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1730                 /* Wait for RX cpu to ACK the previous event. */
1731                 tg3_wait_for_event_ack(tp);
1732
1733                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1734
1735                 tg3_generate_fw_event(tp);
1736
1737                 /* Wait for RX cpu to ACK this event. */
1738                 tg3_wait_for_event_ack(tp);
1739         }
1740 }
1741
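/* Reset signatures: around a chip reset the driver posts DRV_STATE_*
 * values into the firmware state mailbox so ASF firmware knows whether
 * the driver is starting, unloading, or suspending.
 */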
1742 /* tp->lock is held. */
1743 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1744 {
1745         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1746                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1747
1748         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1749                 switch (kind) {
1750                 case RESET_KIND_INIT:
1751                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752                                       DRV_STATE_START);
1753                         break;
1754
1755                 case RESET_KIND_SHUTDOWN:
1756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757                                       DRV_STATE_UNLOAD);
1758                         break;
1759
1760                 case RESET_KIND_SUSPEND:
1761                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762                                       DRV_STATE_SUSPEND);
1763                         break;
1764
1765                 default:
1766                         break;
1767                 }
1768         }
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1773 {
1774         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1775                 switch (kind) {
1776                 case RESET_KIND_INIT:
1777                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778                                       DRV_STATE_START_DONE);
1779                         break;
1780
1781                 case RESET_KIND_SHUTDOWN:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_UNLOAD_DONE);
1784                         break;
1785
1786                 default:
1787                         break;
1788                 }
1789         }
1790 }
1791
1792 /* tp->lock is held. */
1793 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1794 {
1795         if (tg3_flag(tp, ENABLE_ASF)) {
1796                 switch (kind) {
1797                 case RESET_KIND_INIT:
1798                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799                                       DRV_STATE_START);
1800                         break;
1801
1802                 case RESET_KIND_SHUTDOWN:
1803                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804                                       DRV_STATE_UNLOAD);
1805                         break;
1806
1807                 case RESET_KIND_SUSPEND:
1808                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809                                       DRV_STATE_SUSPEND);
1810                         break;
1811
1812                 default:
1813                         break;
1814                 }
1815         }
1816 }
1817
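/* Wait for bootcode to finish after a reset.  5906 parts expose an
 * INIT_DONE bit in VCPU_STATUS; everything else signals completion by
 * writing the one's complement of MAGIC1 back into the firmware
 * mailbox.  Firmware-less boards (some Sun onboard parts) are not an
 * error and are only reported once.
 */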
1818 static int tg3_poll_fw(struct tg3 *tp)
1819 {
1820         int i;
1821         u32 val;
1822
1823         if (tg3_flag(tp, NO_FWARE_REPORTED))
1824                 return 0;
1825
1826         if (tg3_flag(tp, IS_SSB_CORE)) {
1827                 /* We don't use firmware. */
1828                 return 0;
1829         }
1830
1831         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1832                 /* Wait up to 20ms for init done. */
1833                 for (i = 0; i < 200; i++) {
1834                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1835                                 return 0;
1836                         if (pci_channel_offline(tp->pdev))
1837                                 return -ENODEV;
1838
1839                         udelay(100);
1840                 }
1841                 return -ENODEV;
1842         }
1843
1844         /* Wait for firmware initialization to complete. */
1845         for (i = 0; i < 100000; i++) {
1846                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1847                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1848                         break;
1849                 if (pci_channel_offline(tp->pdev)) {
1850                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1851                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1852                                 netdev_info(tp->dev, "No firmware running\n");
1853                         }
1854
1855                         break;
1856                 }
1857
1858                 udelay(10);
1859         }
1860
1861         /* Chip might not be fitted with firmware.  Some Sun onboard
1862          * parts are configured like that.  So don't signal the timeout
1863          * of the above loop as an error, but do report the lack of
1864          * running firmware once.
1865          */
1866         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1867                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1868
1869                 netdev_info(tp->dev, "No firmware running\n");
1870         }
1871
1872         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1873                 /* The 57765 A0 needs a little more
1874                  * time to do some important work.
1875                  */
1876                 mdelay(10);
1877         }
1878
1879         return 0;
1880 }
1881
1882 static void tg3_link_report(struct tg3 *tp)
1883 {
1884         if (!netif_carrier_ok(tp->dev)) {
1885                 netif_info(tp, link, tp->dev, "Link is down\n");
1886                 tg3_ump_link_report(tp);
1887         } else if (netif_msg_link(tp)) {
1888                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1889                             (tp->link_config.active_speed == SPEED_1000 ?
1890                              1000 :
1891                              (tp->link_config.active_speed == SPEED_100 ?
1892                               100 : 10)),
1893                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1894                              "full" : "half"));
1895
1896                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1897                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1898                             "on" : "off",
1899                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1900                             "on" : "off");
1901
1902                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1903                         netdev_info(tp->dev, "EEE is %s\n",
1904                                     tp->setlpicnt ? "enabled" : "disabled");
1905
1906                 tg3_ump_link_report(tp);
1907         }
1908
1909         tp->link_up = netif_carrier_ok(tp->dev);
1910 }
1911
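/* Pause negotiation helpers.  The PAUSE/ASYM_PAUSE advertisement bits
 * (and their 1000BASE-X equivalents) are mapped to FLOW_CTRL_TX/RX;
 * tg3_resolve_flowctrl_1000X() combines the local and link-partner
 * 1000BASE-X advertisements into the resulting TX/RX pause enables.
 */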
1912 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1913 {
1914         u32 flowctrl = 0;
1915
1916         if (adv & ADVERTISE_PAUSE_CAP) {
1917                 flowctrl |= FLOW_CTRL_RX;
1918                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1919                         flowctrl |= FLOW_CTRL_TX;
1920         } else if (adv & ADVERTISE_PAUSE_ASYM)
1921                 flowctrl |= FLOW_CTRL_TX;
1922
1923         return flowctrl;
1924 }
1925
1926 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1927 {
1928         u16 miireg;
1929
1930         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1931                 miireg = ADVERTISE_1000XPAUSE;
1932         else if (flow_ctrl & FLOW_CTRL_TX)
1933                 miireg = ADVERTISE_1000XPSE_ASYM;
1934         else if (flow_ctrl & FLOW_CTRL_RX)
1935                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1936         else
1937                 miireg = 0;
1938
1939         return miireg;
1940 }
1941
1942 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1943 {
1944         u32 flowctrl = 0;
1945
1946         if (adv & ADVERTISE_1000XPAUSE) {
1947                 flowctrl |= FLOW_CTRL_RX;
1948                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1949                         flowctrl |= FLOW_CTRL_TX;
1950         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1951                 flowctrl |= FLOW_CTRL_TX;
1952
1953         return flowctrl;
1954 }
1955
1956 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1957 {
1958         u8 cap = 0;
1959
1960         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1961                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1962         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1963                 if (lcladv & ADVERTISE_1000XPAUSE)
1964                         cap = FLOW_CTRL_RX;
1965                 if (rmtadv & ADVERTISE_1000XPAUSE)
1966                         cap = FLOW_CTRL_TX;
1967         }
1968
1969         return cap;
1970 }
1971
1972 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1973 {
1974         u8 autoneg;
1975         u8 flowctrl = 0;
1976         u32 old_rx_mode = tp->rx_mode;
1977         u32 old_tx_mode = tp->tx_mode;
1978
1979         if (tg3_flag(tp, USE_PHYLIB))
1980                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1981         else
1982                 autoneg = tp->link_config.autoneg;
1983
1984         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1985                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1986                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1987                 else
1988                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1989         } else
1990                 flowctrl = tp->link_config.flowctrl;
1991
1992         tp->link_config.active_flowctrl = flowctrl;
1993
1994         if (flowctrl & FLOW_CTRL_RX)
1995                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1996         else
1997                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1998
1999         if (old_rx_mode != tp->rx_mode)
2000                 tw32_f(MAC_RX_MODE, tp->rx_mode);
2001
2002         if (flowctrl & FLOW_CTRL_TX)
2003                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2004         else
2005                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2006
2007         if (old_tx_mode != tp->tx_mode)
2008                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2009 }
2010
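/* phylib link-change callback.  Mirrors the negotiated speed, duplex,
 * and pause settings into MAC_MODE, the MI status register (5785), and
 * the TX length/IPG register, then reports the link state if anything
 * changed.
 */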
2011 static void tg3_adjust_link(struct net_device *dev)
2012 {
2013         u8 oldflowctrl, linkmesg = 0;
2014         u32 mac_mode, lcl_adv, rmt_adv;
2015         struct tg3 *tp = netdev_priv(dev);
2016         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2017
2018         spin_lock_bh(&tp->lock);
2019
2020         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2021                                     MAC_MODE_HALF_DUPLEX);
2022
2023         oldflowctrl = tp->link_config.active_flowctrl;
2024
2025         if (phydev->link) {
2026                 lcl_adv = 0;
2027                 rmt_adv = 0;
2028
2029                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2030                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2031                 else if (phydev->speed == SPEED_1000 ||
2032                          tg3_asic_rev(tp) != ASIC_REV_5785)
2033                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2034                 else
2035                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2036
2037                 if (phydev->duplex == DUPLEX_HALF)
2038                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2039                 else {
2040                         lcl_adv = mii_advertise_flowctrl(
2041                                   tp->link_config.flowctrl);
2042
2043                         if (phydev->pause)
2044                                 rmt_adv = LPA_PAUSE_CAP;
2045                         if (phydev->asym_pause)
2046                                 rmt_adv |= LPA_PAUSE_ASYM;
2047                 }
2048
2049                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2050         } else
2051                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2052
2053         if (mac_mode != tp->mac_mode) {
2054                 tp->mac_mode = mac_mode;
2055                 tw32_f(MAC_MODE, tp->mac_mode);
2056                 udelay(40);
2057         }
2058
2059         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2060                 if (phydev->speed == SPEED_10)
2061                         tw32(MAC_MI_STAT,
2062                              MAC_MI_STAT_10MBPS_MODE |
2063                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064                 else
2065                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066         }
2067
2068         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2069                 tw32(MAC_TX_LENGTHS,
2070                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2071                       (6 << TX_LENGTHS_IPG_SHIFT) |
2072                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2073         else
2074                 tw32(MAC_TX_LENGTHS,
2075                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076                       (6 << TX_LENGTHS_IPG_SHIFT) |
2077                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078
2079         if (phydev->link != tp->old_link ||
2080             phydev->speed != tp->link_config.active_speed ||
2081             phydev->duplex != tp->link_config.active_duplex ||
2082             oldflowctrl != tp->link_config.active_flowctrl)
2083                 linkmesg = 1;
2084
2085         tp->old_link = phydev->link;
2086         tp->link_config.active_speed = phydev->speed;
2087         tp->link_config.active_duplex = phydev->duplex;
2088
2089         spin_unlock_bh(&tp->lock);
2090
2091         if (linkmesg)
2092                 tg3_link_report(tp);
2093 }
2094
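/* Connect the MAC to the PHY via phylib and clamp the advertised
 * features to what the MAC supports: 1000Mb with asym pause on
 * GMII/RGMII, 100Mb on MII-only and 10/100-restricted parts.
 */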
2095 static int tg3_phy_init(struct tg3 *tp)
2096 {
2097         struct phy_device *phydev;
2098
2099         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2100                 return 0;
2101
2102         /* Bring the PHY back to a known state. */
2103         tg3_bmcr_reset(tp);
2104
2105         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2106
2107         /* Attach the MAC to the PHY. */
2108         phydev = phy_connect(tp->dev, phydev_name(phydev),
2109                              tg3_adjust_link, phydev->interface);
2110         if (IS_ERR(phydev)) {
2111                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2112                 return PTR_ERR(phydev);
2113         }
2114
2115         /* Mask with MAC supported features. */
2116         switch (phydev->interface) {
2117         case PHY_INTERFACE_MODE_GMII:
2118         case PHY_INTERFACE_MODE_RGMII:
2119                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2120                         phy_set_max_speed(phydev, SPEED_1000);
2121                         phy_support_asym_pause(phydev);
2122                         break;
2123                 }
2124                 /* fall through */
2125         case PHY_INTERFACE_MODE_MII:
2126                 phy_set_max_speed(phydev, SPEED_100);
2127                 phy_support_asym_pause(phydev);
2128                 break;
2129         default:
2130                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2131                 return -EINVAL;
2132         }
2133
2134         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2135
2136         phy_attached_info(phydev);
2137
2138         return 0;
2139 }
2140
2141 static void tg3_phy_start(struct tg3 *tp)
2142 {
2143         struct phy_device *phydev;
2144
2145         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2146                 return;
2147
2148         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2149
2150         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2151                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2152                 phydev->speed = tp->link_config.speed;
2153                 phydev->duplex = tp->link_config.duplex;
2154                 phydev->autoneg = tp->link_config.autoneg;
2155                 ethtool_convert_legacy_u32_to_link_mode(
2156                         phydev->advertising, tp->link_config.advertising);
2157         }
2158
2159         phy_start(phydev);
2160
2161         phy_start_aneg(phydev);
2162 }
2163
2164 static void tg3_phy_stop(struct tg3 *tp)
2165 {
2166         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2167                 return;
2168
2169         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2170 }
2171
2172 static void tg3_phy_fini(struct tg3 *tp)
2173 {
2174         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2175                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2176                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2177         }
2178 }
2179
2180 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2181 {
2182         int err;
2183         u32 val;
2184
2185         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2186                 return 0;
2187
2188         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2189                 /* Cannot do read-modify-write on 5401 */
2190                 err = tg3_phy_auxctl_write(tp,
2191                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2192                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2193                                            0x4c20);
2194                 goto done;
2195         }
2196
2197         err = tg3_phy_auxctl_read(tp,
2198                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2199         if (err)
2200                 return err;
2201
2202         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2203         err = tg3_phy_auxctl_write(tp,
2204                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2205
2206 done:
2207         return err;
2208 }
2209
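/* Auto power-down (APD) control.  FET-style PHYs keep the APD bit in
 * the shadowed AUXSTAT2 register behind MII_TG3_FET_TEST; other PHYs
 * are programmed through the MISC_SHDW SCR5 and APD selectors with an
 * 84ms wake timer.
 */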
2210 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2211 {
2212         u32 phytest;
2213
2214         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2215                 u32 phy;
2216
2217                 tg3_writephy(tp, MII_TG3_FET_TEST,
2218                              phytest | MII_TG3_FET_SHADOW_EN);
2219                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2220                         if (enable)
2221                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2222                         else
2223                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2224                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2225                 }
2226                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2227         }
2228 }
2229
2230 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2231 {
2232         u32 reg;
2233
2234         if (!tg3_flag(tp, 5705_PLUS) ||
2235             (tg3_flag(tp, 5717_PLUS) &&
2236              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2237                 return;
2238
2239         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240                 tg3_phy_fet_toggle_apd(tp, enable);
2241                 return;
2242         }
2243
2244         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2245               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2246               MII_TG3_MISC_SHDW_SCR5_SDTL |
2247               MII_TG3_MISC_SHDW_SCR5_C125OE;
2248         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2249                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2250
2251         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2252
2253
2254         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2255         if (enable)
2256                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2257
2258         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2259 }
2260
2261 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2262 {
2263         u32 phy;
2264
2265         if (!tg3_flag(tp, 5705_PLUS) ||
2266             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2267                 return;
2268
2269         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2270                 u32 ephy;
2271
2272                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2273                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2274
2275                         tg3_writephy(tp, MII_TG3_FET_TEST,
2276                                      ephy | MII_TG3_FET_SHADOW_EN);
2277                         if (!tg3_readphy(tp, reg, &phy)) {
2278                                 if (enable)
2279                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2280                                 else
2281                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2282                                 tg3_writephy(tp, reg, phy);
2283                         }
2284                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2285                 }
2286         } else {
2287                 int ret;
2288
2289                 ret = tg3_phy_auxctl_read(tp,
2290                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2291                 if (!ret) {
2292                         if (enable)
2293                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2294                         else
2295                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2296                         tg3_phy_auxctl_write(tp,
2297                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2298                 }
2299         }
2300 }
2301
2302 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2303 {
2304         int ret;
2305         u32 val;
2306
2307         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2308                 return;
2309
2310         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2311         if (!ret)
2312                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2313                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2314 }
2315
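/* Apply per-chip calibration from the OTP word: each bit-field of
 * tp->phy_otp is shifted into place and written to its DSP tap while
 * the AUXCTL shadow DSP access window is open.
 */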
2316 static void tg3_phy_apply_otp(struct tg3 *tp)
2317 {
2318         u32 otp, phy;
2319
2320         if (!tp->phy_otp)
2321                 return;
2322
2323         otp = tp->phy_otp;
2324
2325         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2326                 return;
2327
2328         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2329         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2330         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2331
2332         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2333               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2335
2336         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2337         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2338         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2339
2340         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2341         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2342
2343         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2344         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2345
2346         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2347               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2348         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2349
2350         tg3_phy_toggle_auxctl_smdsp(tp, false);
2351 }
2352
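/* Read the live EEE state back from the PHY's Clause 45 EEE registers
 * and the CPMU: eee_active, link-partner and local advertisements,
 * tx_lpi_enabled, and the LPI timer, into tp->eee or a caller-supplied
 * buffer.
 */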
2353 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2354 {
2355         u32 val;
2356         struct ethtool_eee *dest = &tp->eee;
2357
2358         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2359                 return;
2360
2361         if (eee)
2362                 dest = eee;
2363
2364         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2365                 return;
2366
2367         /* Pull eee_active */
2368         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2369             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2370                 dest->eee_active = 1;
2371         } else
2372                 dest->eee_active = 0;
2373
2374         /* Pull lp advertised settings */
2375         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2376                 return;
2377         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2378
2379         /* Pull advertised and eee_enabled settings */
2380         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2381                 return;
2382         dest->eee_enabled = !!val;
2383         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2384
2385         /* Pull tx_lpi_enabled */
2386         val = tr32(TG3_CPMU_EEE_MODE);
2387         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2388
2389         /* Pull lpi timer value */
2390         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2391 }
2392
2393 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2394 {
2395         u32 val;
2396
2397         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2398                 return;
2399
2400         tp->setlpicnt = 0;
2401
2402         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2403             current_link_up &&
2404             tp->link_config.active_duplex == DUPLEX_FULL &&
2405             (tp->link_config.active_speed == SPEED_100 ||
2406              tp->link_config.active_speed == SPEED_1000)) {
2407                 u32 eeectl;
2408
2409                 if (tp->link_config.active_speed == SPEED_1000)
2410                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2411                 else
2412                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2413
2414                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2415
2416                 tg3_eee_pull_config(tp, NULL);
2417                 if (tp->eee.eee_active)
2418                         tp->setlpicnt = 2;
2419         }
2420
2421         if (!tp->setlpicnt) {
2422                 if (current_link_up &&
2423                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2424                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2425                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2426                 }
2427
2428                 val = tr32(TG3_CPMU_EEE_MODE);
2429                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2430         }
2431 }
2432
2433 static void tg3_phy_eee_enable(struct tg3 *tp)
2434 {
2435         u32 val;
2436
2437         if (tp->link_config.active_speed == SPEED_1000 &&
2438             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2439              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2440              tg3_flag(tp, 57765_CLASS)) &&
2441             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2442                 val = MII_TG3_DSP_TAP26_ALNOKO |
2443                       MII_TG3_DSP_TAP26_RMRXSTO;
2444                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2445                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2446         }
2447
2448         val = tr32(TG3_CPMU_EEE_MODE);
2449         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2450 }
2451
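/* Spin until the DSP macro operation completes, i.e. bit 12 of
 * MII_TG3_DSP_CONTROL clears.  There is no delay in the loop; the MDIO
 * reads themselves pace the polling.
 */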
2452 static int tg3_wait_macro_done(struct tg3 *tp)
2453 {
2454         int limit = 100;
2455
2456         while (limit--) {
2457                 u32 tmp32;
2458
2459                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2460                         if ((tmp32 & 0x1000) == 0)
2461                                 break;
2462                 }
2463         }
2464         if (limit < 0)
2465                 return -EBUSY;
2466
2467         return 0;
2468 }
2469
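/* Write a test pattern into each of the four DSP channels and read it
 * back.  A macro timeout sets *resetp so the caller knows to reset the
 * PHY and retry; a data mismatch fails with -EBUSY after writing
 * 0x4001/0x4005 to DSP register 0x000b.
 */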
2470 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2471 {
2472         static const u32 test_pat[4][6] = {
2473         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2474         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2475         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2476         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2477         };
2478         int chan;
2479
2480         for (chan = 0; chan < 4; chan++) {
2481                 int i;
2482
2483                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2484                              (chan * 0x2000) | 0x0200);
2485                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2486
2487                 for (i = 0; i < 6; i++)
2488                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2489                                      test_pat[chan][i]);
2490
2491                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2492                 if (tg3_wait_macro_done(tp)) {
2493                         *resetp = 1;
2494                         return -EBUSY;
2495                 }
2496
2497                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498                              (chan * 0x2000) | 0x0200);
2499                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2500                 if (tg3_wait_macro_done(tp)) {
2501                         *resetp = 1;
2502                         return -EBUSY;
2503                 }
2504
2505                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2506                 if (tg3_wait_macro_done(tp)) {
2507                         *resetp = 1;
2508                         return -EBUSY;
2509                 }
2510
2511                 for (i = 0; i < 6; i += 2) {
2512                         u32 low, high;
2513
2514                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2515                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2516                             tg3_wait_macro_done(tp)) {
2517                                 *resetp = 1;
2518                                 return -EBUSY;
2519                         }
2520                         low &= 0x7fff;
2521                         high &= 0x000f;
2522                         if (low != test_pat[chan][i] ||
2523                             high != test_pat[chan][i+1]) {
2524                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2525                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2526                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2527
2528                                 return -EBUSY;
2529                         }
2530                 }
2531         }
2532
2533         return 0;
2534 }
2535
2536 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2537 {
2538         int chan;
2539
2540         for (chan = 0; chan < 4; chan++) {
2541                 int i;
2542
2543                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2544                              (chan * 0x2000) | 0x0200);
2545                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2546                 for (i = 0; i < 6; i++)
2547                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2548                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2549                 if (tg3_wait_macro_done(tp))
2550                         return -EBUSY;
2551         }
2552
2553         return 0;
2554 }
2555
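/* PHY reset workaround for 5703/5704/5705: force a 1000Mb full-duplex
 * master link with the transmitter and interrupt blocked, verify the
 * DSP channels with the test pattern (retrying with a fresh BMCR reset
 * if needed), then clear the channels and restore the original
 * CTRL1000 and EXT_CTRL values.
 */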
2556 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2557 {
2558         u32 reg32, phy9_orig;
2559         int retries, do_phy_reset, err;
2560
2561         retries = 10;
2562         do_phy_reset = 1;
2563         do {
2564                 if (do_phy_reset) {
2565                         err = tg3_bmcr_reset(tp);
2566                         if (err)
2567                                 return err;
2568                         do_phy_reset = 0;
2569                 }
2570
2571                 /* Disable transmitter and interrupt.  */
2572                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2573                         continue;
2574
2575                 reg32 |= 0x3000;
2576                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577
2578                 /* Set full-duplex, 1000 Mbps.  */
2579                 tg3_writephy(tp, MII_BMCR,
2580                              BMCR_FULLDPLX | BMCR_SPEED1000);
2581
2582                 /* Set to master mode.  */
2583                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2584                         continue;
2585
2586                 tg3_writephy(tp, MII_CTRL1000,
2587                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2588
2589                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2590                 if (err)
2591                         return err;
2592
2593                 /* Block the PHY control access.  */
2594                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2595
2596                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2597                 if (!err)
2598                         break;
2599         } while (--retries);
2600
2601         err = tg3_phy_reset_chanpat(tp);
2602         if (err)
2603                 return err;
2604
2605         tg3_phydsp_write(tp, 0x8005, 0x0000);
2606
2607         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2608         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2609
2610         tg3_phy_toggle_auxctl_smdsp(tp, false);
2611
2612         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2613
2614         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2615         if (err)
2616                 return err;
2617
2618         reg32 &= ~0x3000;
2619         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2620
2621         return 0;
2622 }
2623
2624 static void tg3_carrier_off(struct tg3 *tp)
2625 {
2626         netif_carrier_off(tp->dev);
2627         tp->link_up = false;
2628 }
2629
2630 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2631 {
2632         if (tg3_flag(tp, ENABLE_ASF))
2633                 netdev_warn(tp->dev,
2634                             "Management side-band traffic will be interrupted during phy settings change\n");
2635 }
2636
2637 /* Fully reset the tigon3 PHY and reapply any chip-specific
2638  * workarounds needed to bring it back to a known-good state.
2639  */
2640 static int tg3_phy_reset(struct tg3 *tp)
2641 {
2642         u32 val, cpmuctrl;
2643         int err;
2644
2645         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2646                 val = tr32(GRC_MISC_CFG);
2647                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2648                 udelay(40);
2649         }
2650         err  = tg3_readphy(tp, MII_BMSR, &val);
2651         err |= tg3_readphy(tp, MII_BMSR, &val);
2652         if (err != 0)
2653                 return -EBUSY;
2654
2655         if (netif_running(tp->dev) && tp->link_up) {
2656                 netif_carrier_off(tp->dev);
2657                 tg3_link_report(tp);
2658         }
2659
2660         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2661             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2662             tg3_asic_rev(tp) == ASIC_REV_5705) {
2663                 err = tg3_phy_reset_5703_4_5(tp);
2664                 if (err)
2665                         return err;
2666                 goto out;
2667         }
2668
2669         cpmuctrl = 0;
2670         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2671             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2672                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2673                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2674                         tw32(TG3_CPMU_CTRL,
2675                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2676         }
2677
2678         err = tg3_bmcr_reset(tp);
2679         if (err)
2680                 return err;
2681
2682         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2683                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2684                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2685
2686                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2687         }
2688
2689         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2690             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2691                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2692                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2693                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2694                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2695                         udelay(40);
2696                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2697                 }
2698         }
2699
2700         if (tg3_flag(tp, 5717_PLUS) &&
2701             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2702                 return 0;
2703
2704         tg3_phy_apply_otp(tp);
2705
2706         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2707                 tg3_phy_toggle_apd(tp, true);
2708         else
2709                 tg3_phy_toggle_apd(tp, false);
2710
2711 out:
2712         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2713             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2715                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2716                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2717         }
2718
2719         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2720                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2721                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2722         }
2723
2724         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2725                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2727                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2728                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2729                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2730                 }
2731         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2732                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2733                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2734                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2735                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2736                                 tg3_writephy(tp, MII_TG3_TEST1,
2737                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2738                         } else
2739                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2740
2741                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2742                 }
2743         }
2744
2745         /* Set the extended packet length bit (bit 14) on all chips
2746          * that support jumbo frames. */
2747         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2748                 /* Cannot do read-modify-write on 5401 */
2749                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2750         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751                 /* Set bit 14 with read-modify-write to preserve other bits */
2752                 err = tg3_phy_auxctl_read(tp,
2753                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2754                 if (!err)
2755                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2756                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2757         }
2758
2759         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2760          * jumbo frames transmission.
2761          */
2762         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2763                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2764                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2765                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2766         }
2767
2768         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2769                 /* adjust output voltage */
2770                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2771         }
2772
2773         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2774                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2775
2776         tg3_phy_toggle_automdix(tp, true);
2777         tg3_phy_set_wirespeed(tp);
2778         return 0;
2779 }
2780
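/* Aux power handoff between the functions of a multi-port device.
 * Each PCI function owns a 4-bit nibble in a shared status word (APE
 * scratchpad on 5717/5719, CPMU driver status elsewhere) carrying its
 * DRVR_PRES and NEED_VAUX bits; the last function standing decides
 * between VAUX and VMAIN.
 */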
2781 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2782 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2783 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2784                                           TG3_GPIO_MSG_NEED_VAUX)
2785 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2786         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2787          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2788          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2789          (TG3_GPIO_MSG_DRVR_PRES << 12))
2790
2791 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2792         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2793          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2794          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2795          (TG3_GPIO_MSG_NEED_VAUX << 12))
2796
2797 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2798 {
2799         u32 status, shift;
2800
2801         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802             tg3_asic_rev(tp) == ASIC_REV_5719)
2803                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2804         else
2805                 status = tr32(TG3_CPMU_DRV_STATUS);
2806
2807         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2808         status &= ~(TG3_GPIO_MSG_MASK << shift);
2809         status |= (newstat << shift);
2810
2811         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2812             tg3_asic_rev(tp) == ASIC_REV_5719)
2813                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2814         else
2815                 tw32(TG3_CPMU_DRV_STATUS, status);
2816
2817         return status >> TG3_APE_GPIO_MSG_SHIFT;
2818 }
2819
2820 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2821 {
2822         if (!tg3_flag(tp, IS_NIC))
2823                 return 0;
2824
2825         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2826             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2827             tg3_asic_rev(tp) == ASIC_REV_5720) {
2828                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829                         return -EIO;
2830
2831                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2832
2833                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2834                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2835
2836                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2837         } else {
2838                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2839                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2840         }
2841
2842         return 0;
2843 }
2844
2845 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2846 {
2847         u32 grc_local_ctrl;
2848
2849         if (!tg3_flag(tp, IS_NIC) ||
2850             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2851             tg3_asic_rev(tp) == ASIC_REV_5701)
2852                 return;
2853
2854         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2855
2856         tw32_wait_f(GRC_LOCAL_CTRL,
2857                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2859
2860         tw32_wait_f(GRC_LOCAL_CTRL,
2861                     grc_local_ctrl,
2862                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2863
2864         tw32_wait_f(GRC_LOCAL_CTRL,
2865                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2866                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 }
2868
2869 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2870 {
2871         if (!tg3_flag(tp, IS_NIC))
2872                 return;
2873
2874         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2875             tg3_asic_rev(tp) == ASIC_REV_5701) {
2876                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2877                             (GRC_LCLCTRL_GPIO_OE0 |
2878                              GRC_LCLCTRL_GPIO_OE1 |
2879                              GRC_LCLCTRL_GPIO_OE2 |
2880                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2881                              GRC_LCLCTRL_GPIO_OUTPUT1),
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2884                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2885                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2886                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2887                                      GRC_LCLCTRL_GPIO_OE1 |
2888                                      GRC_LCLCTRL_GPIO_OE2 |
2889                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2890                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2891                                      tp->grc_local_ctrl;
2892                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2894
2895                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2896                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2898
2899                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2900                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2902         } else {
2903                 u32 no_gpio2;
2904                 u32 grc_local_ctrl = 0;
2905
2906                 /* Workaround to prevent overdrawing current. */
2907                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2908                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2909                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2910                                     grc_local_ctrl,
2911                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2912                 }
2913
2914                 /* On 5753 and variants, GPIO2 cannot be used. */
2915                 no_gpio2 = tp->nic_sram_data_cfg &
2916                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2917
2918                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2919                                   GRC_LCLCTRL_GPIO_OE1 |
2920                                   GRC_LCLCTRL_GPIO_OE2 |
2921                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2922                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2923                 if (no_gpio2) {
2924                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2925                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2926                 }
2927                 tw32_wait_f(GRC_LOCAL_CTRL,
2928                             tp->grc_local_ctrl | grc_local_ctrl,
2929                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2930
2931                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2932
2933                 tw32_wait_f(GRC_LOCAL_CTRL,
2934                             tp->grc_local_ctrl | grc_local_ctrl,
2935                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2936
2937                 if (!no_gpio2) {
2938                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2939                         tw32_wait_f(GRC_LOCAL_CTRL,
2940                                     tp->grc_local_ctrl | grc_local_ctrl,
2941                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2942                 }
2943         }
2944 }
2945
2946 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2947 {
2948         u32 msg = 0;
2949
2950         /* Serialize power state transitions */
2951         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2952                 return;
2953
2954         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2955                 msg = TG3_GPIO_MSG_NEED_VAUX;
2956
2957         msg = tg3_set_function_status(tp, msg);
2958
2959         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2960                 goto done;
2961
2962         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2963                 tg3_pwrsrc_switch_to_vaux(tp);
2964         else
2965                 tg3_pwrsrc_die_with_vmain(tp);
2966
2967 done:
2968         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2969 }
2970
2971 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2972 {
2973         bool need_vaux = false;
2974
2975         /* The GPIOs do something completely different on 57765. */
2976         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2977                 return;
2978
2979         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2980             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2981             tg3_asic_rev(tp) == ASIC_REV_5720) {
2982                 tg3_frob_aux_power_5717(tp, include_wol ?
2983                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2984                 return;
2985         }
2986
2987         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2988                 struct net_device *dev_peer;
2989
2990                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2991
2992                 /* remove_one() may have been run on the peer. */
2993                 if (dev_peer) {
2994                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2995
2996                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2997                                 return;
2998
2999                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3000                             tg3_flag(tp_peer, ENABLE_ASF))
3001                                 need_vaux = true;
3002                 }
3003         }
3004
3005         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3006             tg3_flag(tp, ENABLE_ASF))
3007                 need_vaux = true;
3008
3009         if (need_vaux)
3010                 tg3_pwrsrc_switch_to_vaux(tp);
3011         else
3012                 tg3_pwrsrc_die_with_vmain(tp);
3013 }
3014
3015 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3016 {
3017         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3018                 return 1;
3019         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3020                 if (speed != SPEED_10)
3021                         return 1;
3022         } else if (speed == SPEED_10)
3023                 return 1;
3024
3025         return 0;
3026 }
3027
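     /* Decision table for tg3_5700_link_polarity() above (derived from the
      * code; "set" means the caller should set MAC_MODE_LINK_POLARITY):
      *
      *	LED mode PHY_2:    always set
      *	BCM5411 PHY:       set at 100/1000, clear at 10
      *	all other PHYs:    set at 10, clear at 100/1000
      */
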
3028 static bool tg3_phy_power_bug(struct tg3 *tp)
3029 {
3030         switch (tg3_asic_rev(tp)) {
3031         case ASIC_REV_5700:
3032         case ASIC_REV_5704:
3033                 return true;
3034         case ASIC_REV_5780:
3035                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3036                         return true;
3037                 return false;
3038         case ASIC_REV_5717:
3039                 if (!tp->pci_fn)
3040                         return true;
3041                 return false;
3042         case ASIC_REV_5719:
3043         case ASIC_REV_5720:
3044                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3045                     !tp->pci_fn)
3046                         return true;
3047                 return false;
3048         }
3049
3050         return false;
3051 }
3052
3053 static bool tg3_phy_led_bug(struct tg3 *tp)
3054 {
3055         switch (tg3_asic_rev(tp)) {
3056         case ASIC_REV_5719:
3057         case ASIC_REV_5720:
3058                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3059                     !tp->pci_fn)
3060                         return true;
3061                 return false;
3062         }
3063
3064         return false;
3065 }
3066
3067 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3068 {
3069         u32 val;
3070
3071         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3072                 return;
3073
3074         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3075                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3076                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3077                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3078
3079                         sg_dig_ctrl |=
3080                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3081                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3082                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3083                 }
3084                 return;
3085         }
3086
3087         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3088                 tg3_bmcr_reset(tp);
3089                 val = tr32(GRC_MISC_CFG);
3090                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3091                 udelay(40);
3092                 return;
3093         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3094                 u32 phytest;
3095                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3096                         u32 phy;
3097
3098                         tg3_writephy(tp, MII_ADVERTISE, 0);
3099                         tg3_writephy(tp, MII_BMCR,
3100                                      BMCR_ANENABLE | BMCR_ANRESTART);
3101
3102                         tg3_writephy(tp, MII_TG3_FET_TEST,
3103                                      phytest | MII_TG3_FET_SHADOW_EN);
3104                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3105                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3106                                 tg3_writephy(tp,
3107                                              MII_TG3_FET_SHDW_AUXMODE4,
3108                                              phy);
3109                         }
3110                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3111                 }
3112                 return;
3113         } else if (do_low_power) {
3114                 if (!tg3_phy_led_bug(tp))
3115                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3116                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3117
3118                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3119                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3120                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3121                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3122         }
3123
3124         /* The PHY should not be powered down on some chips because
3125          * of bugs.
3126          */
3127         if (tg3_phy_power_bug(tp))
3128                 return;
3129
3130         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3131             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3132                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3133                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3134                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3135                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3136         }
3137
3138         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3139 }
3140
3141 /* tp->lock is held. */
3142 static int tg3_nvram_lock(struct tg3 *tp)
3143 {
3144         if (tg3_flag(tp, NVRAM)) {
3145                 int i;
3146
3147                 if (tp->nvram_lock_cnt == 0) {
3148                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3149                         for (i = 0; i < 8000; i++) {
3150                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3151                                         break;
3152                                 udelay(20);
3153                         }
3154                         if (i == 8000) {
3155                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3156                                 return -ENODEV;
3157                         }
3158                 }
3159                 tp->nvram_lock_cnt++;
3160         }
3161         return 0;
3162 }
3163
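     /* Note on tg3_nvram_lock() above: the lock is recursive via
      * nvram_lock_cnt, and the hardware arbitration poll (8000 iterations
      * of udelay(20)) bounds the wait for SWARB_GNT1 at roughly 160 ms
      * before the request is withdrawn and -ENODEV returned.
      */
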
3164 /* tp->lock is held. */
3165 static void tg3_nvram_unlock(struct tg3 *tp)
3166 {
3167         if (tg3_flag(tp, NVRAM)) {
3168                 if (tp->nvram_lock_cnt > 0)
3169                         tp->nvram_lock_cnt--;
3170                 if (tp->nvram_lock_cnt == 0)
3171                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3172         }
3173 }
3174
3175 /* tp->lock is held. */
3176 static void tg3_enable_nvram_access(struct tg3 *tp)
3177 {
3178         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179                 u32 nvaccess = tr32(NVRAM_ACCESS);
3180
3181                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3182         }
3183 }
3184
3185 /* tp->lock is held. */
3186 static void tg3_disable_nvram_access(struct tg3 *tp)
3187 {
3188         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3189                 u32 nvaccess = tr32(NVRAM_ACCESS);
3190
3191                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3192         }
3193 }
3194
3195 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3196                                         u32 offset, u32 *val)
3197 {
3198         u32 tmp;
3199         int i;
3200
3201         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3202                 return -EINVAL;
3203
3204         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3205                                         EEPROM_ADDR_DEVID_MASK |
3206                                         EEPROM_ADDR_READ);
3207         tw32(GRC_EEPROM_ADDR,
3208              tmp |
3209              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3210              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3211               EEPROM_ADDR_ADDR_MASK) |
3212              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3213
3214         for (i = 0; i < 1000; i++) {
3215                 tmp = tr32(GRC_EEPROM_ADDR);
3216
3217                 if (tmp & EEPROM_ADDR_COMPLETE)
3218                         break;
3219                 msleep(1);
3220         }
3221         if (!(tmp & EEPROM_ADDR_COMPLETE))
3222                 return -EBUSY;
3223
3224         tmp = tr32(GRC_EEPROM_DATA);
3225
3226         /*
3227          * The data will always be opposite the native endian
3228          * format.  Perform a blind byteswap to compensate.
3229          */
3230         *val = swab32(tmp);
3231
3232         return 0;
3233 }
3234
3235 #define NVRAM_CMD_TIMEOUT 10000
3236
3237 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3238 {
3239         int i;
3240
3241         tw32(NVRAM_CMD, nvram_cmd);
3242         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3243                 usleep_range(10, 40);
3244                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3245                         udelay(10);
3246                         break;
3247                 }
3248         }
3249
3250         if (i == NVRAM_CMD_TIMEOUT)
3251                 return -EBUSY;
3252
3253         return 0;
3254 }
3255
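     /* Rough timing for tg3_nvram_exec_cmd() above: NVRAM_CMD_TIMEOUT
      * polls of usleep_range(10, 40) bound the wait for NVRAM_CMD_DONE at
      * roughly 100-400 ms (longer under scheduling pressure) before the
      * function gives up with -EBUSY.
      */
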
3256 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3257 {
3258         if (tg3_flag(tp, NVRAM) &&
3259             tg3_flag(tp, NVRAM_BUFFERED) &&
3260             tg3_flag(tp, FLASH) &&
3261             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3262             (tp->nvram_jedecnum == JEDEC_ATMEL))
3263
3264                 addr = ((addr / tp->nvram_pagesize) <<
3265                         ATMEL_AT45DB0X1B_PAGE_POS) +
3266                        (addr % tp->nvram_pagesize);
3267
3268         return addr;
3269 }
3270
3271 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3272 {
3273         if (tg3_flag(tp, NVRAM) &&
3274             tg3_flag(tp, NVRAM_BUFFERED) &&
3275             tg3_flag(tp, FLASH) &&
3276             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3277             (tp->nvram_jedecnum == JEDEC_ATMEL))
3278
3279                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3280                         tp->nvram_pagesize) +
3281                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3282
3283         return addr;
3284 }
3285
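     /* Worked example of the AT45DB0X1B translation above, assuming the
      * typical 264-byte page size and ATMEL_AT45DB0X1B_PAGE_POS == 9 (both
      * properties of that flash part, not guaranteed by this file):
      *
      *	tg3_nvram_phys_addr(1000)    = ((1000 / 264) << 9) + (1000 % 264)
      *	                             = (3 << 9) + 208 = 1744
      *	tg3_nvram_logical_addr(1744) = ((1744 >> 9) * 264) + (1744 & 511)
      *	                             = 792 + 208 = 1000
      */
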
3286 /* NOTE: Data read in from NVRAM is byteswapped according to
3287  * the byteswapping settings for all other register accesses.
3288  * tg3 devices are BE devices, so on a BE machine, the data
3289  * returned will be exactly as it is seen in NVRAM.  On a LE
3290  * machine, the 32-bit value will be byteswapped.
3291  */
3292 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3293 {
3294         int ret;
3295
3296         if (!tg3_flag(tp, NVRAM))
3297                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3298
3299         offset = tg3_nvram_phys_addr(tp, offset);
3300
3301         if (offset > NVRAM_ADDR_MSK)
3302                 return -EINVAL;
3303
3304         ret = tg3_nvram_lock(tp);
3305         if (ret)
3306                 return ret;
3307
3308         tg3_enable_nvram_access(tp);
3309
3310         tw32(NVRAM_ADDR, offset);
3311         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3312                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3313
3314         if (ret == 0)
3315                 *val = tr32(NVRAM_RDDATA);
3316
3317         tg3_disable_nvram_access(tp);
3318
3319         tg3_nvram_unlock(tp);
3320
3321         return ret;
3322 }
3323
3324 /* Ensures NVRAM data is in bytestream format. */
3325 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3326 {
3327         u32 v;
3328         int res = tg3_nvram_read(tp, offset, &v);
3329         if (!res)
3330                 *val = cpu_to_be32(v);
3331         return res;
3332 }
3333
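     /* Usage sketch (illustrative only; the offset is hypothetical and the
      * caller must hold tp->lock): reading a 16-byte NVRAM region into a
      * bytestream buffer with tg3_nvram_read_be32().
      *
      *	__be32 buf[4];
      *	int i, err;
      *
      *	for (i = 0; i < ARRAY_SIZE(buf); i++) {
      *		err = tg3_nvram_read_be32(tp, 0x100 + i * 4, &buf[i]);
      *		if (err)
      *			return err;
      *	}
      */
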
3334 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3335                                     u32 offset, u32 len, u8 *buf)
3336 {
3337         int i, j, rc = 0;
3338         u32 val;
3339
3340         for (i = 0; i < len; i += 4) {
3341                 u32 addr;
3342                 __be32 data;
3343
3344                 addr = offset + i;
3345
3346                 memcpy(&data, buf + i, 4);
3347
3348                 /*
3349                  * The SEEPROM interface expects the data to always be opposite
3350                  * the native endian format.  We accomplish this by reversing
3351                  * all the operations that would have been performed on the
3352                  * data from a call to tg3_nvram_read_be32().
3353                  */
3354                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3355
3356                 val = tr32(GRC_EEPROM_ADDR);
3357                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3358
3359                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3360                         EEPROM_ADDR_READ);
3361                 tw32(GRC_EEPROM_ADDR, val |
3362                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3363                         (addr & EEPROM_ADDR_ADDR_MASK) |
3364                         EEPROM_ADDR_START |
3365                         EEPROM_ADDR_WRITE);
3366
3367                 for (j = 0; j < 1000; j++) {
3368                         val = tr32(GRC_EEPROM_ADDR);
3369
3370                         if (val & EEPROM_ADDR_COMPLETE)
3371                                 break;
3372                         msleep(1);
3373                 }
3374                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3375                         rc = -EBUSY;
3376                         break;
3377                 }
3378         }
3379
3380         return rc;
3381 }
3382
3383 /* offset and length are dword aligned */
3384 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3385                 u8 *buf)
3386 {
3387         int ret = 0;
3388         u32 pagesize = tp->nvram_pagesize;
3389         u32 pagemask = pagesize - 1;
3390         u32 nvram_cmd;
3391         u8 *tmp;
3392
3393         tmp = kmalloc(pagesize, GFP_KERNEL);
3394         if (tmp == NULL)
3395                 return -ENOMEM;
3396
3397         while (len) {
3398                 int j;
3399                 u32 phy_addr, page_off, size;
3400
3401                 phy_addr = offset & ~pagemask;
3402
3403                 for (j = 0; j < pagesize; j += 4) {
3404                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3405                                                   (__be32 *) (tmp + j));
3406                         if (ret)
3407                                 break;
3408                 }
3409                 if (ret)
3410                         break;
3411
3412                 page_off = offset & pagemask;
3413                 size = pagesize;
3414                 if (len < size)
3415                         size = len;
3416
3417                 len -= size;
3418
3419                 memcpy(tmp + page_off, buf, size);
                     buf += size;	/* advance the source; needed when a write spans pages */
3420
3421                 offset = offset + (pagesize - page_off);
3422
3423                 tg3_enable_nvram_access(tp);
3424
3425                 /*
3426                  * Before we can erase the flash page, we need
3427                  * to issue a special "write enable" command.
3428                  */
3429                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3430
3431                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3432                         break;
3433
3434                 /* Erase the target page */
3435                 tw32(NVRAM_ADDR, phy_addr);
3436
3437                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3438                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3439
3440                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441                         break;
3442
3443                 /* Issue another write enable to start the write. */
3444                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3445
3446                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3447                         break;
3448
3449                 for (j = 0; j < pagesize; j += 4) {
3450                         __be32 data;
3451
3452                         data = *((__be32 *) (tmp + j));
3453
3454                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3455
3456                         tw32(NVRAM_ADDR, phy_addr + j);
3457
3458                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3459                                 NVRAM_CMD_WR;
3460
3461                         if (j == 0)
3462                                 nvram_cmd |= NVRAM_CMD_FIRST;
3463                         else if (j == (pagesize - 4))
3464                                 nvram_cmd |= NVRAM_CMD_LAST;
3465
3466                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3467                         if (ret)
3468                                 break;
3469                 }
3470                 if (ret)
3471                         break;
3472         }
3473
3474         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3475         tg3_nvram_exec_cmd(tp, nvram_cmd);
3476
3477         kfree(tmp);
3478
3479         return ret;
3480 }
3481
3482 /* offset and length are dword aligned */
3483 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3484                 u8 *buf)
3485 {
3486         int i, ret = 0;
3487
3488         for (i = 0; i < len; i += 4, offset += 4) {
3489                 u32 page_off, phy_addr, nvram_cmd;
3490                 __be32 data;
3491
3492                 memcpy(&data, buf + i, 4);
3493                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3494
3495                 page_off = offset % tp->nvram_pagesize;
3496
3497                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3498
3499                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3500
3501                 if (page_off == 0 || i == 0)
3502                         nvram_cmd |= NVRAM_CMD_FIRST;
3503                 if (page_off == (tp->nvram_pagesize - 4))
3504                         nvram_cmd |= NVRAM_CMD_LAST;
3505
3506                 if (i == (len - 4))
3507                         nvram_cmd |= NVRAM_CMD_LAST;
3508
3509                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3510                     !tg3_flag(tp, FLASH) ||
3511                     !tg3_flag(tp, 57765_PLUS))
3512                         tw32(NVRAM_ADDR, phy_addr);
3513
3514                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3515                     !tg3_flag(tp, 5755_PLUS) &&
3516                     (tp->nvram_jedecnum == JEDEC_ST) &&
3517                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3518                         u32 cmd;
3519
3520                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3521                         ret = tg3_nvram_exec_cmd(tp, cmd);
3522                         if (ret)
3523                                 break;
3524                 }
3525                 if (!tg3_flag(tp, FLASH)) {
3526                         /* We always do complete word writes to eeprom. */
3527                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3528                 }
3529
3530                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3531                 if (ret)
3532                         break;
3533         }
3534         return ret;
3535 }
3536
3537 /* offset and length are dword aligned */
3538 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3539 {
3540         int ret;
3541
3542         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3543                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3544                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3545                 udelay(40);
3546         }
3547
3548         if (!tg3_flag(tp, NVRAM)) {
3549                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3550         } else {
3551                 u32 grc_mode;
3552
3553                 ret = tg3_nvram_lock(tp);
3554                 if (ret)
3555                         return ret;
3556
3557                 tg3_enable_nvram_access(tp);
3558                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3559                         tw32(NVRAM_WRITE1, 0x406);
3560
3561                 grc_mode = tr32(GRC_MODE);
3562                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3563
3564                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3565                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3566                                 buf);
3567                 } else {
3568                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3569                                 buf);
3570                 }
3571
3572                 grc_mode = tr32(GRC_MODE);
3573                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3574
3575                 tg3_disable_nvram_access(tp);
3576                 tg3_nvram_unlock(tp);
3577         }
3578
3579         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3580                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3581                 udelay(40);
3582         }
3583
3584         return ret;
3585 }
3586
3587 #define RX_CPU_SCRATCH_BASE     0x30000
3588 #define RX_CPU_SCRATCH_SIZE     0x04000
3589 #define TX_CPU_SCRATCH_BASE     0x34000
3590 #define TX_CPU_SCRATCH_SIZE     0x04000
3591
3592 /* tp->lock is held. */
3593 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3594 {
3595         int i;
3596         const int iters = 10000;
3597
3598         for (i = 0; i < iters; i++) {
3599                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3600                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3601                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3602                         break;
3603                 if (pci_channel_offline(tp->pdev))
3604                         return -EBUSY;
3605         }
3606
3607         return (i == iters) ? -EBUSY : 0;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_rxcpu_pause(struct tg3 *tp)
3612 {
3613         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3614
3615         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3616         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3617         udelay(10);
3618
3619         return rc;
3620 }
3621
3622 /* tp->lock is held. */
3623 static int tg3_txcpu_pause(struct tg3 *tp)
3624 {
3625         return tg3_pause_cpu(tp, TX_CPU_BASE);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631         tw32(cpu_base + CPU_STATE, 0xffffffff);
3632         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3633 }
3634
3635 /* tp->lock is held. */
3636 static void tg3_rxcpu_resume(struct tg3 *tp)
3637 {
3638         tg3_resume_cpu(tp, RX_CPU_BASE);
3639 }
3640
3641 /* tp->lock is held. */
3642 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3643 {
3644         int rc;
3645
3646         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3647
3648         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3649                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3650
3651                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3652                 return 0;
3653         }
3654         if (cpu_base == RX_CPU_BASE) {
3655                 rc = tg3_rxcpu_pause(tp);
3656         } else {
3657                 /*
3658                  * There is only an Rx CPU for the 5750 derivative in the
3659                  * BCM4785.
3660                  */
3661                 if (tg3_flag(tp, IS_SSB_CORE))
3662                         return 0;
3663
3664                 rc = tg3_txcpu_pause(tp);
3665         }
3666
3667         if (rc) {
3668                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3669                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3670                 return -ENODEV;
3671         }
3672
3673         /* Clear firmware's nvram arbitration. */
3674         if (tg3_flag(tp, NVRAM))
3675                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3676         return 0;
3677 }
3678
3679 static int tg3_fw_data_len(struct tg3 *tp,
3680                            const struct tg3_firmware_hdr *fw_hdr)
3681 {
3682         int fw_len;
3683
3684         /* Non-fragmented firmware has one firmware header followed by a
3685          * contiguous chunk of data to be written. The length field in that
3686          * header is not the length of the data to be written but the
3687          * complete length through the end of the bss. The data length is
3688          * therefore derived from tp->fw->size minus the header.
3689          *
3690          * Fragmented firmware has a main header followed by multiple
3691          * fragments. Each fragment is identical to non-fragmented firmware,
3692          * with a firmware header followed by a contiguous chunk of data. In
3693          * the main header, the length field is unused and set to 0xffffffff.
3694          * In each fragment header the length is the entire size of that
3695          * fragment, i.e. fragment data plus header length. The data length
3696          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3697          */
3698         if (tp->fw_len == 0xffffffff)
3699                 fw_len = be32_to_cpu(fw_hdr->len);
3700         else
3701                 fw_len = tp->fw->size;
3702
3703         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3704 }
3705
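     /* Layout sketch of the two firmware image formats handled above:
      *
      *	non-fragmented: [ hdr | data ... ]
      *	                hdr->len = total image length (text through bss)
      *
      *	fragmented:     [ main hdr | frag hdr | data | frag hdr | data | ... ]
      *	                main hdr->len = 0xffffffff
      *	                frag hdr->len = TG3_FW_HDR_LEN + fragment data length
      */
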
3706 /* tp->lock is held. */
3707 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3708                                  u32 cpu_scratch_base, int cpu_scratch_size,
3709                                  const struct tg3_firmware_hdr *fw_hdr)
3710 {
3711         int err, i;
3712         void (*write_op)(struct tg3 *, u32, u32);
3713         int total_len = tp->fw->size;
3714
3715         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3716                 netdev_err(tp->dev,
3717                            "%s: Trying to load TX cpu firmware on a 5705-plus device\n",
3718                            __func__);
3719                 return -EINVAL;
3720         }
3721
3722         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3723                 write_op = tg3_write_mem;
3724         else
3725                 write_op = tg3_write_indirect_reg32;
3726
3727         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3728                 /* It is possible that bootcode is still loading at this point.
3729                  * Get the nvram lock first before halting the cpu.
3730                  */
3731                 int lock_err = tg3_nvram_lock(tp);
3732                 err = tg3_halt_cpu(tp, cpu_base);
3733                 if (!lock_err)
3734                         tg3_nvram_unlock(tp);
3735                 if (err)
3736                         goto out;
3737
3738                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3739                         write_op(tp, cpu_scratch_base + i, 0);
3740                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3741                 tw32(cpu_base + CPU_MODE,
3742                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3743         } else {
3744                 /* Subtract additional main header for fragmented firmware and
3745                  * advance to the first fragment.
3746                  */
3747                 total_len -= TG3_FW_HDR_LEN;
3748                 fw_hdr++;
3749         }
3750
3751         do {
3752                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3753                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3754                         write_op(tp, cpu_scratch_base +
3755                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3756                                      (i * sizeof(u32)),
3757                                  be32_to_cpu(fw_data[i]));
3758
3759                 total_len -= be32_to_cpu(fw_hdr->len);
3760
3761                 /* Advance to next fragment */
3762                 fw_hdr = (struct tg3_firmware_hdr *)
3763                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3764         } while (total_len > 0);
3765
3766         err = 0;
3767
3768 out:
3769         return err;
3770 }
3771
3772 /* tp->lock is held. */
3773 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3774 {
3775         int i;
3776         const int iters = 5;
3777
3778         tw32(cpu_base + CPU_STATE, 0xffffffff);
3779         tw32_f(cpu_base + CPU_PC, pc);
3780
3781         for (i = 0; i < iters; i++) {
3782                 if (tr32(cpu_base + CPU_PC) == pc)
3783                         break;
3784                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3785                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3786                 tw32_f(cpu_base + CPU_PC, pc);
3787                 udelay(1000);
3788         }
3789
3790         return (i == iters) ? -EBUSY : 0;
3791 }
3792
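     /* tg3_pause_cpu_and_set_pc() above verifies that the PC write actually
      * took; with 5 retries of udelay(1000) each, the whole attempt is
      * bounded at about 5 ms before -EBUSY.
      */
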
3793 /* tp->lock is held. */
3794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3795 {
3796         const struct tg3_firmware_hdr *fw_hdr;
3797         int err;
3798
3799         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3800
3801         /* Firmware blob starts with version numbers, followed by
3802          * start address and length. We are setting the complete length:
3803          * length = end_address_of_bss - start_address_of_text.
3804          * The remainder is the blob to be loaded contiguously
3805          * from the start address. */
3806
3807         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3808                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3809                                     fw_hdr);
3810         if (err)
3811                 return err;
3812
3813         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3814                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3815                                     fw_hdr);
3816         if (err)
3817                 return err;
3818
3819         /* Now startup only the RX cpu. */
3820         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3821                                        be32_to_cpu(fw_hdr->base_addr));
3822         if (err) {
3823                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3824                            "should be %08x\n", __func__,
3825                            tr32(RX_CPU_BASE + CPU_PC),
3826                            be32_to_cpu(fw_hdr->base_addr));
3827                 return -ENODEV;
3828         }
3829
3830         tg3_rxcpu_resume(tp);
3831
3832         return 0;
3833 }
3834
3835 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3836 {
3837         const int iters = 1000;
3838         int i;
3839         u32 val;
3840
3841         /* Wait for boot code to complete initialization and enter service
3842          * loop. It is then safe to download service patches.
3843          */
3844         for (i = 0; i < iters; i++) {
3845                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3846                         break;
3847
3848                 udelay(10);
3849         }
3850
3851         if (i == iters) {
3852                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3853                 return -EBUSY;
3854         }
3855
3856         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3857         if (val & 0xff) {
3858                 netdev_warn(tp->dev,
3859                             "Other patches exist. Not downloading EEE patch\n");
3860                 return -EEXIST;
3861         }
3862
3863         return 0;
3864 }
3865
3866 /* tp->lock is held. */
3867 static void tg3_load_57766_firmware(struct tg3 *tp)
3868 {
3869         struct tg3_firmware_hdr *fw_hdr;
3870
3871         if (!tg3_flag(tp, NO_NVRAM))
3872                 return;
3873
3874         if (tg3_validate_rxcpu_state(tp))
3875                 return;
3876
3877         if (!tp->fw)
3878                 return;
3879
3880         /* This firmware blob has a different format than older firmware
3881          * releases, as described below. The main difference is that we have
3882          * fragmented data to be written to non-contiguous locations.
3883          *
3884          * In the beginning we have a firmware header identical to other
3885          * firmware, consisting of version, base addr and length. The length
3886          * here is unused and set to 0xffffffff.
3887          *
3888          * This is followed by a series of firmware fragments which are
3889          * individually identical to previous firmware, i.e. they have the
3890          * firmware header followed by the data for that fragment. The
3891          * version field of the individual fragment header is unused.
3892          */
3893
3894         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3895         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3896                 return;
3897
3898         if (tg3_rxcpu_pause(tp))
3899                 return;
3900
3901         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3902         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3903
3904         tg3_rxcpu_resume(tp);
3905 }
3906
3907 /* tp->lock is held. */
3908 static int tg3_load_tso_firmware(struct tg3 *tp)
3909 {
3910         const struct tg3_firmware_hdr *fw_hdr;
3911         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3912         int err;
3913
3914         if (!tg3_flag(tp, FW_TSO))
3915                 return 0;
3916
3917         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3918
3919         /* Firmware blob starts with version numbers, followed by
3920          * start address and length. We are setting the complete length:
3921          * length = end_address_of_bss - start_address_of_text.
3922          * The remainder is the blob to be loaded contiguously
3923          * from the start address. */
3924
3925         cpu_scratch_size = tp->fw_len;
3926
3927         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3928                 cpu_base = RX_CPU_BASE;
3929                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3930         } else {
3931                 cpu_base = TX_CPU_BASE;
3932                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3933                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3934         }
3935
3936         err = tg3_load_firmware_cpu(tp, cpu_base,
3937                                     cpu_scratch_base, cpu_scratch_size,
3938                                     fw_hdr);
3939         if (err)
3940                 return err;
3941
3942         /* Now startup the cpu. */
3943         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3944                                        be32_to_cpu(fw_hdr->base_addr));
3945         if (err) {
3946                 netdev_err(tp->dev,
3947                            "%s failed to set CPU PC, is %08x should be %08x\n",
3948                            __func__, tr32(cpu_base + CPU_PC),
3949                            be32_to_cpu(fw_hdr->base_addr));
3950                 return -ENODEV;
3951         }
3952
3953         tg3_resume_cpu(tp, cpu_base);
3954         return 0;
3955 }
3956
3957 /* tp->lock is held. */
3958 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3959 {
3960         u32 addr_high, addr_low;
3961
3962         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3963         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3964                     (mac_addr[4] <<  8) | mac_addr[5]);
3965
3966         if (index < 4) {
3967                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3968                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3969         } else {
3970                 index -= 4;
3971                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3972                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3973         }
3974 }
3975
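     /* Example of the packing above for the (hypothetical) address
      * 00:10:18:aa:bb:cc:
      *
      *	addr_high = (0x00 << 8) | 0x10 = 0x00000010
      *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
      *	          = 0x18aabbcc
      */
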
3976 /* tp->lock is held. */
3977 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3978 {
3979         u32 addr_high;
3980         int i;
3981
3982         for (i = 0; i < 4; i++) {
3983                 if (i == 1 && skip_mac_1)
3984                         continue;
3985                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3986         }
3987
3988         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3989             tg3_asic_rev(tp) == ASIC_REV_5704) {
3990                 for (i = 4; i < 16; i++)
3991                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3992         }
3993
3994         addr_high = (tp->dev->dev_addr[0] +
3995                      tp->dev->dev_addr[1] +
3996                      tp->dev->dev_addr[2] +
3997                      tp->dev->dev_addr[3] +
3998                      tp->dev->dev_addr[4] +
3999                      tp->dev->dev_addr[5]) &
4000                 TX_BACKOFF_SEED_MASK;
4001         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4002 }
4003
4004 static void tg3_enable_register_access(struct tg3 *tp)
4005 {
4006         /*
4007          * Make sure register accesses (indirect or otherwise) will function
4008          * correctly.
4009          */
4010         pci_write_config_dword(tp->pdev,
4011                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4012 }
4013
4014 static int tg3_power_up(struct tg3 *tp)
4015 {
4016         int err;
4017
4018         tg3_enable_register_access(tp);
4019
4020         err = pci_set_power_state(tp->pdev, PCI_D0);
4021         if (!err) {
4022                 /* Switch out of Vaux if it is a NIC */
4023                 tg3_pwrsrc_switch_to_vmain(tp);
4024         } else {
4025                 netdev_err(tp->dev, "Transition to D0 failed\n");
4026         }
4027
4028         return err;
4029 }
4030
4031 static int tg3_setup_phy(struct tg3 *, bool);
4032
4033 static int tg3_power_down_prepare(struct tg3 *tp)
4034 {
4035         u32 misc_host_ctrl;
4036         bool device_should_wake, do_low_power;
4037
4038         tg3_enable_register_access(tp);
4039
4040         /* Restore the CLKREQ setting. */
4041         if (tg3_flag(tp, CLKREQ_BUG))
4042                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4043                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4044
4045         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4046         tw32(TG3PCI_MISC_HOST_CTRL,
4047              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4048
4049         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4050                              tg3_flag(tp, WOL_ENABLE);
4051
4052         if (tg3_flag(tp, USE_PHYLIB)) {
4053                 do_low_power = false;
4054                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4055                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4056                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4057                         struct phy_device *phydev;
4058                         u32 phyid;
4059
4060                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4061
4062                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4063
4064                         tp->link_config.speed = phydev->speed;
4065                         tp->link_config.duplex = phydev->duplex;
4066                         tp->link_config.autoneg = phydev->autoneg;
4067                         ethtool_convert_link_mode_to_legacy_u32(
4068                                 &tp->link_config.advertising,
4069                                 phydev->advertising);
4070
4071                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4072                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4073                                          advertising);
4074                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4075                                          advertising);
4076                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4077                                          advertising);
4078
4079                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4080                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4081                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4082                                                          advertising);
4083                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4084                                                          advertising);
4085                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4086                                                          advertising);
4087                                 } else {
4088                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4089                                                          advertising);
4090                                 }
4091                         }
4092
4093                         linkmode_copy(phydev->advertising, advertising);
4094                         phy_start_aneg(phydev);
4095
4096                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4097                         if (phyid != PHY_ID_BCMAC131) {
4098                                 phyid &= PHY_BCM_OUI_MASK;
4099                                 if (phyid == PHY_BCM_OUI_1 ||
4100                                     phyid == PHY_BCM_OUI_2 ||
4101                                     phyid == PHY_BCM_OUI_3)
4102                                         do_low_power = true;
4103                         }
4104                 }
4105         } else {
4106                 do_low_power = true;
4107
4108                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4109                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4110
4111                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4112                         tg3_setup_phy(tp, false);
4113         }
4114
4115         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4116                 u32 val;
4117
4118                 val = tr32(GRC_VCPU_EXT_CTRL);
4119                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4120         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4121                 int i;
4122                 u32 val;
4123
4124                 for (i = 0; i < 200; i++) {
4125                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4126                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4127                                 break;
4128                         msleep(1);
4129                 }
4130         }
4131         if (tg3_flag(tp, WOL_CAP))
4132                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4133                                                      WOL_DRV_STATE_SHUTDOWN |
4134                                                      WOL_DRV_WOL |
4135                                                      WOL_SET_MAGIC_PKT);
4136
4137         if (device_should_wake) {
4138                 u32 mac_mode;
4139
4140                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4141                         if (do_low_power &&
4142                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4143                                 tg3_phy_auxctl_write(tp,
4144                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4145                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4146                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4147                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4148                                 udelay(40);
4149                         }
4150
4151                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4152                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4153                         else if (tp->phy_flags &
4154                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4155                                 if (tp->link_config.active_speed == SPEED_1000)
4156                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4157                                 else
4158                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4159                         } else
4160                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4161
4162                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4163                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4164                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4165                                              SPEED_100 : SPEED_10;
4166                                 if (tg3_5700_link_polarity(tp, speed))
4167                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4168                                 else
4169                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4170                         }
4171                 } else {
4172                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4173                 }
4174
4175                 if (!tg3_flag(tp, 5750_PLUS))
4176                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4177
4178                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4179                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4180                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4181                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4182
4183                 if (tg3_flag(tp, ENABLE_APE))
4184                         mac_mode |= MAC_MODE_APE_TX_EN |
4185                                     MAC_MODE_APE_RX_EN |
4186                                     MAC_MODE_TDE_ENABLE;
4187
4188                 tw32_f(MAC_MODE, mac_mode);
4189                 udelay(100);
4190
4191                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4192                 udelay(10);
4193         }
4194
4195         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4196             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4198                 u32 base_val;
4199
4200                 base_val = tp->pci_clock_ctrl;
4201                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4202                              CLOCK_CTRL_TXCLK_DISABLE);
4203
4204                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4205                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4206         } else if (tg3_flag(tp, 5780_CLASS) ||
4207                    tg3_flag(tp, CPMU_PRESENT) ||
4208                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4209                 /* do nothing */
4210         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4211                 u32 newbits1, newbits2;
4212
4213                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4214                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4215                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4216                                     CLOCK_CTRL_TXCLK_DISABLE |
4217                                     CLOCK_CTRL_ALTCLK);
4218                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219                 } else if (tg3_flag(tp, 5705_PLUS)) {
4220                         newbits1 = CLOCK_CTRL_625_CORE;
4221                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4222                 } else {
4223                         newbits1 = CLOCK_CTRL_ALTCLK;
4224                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4225                 }
4226
4227                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4228                             40);
4229
4230                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4231                             40);
4232
4233                 if (!tg3_flag(tp, 5705_PLUS)) {
4234                         u32 newbits3;
4235
4236                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4237                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4238                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4239                                             CLOCK_CTRL_TXCLK_DISABLE |
4240                                             CLOCK_CTRL_44MHZ_CORE);
4241                         } else {
4242                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4243                         }
4244
4245                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4246                                     tp->pci_clock_ctrl | newbits3, 40);
4247                 }
4248         }
4249
4250         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4251                 tg3_power_down_phy(tp, do_low_power);
4252
4253         tg3_frob_aux_power(tp, true);
4254
4255         /* Workaround for unstable PLL clock */
4256         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4257             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4258              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4259                 u32 val = tr32(0x7d00);
4260
4261                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4262                 tw32(0x7d00, val);
4263                 if (!tg3_flag(tp, ENABLE_ASF)) {
4264                         int err;
4265
4266                         err = tg3_nvram_lock(tp);
4267                         tg3_halt_cpu(tp, RX_CPU_BASE);
4268                         if (!err)
4269                                 tg3_nvram_unlock(tp);
4270                 }
4271         }
4272
4273         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4274
4275         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4276
4277         return 0;
4278 }
4279
4280 static void tg3_power_down(struct tg3 *tp)
4281 {
4282         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4283         pci_set_power_state(tp->pdev, PCI_D3hot);
4284 }
4285
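     /* Note: tg3_power_down() only performs the PCI-level transition; all
      * chip-level WOL arming (MAC mode bits, the WOL mailbox signature) is
      * done beforehand in tg3_power_down_prepare() above.
      */
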
4286 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4287 {
4288         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4289         case MII_TG3_AUX_STAT_10HALF:
4290                 *speed = SPEED_10;
4291                 *duplex = DUPLEX_HALF;
4292                 break;
4293
4294         case MII_TG3_AUX_STAT_10FULL:
4295                 *speed = SPEED_10;
4296                 *duplex = DUPLEX_FULL;
4297                 break;
4298
4299         case MII_TG3_AUX_STAT_100HALF:
4300                 *speed = SPEED_100;
4301                 *duplex = DUPLEX_HALF;
4302                 break;
4303
4304         case MII_TG3_AUX_STAT_100FULL:
4305                 *speed = SPEED_100;
4306                 *duplex = DUPLEX_FULL;
4307                 break;
4308
4309         case MII_TG3_AUX_STAT_1000HALF:
4310                 *speed = SPEED_1000;
4311                 *duplex = DUPLEX_HALF;
4312                 break;
4313
4314         case MII_TG3_AUX_STAT_1000FULL:
4315                 *speed = SPEED_1000;
4316                 *duplex = DUPLEX_FULL;
4317                 break;
4318
4319         default:
4320                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4321                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4322                                  SPEED_10;
4323                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4324                                   DUPLEX_HALF;
4325                         break;
4326                 }
4327                 *speed = SPEED_UNKNOWN;
4328                 *duplex = DUPLEX_UNKNOWN;
4329                 break;
4330         }
4331 }
4332
4333 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4334 {
4335         int err = 0;
4336         u32 val, new_adv;
4337
4338         new_adv = ADVERTISE_CSMA;
4339         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4340         new_adv |= mii_advertise_flowctrl(flowctrl);
4341
4342         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4343         if (err)
4344                 goto done;
4345
4346         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4347                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4348
4349                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4350                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4351                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4352
4353                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4354                 if (err)
4355                         goto done;
4356         }
4357
4358         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4359                 goto done;
4360
4361         tw32(TG3_CPMU_EEE_MODE,
4362              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4363
4364         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4365         if (!err) {
4366                 u32 err2;
4367
4368                 val = 0;
4369                 /* Advertise 100-BaseTX EEE ability */
4370                 if (advertise & ADVERTISED_100baseT_Full)
4371                         val |= MDIO_AN_EEE_ADV_100TX;
4372                 /* Advertise 1000-BaseT EEE ability */
4373                 if (advertise & ADVERTISED_1000baseT_Full)
4374                         val |= MDIO_AN_EEE_ADV_1000T;
4375
4376                 if (!tp->eee.eee_enabled) {
4377                         val = 0;
4378                         tp->eee.advertised = 0;
4379                 } else {
4380                         tp->eee.advertised = advertise &
4381                                              (ADVERTISED_100baseT_Full |
4382                                               ADVERTISED_1000baseT_Full);
4383                 }
4384
4385                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4386                 if (err)
4387                         val = 0;
4388
4389                 switch (tg3_asic_rev(tp)) {
4390                 case ASIC_REV_5717:
4391                 case ASIC_REV_57765:
4392                 case ASIC_REV_57766:
4393                 case ASIC_REV_5719:
4394                         /* If we advertised any EEE abilities above... */
4395                         if (val)
4396                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4397                                       MII_TG3_DSP_TAP26_RMRXSTO |
4398                                       MII_TG3_DSP_TAP26_OPCSINPT;
4399                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4400                         /* Fall through */
4401                 case ASIC_REV_5720:
4402                 case ASIC_REV_5762:
4403                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4404                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4405                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4406                 }
4407
4408                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4409                 if (!err)
4410                         err = err2;
4411         }
4412
4413 done:
4414         return err;
4415 }
4416
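/* Kick off link negotiation on a copper PHY.  With autoneg enabled (or
 * in low-power mode, where only the speeds needed for WOL are
 * advertised), this writes the advertisement and restarts autoneg;
 * otherwise it forces the configured speed/duplex through BMCR,
 * briefly entering loopback so the stale link drops before the new
 * settings take effect.
 */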
4417 static void tg3_phy_copper_begin(struct tg3 *tp)
4418 {
4419         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4420             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4421                 u32 adv, fc;
4422
4423                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4424                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4425                         adv = ADVERTISED_10baseT_Half |
4426                               ADVERTISED_10baseT_Full;
4427                         if (tg3_flag(tp, WOL_SPEED_100MB))
4428                                 adv |= ADVERTISED_100baseT_Half |
4429                                        ADVERTISED_100baseT_Full;
4430                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4431                                 if (!(tp->phy_flags &
4432                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4433                                         adv |= ADVERTISED_1000baseT_Half;
4434                                 adv |= ADVERTISED_1000baseT_Full;
4435                         }
4436
4437                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4438                 } else {
4439                         adv = tp->link_config.advertising;
4440                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4441                                 adv &= ~(ADVERTISED_1000baseT_Half |
4442                                          ADVERTISED_1000baseT_Full);
4443
4444                         fc = tp->link_config.flowctrl;
4445                 }
4446
4447                 tg3_phy_autoneg_cfg(tp, adv, fc);
4448
4449                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4450                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4451                         /* Normally during power down we want to autonegotiate
4452                          * the lowest possible speed for WOL. However, to avoid
4453                          * link flap, we leave it untouched.
4454                          */
4455                         return;
4456                 }
4457
4458                 tg3_writephy(tp, MII_BMCR,
4459                              BMCR_ANENABLE | BMCR_ANRESTART);
4460         } else {
4461                 int i;
4462                 u32 bmcr, orig_bmcr;
4463
4464                 tp->link_config.active_speed = tp->link_config.speed;
4465                 tp->link_config.active_duplex = tp->link_config.duplex;
4466
4467                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4468                 /* With autoneg disabled, the 5715 (ASIC rev 5714) only
4469                  * links up when the advertisement register has the
4470                  * configured speed enabled.
4471                  */
4472                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4473                 }
4474
4475                 bmcr = 0;
4476                 switch (tp->link_config.speed) {
4477                 default:
4478                 case SPEED_10:
4479                         break;
4480
4481                 case SPEED_100:
4482                         bmcr |= BMCR_SPEED100;
4483                         break;
4484
4485                 case SPEED_1000:
4486                         bmcr |= BMCR_SPEED1000;
4487                         break;
4488                 }
4489
4490                 if (tp->link_config.duplex == DUPLEX_FULL)
4491                         bmcr |= BMCR_FULLDPLX;
4492
4493                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4494                     (bmcr != orig_bmcr)) {
4495                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4496                         for (i = 0; i < 1500; i++) {
4497                                 u32 tmp;
4498
4499                                 udelay(10);
4500                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4501                                     tg3_readphy(tp, MII_BMSR, &tmp))
4502                                         continue;
4503                                 if (!(tmp & BMSR_LSTATUS)) {
4504                                         udelay(40);
4505                                         break;
4506                                 }
4507                         }
4508                         tg3_writephy(tp, MII_BMCR, bmcr);
4509                         udelay(40);
4510                 }
4511         }
4512 }
4513
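/* The inverse of tg3_phy_autoneg_cfg(): read BMCR and the
 * advertisement registers back from the PHY and reconstruct
 * tp->link_config (autoneg, speed, duplex, flow control, advertised
 * modes) from the hardware's current state.
 */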
4514 static int tg3_phy_pull_config(struct tg3 *tp)
4515 {
4516         int err;
4517         u32 val;
4518
4519         err = tg3_readphy(tp, MII_BMCR, &val);
4520         if (err)
4521                 goto done;
4522
4523         if (!(val & BMCR_ANENABLE)) {
4524                 tp->link_config.autoneg = AUTONEG_DISABLE;
4525                 tp->link_config.advertising = 0;
4526                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4527
4528                 err = -EIO;
4529
4530                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4531                 case 0:
4532                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4533                                 goto done;
4534
4535                         tp->link_config.speed = SPEED_10;
4536                         break;
4537                 case BMCR_SPEED100:
4538                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4539                                 goto done;
4540
4541                         tp->link_config.speed = SPEED_100;
4542                         break;
4543                 case BMCR_SPEED1000:
4544                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4545                                 tp->link_config.speed = SPEED_1000;
4546                                 break;
4547                         }
4548                         /* Fall through */
4549                 default:
4550                         goto done;
4551                 }
4552
4553                 if (val & BMCR_FULLDPLX)
4554                         tp->link_config.duplex = DUPLEX_FULL;
4555                 else
4556                         tp->link_config.duplex = DUPLEX_HALF;
4557
4558                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4559
4560                 err = 0;
4561                 goto done;
4562         }
4563
4564         tp->link_config.autoneg = AUTONEG_ENABLE;
4565         tp->link_config.advertising = ADVERTISED_Autoneg;
4566         tg3_flag_set(tp, PAUSE_AUTONEG);
4567
4568         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4569                 u32 adv;
4570
4571                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572                 if (err)
4573                         goto done;
4574
4575                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4576                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4577
4578                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4579         } else {
4580                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4581         }
4582
4583         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4584                 u32 adv;
4585
4586                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4587                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4588                         if (err)
4589                                 goto done;
4590
4591                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4592                 } else {
4593                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4594                         if (err)
4595                                 goto done;
4596
4597                         adv = tg3_decode_flowctrl_1000X(val);
4598                         tp->link_config.flowctrl = adv;
4599
4600                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4601                         adv = mii_adv_to_ethtool_adv_x(val);
4602                 }
4603
4604                 tp->link_config.advertising |= adv;
4605         }
4606
4607 done:
4608         return err;
4609 }
4610
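/* BCM5401 PHY setup: one AUXCTL write (see the comment below) plus a
 * block of undocumented, vendor-specific DSP register values.
 */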
4611 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4612 {
4613         int err;
4614
4615         /* Turn off tap power management and set the
4616          * extended packet length bit. */
4617         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4618
4619         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4620         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4621         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4622         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4623         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4624
4625         udelay(40);
4626
4627         return err;
4628 }
4629
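/* Check whether the EEE configuration in the PHY still matches what we
 * last requested in tp->eee.  Returns true when no EEE reconfiguration
 * (and hence no PHY reset) is needed.
 */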
4630 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4631 {
4632         struct ethtool_eee eee;
4633
4634         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4635                 return true;
4636
4637         tg3_eee_pull_config(tp, &eee);
4638
4639         if (tp->eee.eee_enabled) {
4640                 if (tp->eee.advertised != eee.advertised ||
4641                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4642                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4643                         return false;
4644         } else {
4645                 /* EEE is disabled but we're advertising */
4646                 if (eee.advertised)
4647                         return false;
4648         }
4649
4650         return true;
4651 }
4652
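/* Verify that the PHY's advertisement registers agree with
 * tp->link_config, including the pause bits when running full duplex
 * and the forced-master workaround on 5701 A0/B0.  *lcladv is filled
 * with the raw MII_ADVERTISE value for the caller's flow control
 * resolution.
 */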
4653 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4654 {
4655         u32 advmsk, tgtadv, advertising;
4656
4657         advertising = tp->link_config.advertising;
4658         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4659
4660         advmsk = ADVERTISE_ALL;
4661         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4662                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4663                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4664         }
4665
4666         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4667                 return false;
4668
4669         if ((*lcladv & advmsk) != tgtadv)
4670                 return false;
4671
4672         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4673                 u32 tg3_ctrl;
4674
4675                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4676
4677                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4678                         return false;
4679
4680                 if (tgtadv &&
4681                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4682                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4683                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4684                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4685                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4686                 } else {
4687                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4688                 }
4689
4690                 if (tg3_ctrl != tgtadv)
4691                         return false;
4692         }
4693
4694         return true;
4695 }
4696
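/* Fetch the link partner's advertised abilities (MII_STAT1000 plus
 * MII_LPA), convert them to ethtool LPA bits and cache the result in
 * tp->link_config.rmt_adv.  Returns false if a PHY read fails.
 */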
4697 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4698 {
4699         u32 lpeth = 0;
4700
4701         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4702                 u32 val;
4703
4704                 if (tg3_readphy(tp, MII_STAT1000, &val))
4705                         return false;
4706
4707                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4708         }
4709
4710         if (tg3_readphy(tp, MII_LPA, rmtadv))
4711                 return false;
4712
4713         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4714         tp->link_config.rmt_adv = lpeth;
4715
4716         return true;
4717 }
4718
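/* Propagate a link state change to the networking core via
 * netif_carrier_on/off() and log it.  Returns true if the link state
 * actually changed.
 */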
4719 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4720 {
4721         if (curr_link_up != tp->link_up) {
4722                 if (curr_link_up) {
4723                         netif_carrier_on(tp->dev);
4724                 } else {
4725                         netif_carrier_off(tp->dev);
4726                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4727                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4728                 }
4729
4730                 tg3_link_report(tp);
4731                 return true;
4732         }
4733
4734         return false;
4735 }
4736
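/* Ack any latched MAC status change bits (sync, config, MI completion,
 * link state) so stale change indications do not confuse the link
 * probing that follows.
 */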
4737 static void tg3_clear_mac_status(struct tg3 *tp)
4738 {
4739         tw32(MAC_EVENT, 0);
4740
4741         tw32_f(MAC_STATUS,
4742                MAC_STATUS_SYNC_CHANGED |
4743                MAC_STATUS_CFG_CHANGED |
4744                MAC_STATUS_MI_COMPLETION |
4745                MAC_STATUS_LNKSTATE_CHANGED);
4746         udelay(40);
4747 }
4748
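/* Program the CPMU EEE registers (link-idle detection, LPI exit
 * timing, debounce timers and the EEE mode word) from tp->eee.  When
 * EEE is disabled, the mode word is simply cleared.
 */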
4749 static void tg3_setup_eee(struct tg3 *tp)
4750 {
4751         u32 val;
4752
4753         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4754               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4755         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4756                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4757
4758         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4759
4760         tw32_f(TG3_CPMU_EEE_CTRL,
4761                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4762
4763         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4764               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4765               TG3_CPMU_EEEMD_LPI_IN_RX |
4766               TG3_CPMU_EEEMD_EEE_ENABLE;
4767
4768         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4769                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4770
4771         if (tg3_flag(tp, ENABLE_APE))
4772                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4773
4774         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4775
4776         tw32_f(TG3_CPMU_EEE_DBTMR1,
4777                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4778                (tp->eee.tx_lpi_timer & 0xffff));
4779
4780         tw32_f(TG3_CPMU_EEE_DBTMR2,
4781                TG3_CPMU_DBTMR2_APE_TX_2047US |
4782                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4783 }
4784
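/* Bring up, or re-evaluate, the link on a copper PHY.  This applies
 * the various PHY-specific workarounds, determines whether the current
 * link matches the requested configuration, restarts negotiation via
 * tg3_phy_copper_begin() when it does not, and finally programs the
 * MAC (port mode, duplex, LEDs, flow control) to match the link.
 */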
4785 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4786 {
4787         bool current_link_up;
4788         u32 bmsr, val;
4789         u32 lcl_adv, rmt_adv;
4790         u16 current_speed;
4791         u8 current_duplex;
4792         int i, err;
4793
4794         tg3_clear_mac_status(tp);
4795
4796         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4797                 tw32_f(MAC_MI_MODE,
4798                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4799                 udelay(80);
4800         }
4801
4802         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4803
4804         /* Some third-party PHYs need to be reset when the link
4805          * goes down.
4806          */
4807         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4808              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4809              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4810             tp->link_up) {
4811                 tg3_readphy(tp, MII_BMSR, &bmsr);
4812                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813                     !(bmsr & BMSR_LSTATUS))
4814                         force_reset = true;
4815         }
4816         if (force_reset)
4817                 tg3_phy_reset(tp);
4818
4819         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4820                 tg3_readphy(tp, MII_BMSR, &bmsr);
4821                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4822                     !tg3_flag(tp, INIT_COMPLETE))
4823                         bmsr = 0;
4824
4825                 if (!(bmsr & BMSR_LSTATUS)) {
4826                         err = tg3_init_5401phy_dsp(tp);
4827                         if (err)
4828                                 return err;
4829
4830                         tg3_readphy(tp, MII_BMSR, &bmsr);
4831                         for (i = 0; i < 1000; i++) {
4832                                 udelay(10);
4833                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4834                                     (bmsr & BMSR_LSTATUS)) {
4835                                         udelay(40);
4836                                         break;
4837                                 }
4838                         }
4839
4840                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4841                             TG3_PHY_REV_BCM5401_B0 &&
4842                             !(bmsr & BMSR_LSTATUS) &&
4843                             tp->link_config.active_speed == SPEED_1000) {
4844                                 err = tg3_phy_reset(tp);
4845                                 if (!err)
4846                                         err = tg3_init_5401phy_dsp(tp);
4847                                 if (err)
4848                                         return err;
4849                         }
4850                 }
4851         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4852                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4853                 /* 5701 {A0,B0} CRC bug workaround */
4854                 tg3_writephy(tp, 0x15, 0x0a75);
4855                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4856                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4857                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4858         }
4859
4860         /* Clear pending interrupts... */
4861         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4863
4864         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4865                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4866         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4867                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4868
4869         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4870             tg3_asic_rev(tp) == ASIC_REV_5701) {
4871                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4872                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4873                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4874                 else
4875                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4876         }
4877
4878         current_link_up = false;
4879         current_speed = SPEED_UNKNOWN;
4880         current_duplex = DUPLEX_UNKNOWN;
4881         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4882         tp->link_config.rmt_adv = 0;
4883
4884         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4885                 err = tg3_phy_auxctl_read(tp,
4886                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4887                                           &val);
4888                 if (!err && !(val & (1 << 10))) {
4889                         tg3_phy_auxctl_write(tp,
4890                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4891                                              val | (1 << 10));
4892                         goto relink;
4893                 }
4894         }
4895
4896         bmsr = 0;
4897         for (i = 0; i < 100; i++) {
4898                 tg3_readphy(tp, MII_BMSR, &bmsr);
4899                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4900                     (bmsr & BMSR_LSTATUS))
4901                         break;
4902                 udelay(40);
4903         }
4904
4905         if (bmsr & BMSR_LSTATUS) {
4906                 u32 aux_stat, bmcr;
4907
4908                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4909                 for (i = 0; i < 2000; i++) {
4910                         udelay(10);
4911                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4912                             aux_stat)
4913                                 break;
4914                 }
4915
4916                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4917                                              &current_speed,
4918                                              &current_duplex);
4919
4920                 bmcr = 0;
4921                 for (i = 0; i < 200; i++) {
4922                         tg3_readphy(tp, MII_BMCR, &bmcr);
4923                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4924                                 continue;
4925                         if (bmcr && bmcr != 0x7fff)
4926                                 break;
4927                         udelay(10);
4928                 }
4929
4930                 lcl_adv = 0;
4931                 rmt_adv = 0;
4932
4933                 tp->link_config.active_speed = current_speed;
4934                 tp->link_config.active_duplex = current_duplex;
4935
4936                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4937                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4938
4939                         if ((bmcr & BMCR_ANENABLE) &&
4940                             eee_config_ok &&
4941                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4942                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4943                                 current_link_up = true;
4944
4945                         /* Changes to the EEE settings take effect only after
4946                          * a PHY reset.  If we skipped the reset because Link
4947                          * Flap Avoidance is enabled, do it now.
4948                          */
4949                         if (!eee_config_ok &&
4950                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4951                             !force_reset) {
4952                                 tg3_setup_eee(tp);
4953                                 tg3_phy_reset(tp);
4954                         }
4955                 } else {
4956                         if (!(bmcr & BMCR_ANENABLE) &&
4957                             tp->link_config.speed == current_speed &&
4958                             tp->link_config.duplex == current_duplex) {
4959                                 current_link_up = true;
4960                         }
4961                 }
4962
4963                 if (current_link_up &&
4964                     tp->link_config.active_duplex == DUPLEX_FULL) {
4965                         u32 reg, bit;
4966
4967                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4968                                 reg = MII_TG3_FET_GEN_STAT;
4969                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4970                         } else {
4971                                 reg = MII_TG3_EXT_STAT;
4972                                 bit = MII_TG3_EXT_STAT_MDIX;
4973                         }
4974
4975                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4976                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4977
4978                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4979                 }
4980         }
4981
4982 relink:
4983         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4984                 tg3_phy_copper_begin(tp);
4985
4986                 if (tg3_flag(tp, ROBOSWITCH)) {
4987                         current_link_up = true;
4988                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4989                         current_speed = SPEED_1000;
4990                         current_duplex = DUPLEX_FULL;
4991                         tp->link_config.active_speed = current_speed;
4992                         tp->link_config.active_duplex = current_duplex;
4993                 }
4994
4995                 tg3_readphy(tp, MII_BMSR, &bmsr);
4996                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4997                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4998                         current_link_up = true;
4999         }
5000
5001         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5002         if (current_link_up) {
5003                 if (tp->link_config.active_speed == SPEED_100 ||
5004                     tp->link_config.active_speed == SPEED_10)
5005                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5006                 else
5007                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5008         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5009                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5010         else
5011                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5012
5013         /* In order for the 5750 core in the BCM4785 chip to work properly
5014          * in RGMII mode, the LED Control Register must be set up.
5015          */
5016         if (tg3_flag(tp, RGMII_MODE)) {
5017                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5018                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5019
5020                 if (tp->link_config.active_speed == SPEED_10)
5021                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5022                 else if (tp->link_config.active_speed == SPEED_100)
5023                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5024                                      LED_CTRL_100MBPS_ON);
5025                 else if (tp->link_config.active_speed == SPEED_1000)
5026                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5027                                      LED_CTRL_1000MBPS_ON);
5028
5029                 tw32(MAC_LED_CTRL, led_ctrl);
5030                 udelay(40);
5031         }
5032
5033         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5034         if (tp->link_config.active_duplex == DUPLEX_HALF)
5035                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5036
5037         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5038                 if (current_link_up &&
5039                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5040                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5041                 else
5042                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5043         }
5044
5045         /* Without this setting the Netgear GA302T PHY does not
5046          * send/receive packets; the reason is not understood.
5047          */
5048         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5049             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5050                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5051                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5052                 udelay(80);
5053         }
5054
5055         tw32_f(MAC_MODE, tp->mac_mode);
5056         udelay(40);
5057
5058         tg3_phy_eee_adjust(tp, current_link_up);
5059
5060         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5061                 /* Polled via timer. */
5062                 tw32_f(MAC_EVENT, 0);
5063         } else {
5064                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5065         }
5066         udelay(40);
5067
5068         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5069             current_link_up &&
5070             tp->link_config.active_speed == SPEED_1000 &&
5071             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5072                 udelay(120);
5073                 tw32_f(MAC_STATUS,
5074                      (MAC_STATUS_SYNC_CHANGED |
5075                       MAC_STATUS_CFG_CHANGED));
5076                 udelay(40);
5077                 tg3_write_mem(tp,
5078                               NIC_SRAM_FIRMWARE_MBOX,
5079                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5080         }
5081
5082         /* Prevent send BD corruption: CLKREQ must be disabled at 10/100. */
5083         if (tg3_flag(tp, CLKREQ_BUG)) {
5084                 if (tp->link_config.active_speed == SPEED_100 ||
5085                     tp->link_config.active_speed == SPEED_10)
5086                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5087                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5088                 else
5089                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5090                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5091         }
5092
5093         tg3_test_and_report_link_chg(tp, current_link_up);
5094
5095         return 0;
5096 }
5097
5098 struct tg3_fiber_aneginfo {
5099         int state;
5100 #define ANEG_STATE_UNKNOWN              0
5101 #define ANEG_STATE_AN_ENABLE            1
5102 #define ANEG_STATE_RESTART_INIT         2
5103 #define ANEG_STATE_RESTART              3
5104 #define ANEG_STATE_DISABLE_LINK_OK      4
5105 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5106 #define ANEG_STATE_ABILITY_DETECT       6
5107 #define ANEG_STATE_ACK_DETECT_INIT      7
5108 #define ANEG_STATE_ACK_DETECT           8
5109 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5110 #define ANEG_STATE_COMPLETE_ACK         10
5111 #define ANEG_STATE_IDLE_DETECT_INIT     11
5112 #define ANEG_STATE_IDLE_DETECT          12
5113 #define ANEG_STATE_LINK_OK              13
5114 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5115 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5116
5117         u32 flags;
5118 #define MR_AN_ENABLE            0x00000001
5119 #define MR_RESTART_AN           0x00000002
5120 #define MR_AN_COMPLETE          0x00000004
5121 #define MR_PAGE_RX              0x00000008
5122 #define MR_NP_LOADED            0x00000010
5123 #define MR_TOGGLE_TX            0x00000020
5124 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5125 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5126 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5127 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5128 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5129 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5130 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5131 #define MR_TOGGLE_RX            0x00002000
5132 #define MR_NP_RX                0x00004000
5133
5134 #define MR_LINK_OK              0x80000000
5135
5136         unsigned long link_time, cur_time;
5137
5138         u32 ability_match_cfg;
5139         int ability_match_count;
5140
5141         char ability_match, idle_match, ack_match;
5142
5143         u32 txconfig, rxconfig;
5144 #define ANEG_CFG_NP             0x00000080
5145 #define ANEG_CFG_ACK            0x00000040
5146 #define ANEG_CFG_RF2            0x00000020
5147 #define ANEG_CFG_RF1            0x00000010
5148 #define ANEG_CFG_PS2            0x00000001
5149 #define ANEG_CFG_PS1            0x00008000
5150 #define ANEG_CFG_HD             0x00004000
5151 #define ANEG_CFG_FD             0x00002000
5152 #define ANEG_CFG_INVAL          0x00001f06
5153
5154 };
5155 #define ANEG_OK         0
5156 #define ANEG_DONE       1
5157 #define ANEG_TIMER_ENAB 2
5158 #define ANEG_FAILED     -1
5159
5160 #define ANEG_STATE_SETTLE_TIME  10000
5161
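/* One step of a software implementation of the IEEE 802.3z clause 37
 * autonegotiation state machine for fiber links.  Called repeatedly
 * (roughly once per microsecond) from fiber_autoneg(); returns ANEG_OK
 * to keep stepping, ANEG_TIMER_ENAB while waiting out a settle time,
 * and ANEG_DONE or ANEG_FAILED once negotiation finishes.
 */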
5162 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5163                                    struct tg3_fiber_aneginfo *ap)
5164 {
5165         u16 flowctrl;
5166         unsigned long delta;
5167         u32 rx_cfg_reg;
5168         int ret;
5169
5170         if (ap->state == ANEG_STATE_UNKNOWN) {
5171                 ap->rxconfig = 0;
5172                 ap->link_time = 0;
5173                 ap->cur_time = 0;
5174                 ap->ability_match_cfg = 0;
5175                 ap->ability_match_count = 0;
5176                 ap->ability_match = 0;
5177                 ap->idle_match = 0;
5178                 ap->ack_match = 0;
5179         }
5180         ap->cur_time++;
5181
5182         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5183                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5184
5185                 if (rx_cfg_reg != ap->ability_match_cfg) {
5186                         ap->ability_match_cfg = rx_cfg_reg;
5187                         ap->ability_match = 0;
5188                         ap->ability_match_count = 0;
5189                 } else {
5190                         if (++ap->ability_match_count > 1) {
5191                                 ap->ability_match = 1;
5192                                 ap->ability_match_cfg = rx_cfg_reg;
5193                         }
5194                 }
5195                 if (rx_cfg_reg & ANEG_CFG_ACK)
5196                         ap->ack_match = 1;
5197                 else
5198                         ap->ack_match = 0;
5199
5200                 ap->idle_match = 0;
5201         } else {
5202                 ap->idle_match = 1;
5203                 ap->ability_match_cfg = 0;
5204                 ap->ability_match_count = 0;
5205                 ap->ability_match = 0;
5206                 ap->ack_match = 0;
5207
5208                 rx_cfg_reg = 0;
5209         }
5210
5211         ap->rxconfig = rx_cfg_reg;
5212         ret = ANEG_OK;
5213
5214         switch (ap->state) {
5215         case ANEG_STATE_UNKNOWN:
5216                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5217                         ap->state = ANEG_STATE_AN_ENABLE;
5218
5219                 /* fall through */
5220         case ANEG_STATE_AN_ENABLE:
5221                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5222                 if (ap->flags & MR_AN_ENABLE) {
5223                         ap->link_time = 0;
5224                         ap->cur_time = 0;
5225                         ap->ability_match_cfg = 0;
5226                         ap->ability_match_count = 0;
5227                         ap->ability_match = 0;
5228                         ap->idle_match = 0;
5229                         ap->ack_match = 0;
5230
5231                         ap->state = ANEG_STATE_RESTART_INIT;
5232                 } else {
5233                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5234                 }
5235                 break;
5236
5237         case ANEG_STATE_RESTART_INIT:
5238                 ap->link_time = ap->cur_time;
5239                 ap->flags &= ~(MR_NP_LOADED);
5240                 ap->txconfig = 0;
5241                 tw32(MAC_TX_AUTO_NEG, 0);
5242                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5243                 tw32_f(MAC_MODE, tp->mac_mode);
5244                 udelay(40);
5245
5246                 ret = ANEG_TIMER_ENAB;
5247                 ap->state = ANEG_STATE_RESTART;
5248
5249                 /* fall through */
5250         case ANEG_STATE_RESTART:
5251                 delta = ap->cur_time - ap->link_time;
5252                 if (delta > ANEG_STATE_SETTLE_TIME)
5253                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5254                 else
5255                         ret = ANEG_TIMER_ENAB;
5256                 break;
5257
5258         case ANEG_STATE_DISABLE_LINK_OK:
5259                 ret = ANEG_DONE;
5260                 break;
5261
5262         case ANEG_STATE_ABILITY_DETECT_INIT:
5263                 ap->flags &= ~(MR_TOGGLE_TX);
5264                 ap->txconfig = ANEG_CFG_FD;
5265                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5266                 if (flowctrl & ADVERTISE_1000XPAUSE)
5267                         ap->txconfig |= ANEG_CFG_PS1;
5268                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5269                         ap->txconfig |= ANEG_CFG_PS2;
5270                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5271                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5272                 tw32_f(MAC_MODE, tp->mac_mode);
5273                 udelay(40);
5274
5275                 ap->state = ANEG_STATE_ABILITY_DETECT;
5276                 break;
5277
5278         case ANEG_STATE_ABILITY_DETECT:
5279                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5280                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5281                 break;
5282
5283         case ANEG_STATE_ACK_DETECT_INIT:
5284                 ap->txconfig |= ANEG_CFG_ACK;
5285                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5286                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5287                 tw32_f(MAC_MODE, tp->mac_mode);
5288                 udelay(40);
5289
5290                 ap->state = ANEG_STATE_ACK_DETECT;
5291
5292                 /* fall through */
5293         case ANEG_STATE_ACK_DETECT:
5294                 if (ap->ack_match != 0) {
5295                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5296                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5297                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5298                         } else {
5299                                 ap->state = ANEG_STATE_AN_ENABLE;
5300                         }
5301                 } else if (ap->ability_match != 0 &&
5302                            ap->rxconfig == 0) {
5303                         ap->state = ANEG_STATE_AN_ENABLE;
5304                 }
5305                 break;
5306
5307         case ANEG_STATE_COMPLETE_ACK_INIT:
5308                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5309                         ret = ANEG_FAILED;
5310                         break;
5311                 }
5312                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5313                                MR_LP_ADV_HALF_DUPLEX |
5314                                MR_LP_ADV_SYM_PAUSE |
5315                                MR_LP_ADV_ASYM_PAUSE |
5316                                MR_LP_ADV_REMOTE_FAULT1 |
5317                                MR_LP_ADV_REMOTE_FAULT2 |
5318                                MR_LP_ADV_NEXT_PAGE |
5319                                MR_TOGGLE_RX |
5320                                MR_NP_RX);
5321                 if (ap->rxconfig & ANEG_CFG_FD)
5322                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5323                 if (ap->rxconfig & ANEG_CFG_HD)
5324                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5325                 if (ap->rxconfig & ANEG_CFG_PS1)
5326                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5327                 if (ap->rxconfig & ANEG_CFG_PS2)
5328                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5329                 if (ap->rxconfig & ANEG_CFG_RF1)
5330                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5331                 if (ap->rxconfig & ANEG_CFG_RF2)
5332                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5333                 if (ap->rxconfig & ANEG_CFG_NP)
5334                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5335
5336                 ap->link_time = ap->cur_time;
5337
5338                 ap->flags ^= (MR_TOGGLE_TX);
5339                 if (ap->rxconfig & 0x0008)
5340                         ap->flags |= MR_TOGGLE_RX;
5341                 if (ap->rxconfig & ANEG_CFG_NP)
5342                         ap->flags |= MR_NP_RX;
5343                 ap->flags |= MR_PAGE_RX;
5344
5345                 ap->state = ANEG_STATE_COMPLETE_ACK;
5346                 ret = ANEG_TIMER_ENAB;
5347                 break;
5348
5349         case ANEG_STATE_COMPLETE_ACK:
5350                 if (ap->ability_match != 0 &&
5351                     ap->rxconfig == 0) {
5352                         ap->state = ANEG_STATE_AN_ENABLE;
5353                         break;
5354                 }
5355                 delta = ap->cur_time - ap->link_time;
5356                 if (delta > ANEG_STATE_SETTLE_TIME) {
5357                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5358                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359                         } else {
5360                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5361                                     !(ap->flags & MR_NP_RX)) {
5362                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5363                                 } else {
5364                                         ret = ANEG_FAILED;
5365                                 }
5366                         }
5367                 }
5368                 break;
5369
5370         case ANEG_STATE_IDLE_DETECT_INIT:
5371                 ap->link_time = ap->cur_time;
5372                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5373                 tw32_f(MAC_MODE, tp->mac_mode);
5374                 udelay(40);
5375
5376                 ap->state = ANEG_STATE_IDLE_DETECT;
5377                 ret = ANEG_TIMER_ENAB;
5378                 break;
5379
5380         case ANEG_STATE_IDLE_DETECT:
5381                 if (ap->ability_match != 0 &&
5382                     ap->rxconfig == 0) {
5383                         ap->state = ANEG_STATE_AN_ENABLE;
5384                         break;
5385                 }
5386                 delta = ap->cur_time - ap->link_time;
5387                 if (delta > ANEG_STATE_SETTLE_TIME) {
5388                         /* XXX another gem from the Broadcom driver :( */
5389                         ap->state = ANEG_STATE_LINK_OK;
5390                 }
5391                 break;
5392
5393         case ANEG_STATE_LINK_OK:
5394                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5395                 ret = ANEG_DONE;
5396                 break;
5397
5398         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5399                 /* ??? unimplemented */
5400                 break;
5401
5402         case ANEG_STATE_NEXT_PAGE_WAIT:
5403                 /* ??? unimplemented */
5404                 break;
5405
5406         default:
5407                 ret = ANEG_FAILED;
5408                 break;
5409         }
5410
5411         return ret;
5412 }
5413
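/* Run the software autoneg state machine to completion (bounded at
 * roughly 195 ms) while the MAC sends config code words, then hand the
 * final tx/rx config words back to the caller.  Returns 1 on a
 * successful negotiation, 0 otherwise.
 */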
5414 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5415 {
5416         int res = 0;
5417         struct tg3_fiber_aneginfo aninfo;
5418         int status = ANEG_FAILED;
5419         unsigned int tick;
5420         u32 tmp;
5421
5422         tw32_f(MAC_TX_AUTO_NEG, 0);
5423
5424         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5425         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5426         udelay(40);
5427
5428         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5429         udelay(40);
5430
5431         memset(&aninfo, 0, sizeof(aninfo));
5432         aninfo.flags |= MR_AN_ENABLE;
5433         aninfo.state = ANEG_STATE_UNKNOWN;
5434         aninfo.cur_time = 0;
5435         tick = 0;
5436         while (++tick < 195000) {
5437                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5438                 if (status == ANEG_DONE || status == ANEG_FAILED)
5439                         break;
5440
5441                 udelay(1);
5442         }
5443
5444         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5445         tw32_f(MAC_MODE, tp->mac_mode);
5446         udelay(40);
5447
5448         *txflags = aninfo.txconfig;
5449         *rxflags = aninfo.flags;
5450
5451         if (status == ANEG_DONE &&
5452             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5453                              MR_LP_ADV_FULL_DUPLEX)))
5454                 res = 1;
5455
5456         return res;
5457 }
5458
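/* One-time initialization of the BCM8002 SerDes PHY.  The register
 * writes below use undocumented vendor values; the inline comments
 * describe their intent as far as it is known.
 */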
5459 static void tg3_init_bcm8002(struct tg3 *tp)
5460 {
5461         u32 mac_status = tr32(MAC_STATUS);
5462         int i;
5463
5464         /* Reset when initializing for the first time, or when we have link. */
5465         if (tg3_flag(tp, INIT_COMPLETE) &&
5466             !(mac_status & MAC_STATUS_PCS_SYNCED))
5467                 return;
5468
5469         /* Set PLL lock range. */
5470         tg3_writephy(tp, 0x16, 0x8007);
5471
5472         /* SW reset */
5473         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5474
5475         /* Wait for reset to complete. */
5476         /* XXX schedule_timeout() ... */
5477         for (i = 0; i < 500; i++)
5478                 udelay(10);
5479
5480         /* Config mode; select PMA/Ch 1 regs. */
5481         tg3_writephy(tp, 0x10, 0x8411);
5482
5483         /* Enable auto-lock and comdet, select txclk for tx. */
5484         tg3_writephy(tp, 0x11, 0x0a10);
5485
5486         tg3_writephy(tp, 0x18, 0x00a0);
5487         tg3_writephy(tp, 0x16, 0x41ff);
5488
5489         /* Assert and deassert POR. */
5490         tg3_writephy(tp, 0x13, 0x0400);
5491         udelay(40);
5492         tg3_writephy(tp, 0x13, 0x0000);
5493
5494         tg3_writephy(tp, 0x11, 0x0a50);
5495         udelay(40);
5496         tg3_writephy(tp, 0x11, 0x0a10);
5497
5498         /* Wait for signal to stabilize */
5499         /* XXX schedule_timeout() ... */
5500         for (i = 0; i < 15000; i++)
5501                 udelay(10);
5502
5503         /* Deselect the channel register so we can read the PHYID
5504          * later.
5505          */
5506         tg3_writephy(tp, 0x10, 0x8011);
5507 }
5508
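/* Fiber link setup using the hardware SG_DIG autoneg block.  Applies a
 * MAC_SERDES_CFG workaround on everything except 5704 A0/A1, resolves
 * flow control from the SG_DIG status, and falls back to parallel
 * detection when the partner never completes autoneg.  Returns the
 * resulting link state.
 */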
5509 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5510 {
5511         u16 flowctrl;
5512         bool current_link_up;
5513         u32 sg_dig_ctrl, sg_dig_status;
5514         u32 serdes_cfg, expected_sg_dig_ctrl;
5515         int workaround, port_a;
5516
5517         serdes_cfg = 0;
5518         expected_sg_dig_ctrl = 0;
5519         workaround = 0;
5520         port_a = 1;
5521         current_link_up = false;
5522
5523         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5524             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5525                 workaround = 1;
5526                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5527                         port_a = 0;
5528
5529                 /* Preserve bits 0-11, 13 and 14 (signal pre-emphasis)
5530                  * and bits 20-23 (voltage regulator). */
5531                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5532         }
5533
5534         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5535
5536         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5537                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5538                         if (workaround) {
5539                                 u32 val = serdes_cfg;
5540
5541                                 if (port_a)
5542                                         val |= 0xc010000;
5543                                 else
5544                                         val |= 0x4010000;
5545                                 tw32_f(MAC_SERDES_CFG, val);
5546                         }
5547
5548                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5549                 }
5550                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5551                         tg3_setup_flow_control(tp, 0, 0);
5552                         current_link_up = true;
5553                 }
5554                 goto out;
5555         }
5556
5557         /* Want auto-negotiation.  */
5558         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5559
5560         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5561         if (flowctrl & ADVERTISE_1000XPAUSE)
5562                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5563         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5564                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5565
5566         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5567                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5568                     tp->serdes_counter &&
5569                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5570                                     MAC_STATUS_RCVD_CFG)) ==
5571                      MAC_STATUS_PCS_SYNCED)) {
5572                         tp->serdes_counter--;
5573                         current_link_up = true;
5574                         goto out;
5575                 }
5576 restart_autoneg:
5577                 if (workaround)
5578                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5579                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5580                 udelay(5);
5581                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5582
5583                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5584                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5585         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5586                                  MAC_STATUS_SIGNAL_DET)) {
5587                 sg_dig_status = tr32(SG_DIG_STATUS);
5588                 mac_status = tr32(MAC_STATUS);
5589
5590                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5591                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5592                         u32 local_adv = 0, remote_adv = 0;
5593
5594                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5595                                 local_adv |= ADVERTISE_1000XPAUSE;
5596                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5597                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5598
5599                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5600                                 remote_adv |= LPA_1000XPAUSE;
5601                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5602                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5603
5604                         tp->link_config.rmt_adv =
5605                                            mii_adv_to_ethtool_adv_x(remote_adv);
5606
5607                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5608                         current_link_up = true;
5609                         tp->serdes_counter = 0;
5610                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5611                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5612                         if (tp->serdes_counter)
5613                                 tp->serdes_counter--;
5614                         else {
5615                                 if (workaround) {
5616                                         u32 val = serdes_cfg;
5617
5618                                         if (port_a)
5619                                                 val |= 0xc010000;
5620                                         else
5621                                                 val |= 0x4010000;
5622
5623                                         tw32_f(MAC_SERDES_CFG, val);
5624                                 }
5625
5626                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5627                                 udelay(40);
5628
5629                                 /* Link parallel detection: the link is up
5630                                  * only if we have PCS_SYNC and are not
5631                                  * receiving config code words. */
5632                                 mac_status = tr32(MAC_STATUS);
5633                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5634                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5635                                         tg3_setup_flow_control(tp, 0, 0);
5636                                         current_link_up = true;
5637                                         tp->phy_flags |=
5638                                                 TG3_PHYFLG_PARALLEL_DETECT;
5639                                         tp->serdes_counter =
5640                                                 SERDES_PARALLEL_DET_TIMEOUT;
5641                                 } else
5642                                         goto restart_autoneg;
5643                         }
5644                 }
5645         } else {
5646                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5647                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5648         }
5649
5650 out:
5651         return current_link_up;
5652 }
5653
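/* Fiber link setup without the hardware autoneg block: either run the
 * software state machine (fiber_autoneg()) and resolve flow control
 * from the exchanged config words, or, with autoneg disabled, simply
 * force a 1000 Mb/s full duplex link.  Returns the resulting link
 * state.
 */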
5654 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5655 {
5656         bool current_link_up = false;
5657
5658         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5659                 goto out;
5660
5661         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5662                 u32 txflags, rxflags;
5663                 int i;
5664
5665                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5666                         u32 local_adv = 0, remote_adv = 0;
5667
5668                         if (txflags & ANEG_CFG_PS1)
5669                                 local_adv |= ADVERTISE_1000XPAUSE;
5670                         if (txflags & ANEG_CFG_PS2)
5671                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5672
5673                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5674                                 remote_adv |= LPA_1000XPAUSE;
5675                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5676                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5677
5678                         tp->link_config.rmt_adv =
5679                                            mii_adv_to_ethtool_adv_x(remote_adv);
5680
5681                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5682
5683                         current_link_up = true;
5684                 }
5685                 for (i = 0; i < 30; i++) {
5686                         udelay(20);
5687                         tw32_f(MAC_STATUS,
5688                                (MAC_STATUS_SYNC_CHANGED |
5689                                 MAC_STATUS_CFG_CHANGED));
5690                         udelay(40);
5691                         if ((tr32(MAC_STATUS) &
5692                              (MAC_STATUS_SYNC_CHANGED |
5693                               MAC_STATUS_CFG_CHANGED)) == 0)
5694                                 break;
5695                 }
5696
5697                 mac_status = tr32(MAC_STATUS);
5698                 if (!current_link_up &&
5699                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5700                     !(mac_status & MAC_STATUS_RCVD_CFG))
5701                         current_link_up = true;
5702         } else {
5703                 tg3_setup_flow_control(tp, 0, 0);
5704
5705                 /* Force the link up at 1000 Mb/s full duplex. */
5706                 current_link_up = true;
5707
5708                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5709                 udelay(40);
5710
5711                 tw32_f(MAC_MODE, tp->mac_mode);
5712                 udelay(40);
5713         }
5714
5715 out:
5716         return current_link_up;
5717 }
5718
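/* Top-level link setup for TBI/fiber ports: choose hardware or
 * software autoneg, wait for the MAC status to settle, and drive the
 * link LEDs.  A link change, or a flow control/speed/duplex change on
 * an otherwise unchanged link, is reported up to the stack.
 */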
5719 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5720 {
5721         u32 orig_pause_cfg;
5722         u16 orig_active_speed;
5723         u8 orig_active_duplex;
5724         u32 mac_status;
5725         bool current_link_up;
5726         int i;
5727
5728         orig_pause_cfg = tp->link_config.active_flowctrl;
5729         orig_active_speed = tp->link_config.active_speed;
5730         orig_active_duplex = tp->link_config.active_duplex;
5731
5732         if (!tg3_flag(tp, HW_AUTONEG) &&
5733             tp->link_up &&
5734             tg3_flag(tp, INIT_COMPLETE)) {
5735                 mac_status = tr32(MAC_STATUS);
5736                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5737                                MAC_STATUS_SIGNAL_DET |
5738                                MAC_STATUS_CFG_CHANGED |
5739                                MAC_STATUS_RCVD_CFG);
5740                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5741                                    MAC_STATUS_SIGNAL_DET)) {
5742                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5743                                             MAC_STATUS_CFG_CHANGED));
5744                         return 0;
5745                 }
5746         }
5747
5748         tw32_f(MAC_TX_AUTO_NEG, 0);
5749
5750         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5751         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5752         tw32_f(MAC_MODE, tp->mac_mode);
5753         udelay(40);
5754
5755         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5756                 tg3_init_bcm8002(tp);
5757
5758         /* Enable link change events even when polling the serdes. */
5759         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5760         udelay(40);
5761
5762         current_link_up = false;
5763         tp->link_config.rmt_adv = 0;
5764         mac_status = tr32(MAC_STATUS);
5765
5766         if (tg3_flag(tp, HW_AUTONEG))
5767                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5768         else
5769                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5770
5771         tp->napi[0].hw_status->status =
5772                 (SD_STATUS_UPDATED |
5773                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5774
5775         for (i = 0; i < 100; i++) {
5776                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5777                                     MAC_STATUS_CFG_CHANGED));
5778                 udelay(5);
5779                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5780                                          MAC_STATUS_CFG_CHANGED |
5781                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5782                         break;
5783         }
5784
5785         mac_status = tr32(MAC_STATUS);
5786         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5787                 current_link_up = false;
5788                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5789                     tp->serdes_counter == 0) {
5790                         tw32_f(MAC_MODE, (tp->mac_mode |
5791                                           MAC_MODE_SEND_CONFIGS));
5792                         udelay(1);
5793                         tw32_f(MAC_MODE, tp->mac_mode);
5794                 }
5795         }
5796
5797         if (current_link_up) {
5798                 tp->link_config.active_speed = SPEED_1000;
5799                 tp->link_config.active_duplex = DUPLEX_FULL;
5800                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5801                                     LED_CTRL_LNKLED_OVERRIDE |
5802                                     LED_CTRL_1000MBPS_ON));
5803         } else {
5804                 tp->link_config.active_speed = SPEED_UNKNOWN;
5805                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5806                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5807                                     LED_CTRL_LNKLED_OVERRIDE |
5808                                     LED_CTRL_TRAFFIC_OVERRIDE));
5809         }
5810
5811         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5812                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5813                 if (orig_pause_cfg != now_pause_cfg ||
5814                     orig_active_speed != tp->link_config.active_speed ||
5815                     orig_active_duplex != tp->link_config.active_duplex)
5816                         tg3_link_report(tp);
5817         }
5818
5819         return 0;
5820 }
5821
5822 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5823 {
5824         int err = 0;
5825         u32 bmsr, bmcr;
5826         u16 current_speed = SPEED_UNKNOWN;
5827         u8 current_duplex = DUPLEX_UNKNOWN;
5828         bool current_link_up = false;
5829         u32 local_adv = 0, remote_adv = 0, sgsr;
5830
5831         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5832              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5833              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5834              (sgsr & SERDES_TG3_SGMII_MODE)) {
5835
5836                 if (force_reset)
5837                         tg3_phy_reset(tp);
5838
5839                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5840
5841                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5842                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843                 } else {
5844                         current_link_up = true;
5845                         if (sgsr & SERDES_TG3_SPEED_1000) {
5846                                 current_speed = SPEED_1000;
5847                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5849                                 current_speed = SPEED_100;
5850                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5851                         } else {
5852                                 current_speed = SPEED_10;
5853                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5854                         }
5855
5856                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5857                                 current_duplex = DUPLEX_FULL;
5858                         else
5859                                 current_duplex = DUPLEX_HALF;
5860                 }
5861
5862                 tw32_f(MAC_MODE, tp->mac_mode);
5863                 udelay(40);
5864
5865                 tg3_clear_mac_status(tp);
5866
5867                 goto fiber_setup_done;
5868         }
5869
5870         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5871         tw32_f(MAC_MODE, tp->mac_mode);
5872         udelay(40);
5873
5874         tg3_clear_mac_status(tp);
5875
5876         if (force_reset)
5877                 tg3_phy_reset(tp);
5878
5879         tp->link_config.rmt_adv = 0;
5880
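             /* BMSR's link-status bit is latched low per the MII spec, so
              * the register is read twice; the second read reflects the
              * current link state.
              */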
5881         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5883         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5884                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5885                         bmsr |= BMSR_LSTATUS;
5886                 else
5887                         bmsr &= ~BMSR_LSTATUS;
5888         }
5889
5890         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5891
5892         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5893             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5894                 /* do nothing, just check for link up at the end */
5895         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5896                 u32 adv, newadv;
5897
5898                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5899                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5900                                  ADVERTISE_1000XPAUSE |
5901                                  ADVERTISE_1000XPSE_ASYM |
5902                                  ADVERTISE_SLCT);
5903
5904                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5905                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5906
5907                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5908                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5909                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5910                         tg3_writephy(tp, MII_BMCR, bmcr);
5911
5912                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5913                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5914                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5915
5916                         return err;
5917                 }
5918         } else {
5919                 u32 new_bmcr;
5920
5921                 bmcr &= ~BMCR_SPEED1000;
5922                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5923
5924                 if (tp->link_config.duplex == DUPLEX_FULL)
5925                         new_bmcr |= BMCR_FULLDPLX;
5926
5927                 if (new_bmcr != bmcr) {
5928                         /* BMCR_SPEED1000 is a reserved bit that needs
5929                          * to be set on write.
5930                          */
5931                         new_bmcr |= BMCR_SPEED1000;
5932
5933                         /* Force a linkdown */
5934                         if (tp->link_up) {
5935                                 u32 adv;
5936
5937                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5938                                 adv &= ~(ADVERTISE_1000XFULL |
5939                                          ADVERTISE_1000XHALF |
5940                                          ADVERTISE_SLCT);
5941                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5942                                 tg3_writephy(tp, MII_BMCR, bmcr |
5943                                                            BMCR_ANRESTART |
5944                                                            BMCR_ANENABLE);
5945                                 udelay(10);
5946                                 tg3_carrier_off(tp);
5947                         }
5948                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5949                         bmcr = new_bmcr;
5950                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5952                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5953                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5954                                         bmsr |= BMSR_LSTATUS;
5955                                 else
5956                                         bmsr &= ~BMSR_LSTATUS;
5957                         }
5958                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5959                 }
5960         }
5961
5962         if (bmsr & BMSR_LSTATUS) {
5963                 current_speed = SPEED_1000;
5964                 current_link_up = true;
5965                 if (bmcr & BMCR_FULLDPLX)
5966                         current_duplex = DUPLEX_FULL;
5967                 else
5968                         current_duplex = DUPLEX_HALF;
5969
5970                 local_adv = 0;
5971                 remote_adv = 0;
5972
5973                 if (bmcr & BMCR_ANENABLE) {
5974                         u32 common;
5975
5976                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5977                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5978                         common = local_adv & remote_adv;
5979                         if (common & (ADVERTISE_1000XHALF |
5980                                       ADVERTISE_1000XFULL)) {
5981                                 if (common & ADVERTISE_1000XFULL)
5982                                         current_duplex = DUPLEX_FULL;
5983                                 else
5984                                         current_duplex = DUPLEX_HALF;
5985
5986                                 tp->link_config.rmt_adv =
5987                                            mii_adv_to_ethtool_adv_x(remote_adv);
5988                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5989                                 /* Link is up via parallel detect */
5990                         } else {
5991                                 current_link_up = false;
5992                         }
5993                 }
5994         }
5995
5996 fiber_setup_done:
5997         if (current_link_up && current_duplex == DUPLEX_FULL)
5998                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5999
6000         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6001         if (tp->link_config.active_duplex == DUPLEX_HALF)
6002                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6003
6004         tw32_f(MAC_MODE, tp->mac_mode);
6005         udelay(40);
6006
6007         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6008
6009         tp->link_config.active_speed = current_speed;
6010         tp->link_config.active_duplex = current_duplex;
6011
6012         tg3_test_and_report_link_chg(tp, current_link_up);
6013         return err;
6014 }
6015
6016 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6017 {
6018         if (tp->serdes_counter) {
6019                 /* Give autoneg time to complete. */
6020                 tp->serdes_counter--;
6021                 return;
6022         }
6023
6024         if (!tp->link_up &&
6025             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6026                 u32 bmcr;
6027
6028                 tg3_readphy(tp, MII_BMCR, &bmcr);
6029                 if (bmcr & BMCR_ANENABLE) {
6030                         u32 phy1, phy2;
6031
6032                         /* Select shadow register 0x1f */
6033                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6034                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6035
6036                         /* Select expansion interrupt status register */
6037                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6038                                          MII_TG3_DSP_EXP1_INT_STAT);
6039                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6041
6042                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6043                                 /* We have signal detect and are not receiving
6044                                  * config code words; the link is up via parallel
6045                                  * detection.
6046                                  */
6047
6048                                 bmcr &= ~BMCR_ANENABLE;
6049                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6050                                 tg3_writephy(tp, MII_BMCR, bmcr);
6051                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6052                         }
6053                 }
6054         } else if (tp->link_up &&
6055                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6056                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6057                 u32 phy2;
6058
6059                 /* Select expansion interrupt status register */
6060                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6061                                  MII_TG3_DSP_EXP1_INT_STAT);
6062                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6063                 if (phy2 & 0x20) {
6064                         u32 bmcr;
6065
6066                         /* Config code words received, turn on autoneg. */
6067                         tg3_readphy(tp, MII_BMCR, &bmcr);
6068                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6069
6070                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6071
6072                 }
6073         }
6074 }
6075
6076 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6077 {
6078         u32 val;
6079         int err;
6080
6081         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6082                 err = tg3_setup_fiber_phy(tp, force_reset);
6083         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6084                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6085         else
6086                 err = tg3_setup_copper_phy(tp, force_reset);
6087
6088         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6089                 u32 scale;
6090
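                     /* The prescaler values below (65, 6, 12) appear to track
                      * the 62.5, 6.25 and 12.5 MHz CPMU MAC-clock settings so
                      * the GRC timer keeps a roughly constant ~1 MHz timebase
                      * (an inference from the ratios, not a documented fact).
                      */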
6091                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6092                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6093                         scale = 65;
6094                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6095                         scale = 6;
6096                 else
6097                         scale = 12;
6098
6099                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6100                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6101                 tw32(GRC_MISC_CFG, val);
6102         }
6103
6104         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6105               (6 << TX_LENGTHS_IPG_SHIFT);
6106         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6107             tg3_asic_rev(tp) == ASIC_REV_5762)
6108                 val |= tr32(MAC_TX_LENGTHS) &
6109                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6110                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6111
6112         if (tp->link_config.active_speed == SPEED_1000 &&
6113             tp->link_config.active_duplex == DUPLEX_HALF)
6114                 tw32(MAC_TX_LENGTHS, val |
6115                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6116         else
6117                 tw32(MAC_TX_LENGTHS, val |
6118                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
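             /* Assuming the slot-time field counts 16-bit-time units, 32
              * gives the standard 512-bit slot while 0xff approximates the
              * extended ~4096-bit slot that half-duplex gigabit requires
              * for carrier extension.
              */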
6119
6120         if (!tg3_flag(tp, 5705_PLUS)) {
6121                 if (tp->link_up) {
6122                         tw32(HOSTCC_STAT_COAL_TICKS,
6123                              tp->coal.stats_block_coalesce_usecs);
6124                 } else {
6125                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6126                 }
6127         }
6128
6129         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6130                 val = tr32(PCIE_PWR_MGMT_THRESH);
6131                 if (!tp->link_up)
6132                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6133                               tp->pwrmgmt_thresh;
6134                 else
6135                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6136                 tw32(PCIE_PWR_MGMT_THRESH, val);
6137         }
6138
6139         return err;
6140 }
6141
6142 /* tp->lock must be held */
6143 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6144 {
6145         u64 stamp;
6146
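             /* The pre/post system-time snapshots bracket only the LSB
              * read, giving the PTP core a tight window for correlating
              * the hardware clock with system time (the gettimex64 /
              * PTP_SYS_OFFSET_EXTENDED mechanism).
              */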
6147         ptp_read_system_prets(sts);
6148         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6149         ptp_read_system_postts(sts);
6150         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6151
6152         return stamp;
6153 }
6154
6155 /* tp->lock must be held */
6156 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6157 {
6158         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6159
6160         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6161         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6162         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6163         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6164 }
6165
6166 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6167 static inline void tg3_full_unlock(struct tg3 *tp);
6168 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6169 {
6170         struct tg3 *tp = netdev_priv(dev);
6171
6172         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6173                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6174                                 SOF_TIMESTAMPING_SOFTWARE;
6175
6176         if (tg3_flag(tp, PTP_CAPABLE)) {
6177                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6178                                         SOF_TIMESTAMPING_RX_HARDWARE |
6179                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6180         }
6181
6182         if (tp->ptp_clock)
6183                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6184         else
6185                 info->phc_index = -1;
6186
6187         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6188
6189         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6190                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6191                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6192                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6193         return 0;
6194 }
6195
6196 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6197 {
6198         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6199         bool neg_adj = false;
6200         u32 correction = 0;
6201
6202         if (ppb < 0) {
6203                 neg_adj = true;
6204                 ppb = -ppb;
6205         }
6206
6207         /* Frequency adjustment is performed using hardware with a 24 bit
6208          * accumulator and a programmable correction value. On each clk, the
6209          * correction value gets added to the accumulator and when it
6210          * overflows, the time counter is incremented/decremented.
6211          *
6212          * So conversion from ppb to correction value is
6213          *              ppb * (1 << 24) / 1000000000
6214          */
6215         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6216                      TG3_EAV_REF_CLK_CORRECT_MASK;
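             /* Worked example with illustrative numbers: ppb = 1000 gives
              * correction = 1000 * 16777216 / 1000000000 = 16 after
              * truncation, so the 24-bit accumulator wraps about once
              * every 2^24 / 16 = 1048576 clocks.
              */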
6217
6218         tg3_full_lock(tp, 0);
6219
6220         if (correction)
6221                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6222                      TG3_EAV_REF_CLK_CORRECT_EN |
6223                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6224         else
6225                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6226
6227         tg3_full_unlock(tp);
6228
6229         return 0;
6230 }
6231
6232 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6233 {
6234         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235
6236         tg3_full_lock(tp, 0);
6237         tp->ptp_adjust += delta;
6238         tg3_full_unlock(tp);
6239
6240         return 0;
6241 }
6242
6243 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6244                             struct ptp_system_timestamp *sts)
6245 {
6246         u64 ns;
6247         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6248
6249         tg3_full_lock(tp, 0);
6250         ns = tg3_refclk_read(tp, sts);
6251         ns += tp->ptp_adjust;
6252         tg3_full_unlock(tp);
6253
6254         *ts = ns_to_timespec64(ns);
6255
6256         return 0;
6257 }
6258
6259 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6260                            const struct timespec64 *ts)
6261 {
6262         u64 ns;
6263         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6264
6265         ns = timespec64_to_ns(ts);
6266
6267         tg3_full_lock(tp, 0);
6268         tg3_refclk_write(tp, ns);
6269         tp->ptp_adjust = 0;
6270         tg3_full_unlock(tp);
6271
6272         return 0;
6273 }
6274
6275 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6276                           struct ptp_clock_request *rq, int on)
6277 {
6278         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6279         u32 clock_ctl;
6280         int rval = 0;
6281
6282         switch (rq->type) {
6283         case PTP_CLK_REQ_PEROUT:
6284                 if (rq->perout.index != 0)
6285                         return -EINVAL;
6286
6287                 tg3_full_lock(tp, 0);
6288                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6289                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6290
6291                 if (on) {
6292                         u64 nsec;
6293
6294                         nsec = rq->perout.start.sec * 1000000000ULL +
6295                                rq->perout.start.nsec;
6296
6297                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6298                                 netdev_warn(tp->dev,
6299                                             "Device supports only a one-shot timesync output, period must be 0\n");
6300                                 rval = -EINVAL;
6301                                 goto err_out;
6302                         }
6303
6304                         if (nsec & (1ULL << 63)) {
6305                                 netdev_warn(tp->dev,
6306                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6307                                 rval = -EINVAL;
6308                                 goto err_out;
6309                         }
6310
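                             /* Arm the one-shot comparator: when the
                              * free-running EAV clock reaches nsec,
                              * watchdog 0 drives the timesync GPIO
                              * (hence the single-pulse limitation
                              * warned about above).
                              */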
6311                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6312                         tw32(TG3_EAV_WATCHDOG0_MSB,
6313                              TG3_EAV_WATCHDOG0_EN |
6314                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6315
6316                         tw32(TG3_EAV_REF_CLCK_CTL,
6317                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6318                 } else {
6319                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6320                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6321                 }
6322
6323 err_out:
6324                 tg3_full_unlock(tp);
6325                 return rval;
6326
6327         default:
6328                 break;
6329         }
6330
6331         return -EOPNOTSUPP;
6332 }
6333
6334 static const struct ptp_clock_info tg3_ptp_caps = {
6335         .owner          = THIS_MODULE,
6336         .name           = "tg3 clock",
6337         .max_adj        = 250000000,
6338         .n_alarm        = 0,
6339         .n_ext_ts       = 0,
6340         .n_per_out      = 1,
6341         .n_pins         = 0,
6342         .pps            = 0,
6343         .adjfreq        = tg3_ptp_adjfreq,
6344         .adjtime        = tg3_ptp_adjtime,
6345         .gettimex64     = tg3_ptp_gettimex,
6346         .settime64      = tg3_ptp_settime,
6347         .enable         = tg3_ptp_enable,
6348 };
6349
6350 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6351                                      struct skb_shared_hwtstamps *timestamp)
6352 {
6353         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6354         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6355                                            tp->ptp_adjust);
6356 }
6357
6358 /* tp->lock must be held */
6359 static void tg3_ptp_init(struct tg3 *tp)
6360 {
6361         if (!tg3_flag(tp, PTP_CAPABLE))
6362                 return;
6363
6364         /* Initialize the hardware clock to the system time. */
6365         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6366         tp->ptp_adjust = 0;
6367         tp->ptp_info = tg3_ptp_caps;
6368 }
6369
6370 /* tp->lock must be held */
6371 static void tg3_ptp_resume(struct tg3 *tp)
6372 {
6373         if (!tg3_flag(tp, PTP_CAPABLE))
6374                 return;
6375
6376         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6377         tp->ptp_adjust = 0;
6378 }
6379
6380 static void tg3_ptp_fini(struct tg3 *tp)
6381 {
6382         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6383                 return;
6384
6385         ptp_clock_unregister(tp->ptp_clock);
6386         tp->ptp_clock = NULL;
6387         tp->ptp_adjust = 0;
6388 }
6389
6390 static inline int tg3_irq_sync(struct tg3 *tp)
6391 {
6392         return tp->irq_sync;
6393 }
6394
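     /* Read len bytes of registers starting at offset off into the dump
      * buffer.  dst is first advanced by off bytes so each register lands
      * at the buffer offset matching its register offset.
      */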
6395 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6396 {
6397         int i;
6398
6399         dst = (u32 *)((u8 *)dst + off);
6400         for (i = 0; i < len; i += sizeof(u32))
6401                 *dst++ = tr32(off + i);
6402 }
6403
6404 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6405 {
6406         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6407         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6408         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6409         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6410         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6412         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6413         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6414         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6415         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6416         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6417         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6418         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6419         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6420         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6421         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6422         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6423         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6424         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6425
6426         if (tg3_flag(tp, SUPPORT_MSIX))
6427                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6428
6429         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6430         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6431         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6432         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6433         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6434         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6435         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6436         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6437
6438         if (!tg3_flag(tp, 5705_PLUS)) {
6439                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6440                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6441                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6442         }
6443
6444         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6445         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6446         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6447         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6448         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6449
6450         if (tg3_flag(tp, NVRAM))
6451                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6452 }
6453
6454 static void tg3_dump_state(struct tg3 *tp)
6455 {
6456         int i;
6457         u32 *regs;
6458
6459         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6460         if (!regs)
6461                 return;
6462
6463         if (tg3_flag(tp, PCI_EXPRESS)) {
6464                 /* Read up to but not including private PCI registers */
6465                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6466                         regs[i / sizeof(u32)] = tr32(i);
6467         } else
6468                 tg3_dump_legacy_regs(tp, regs);
6469
6470         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6471                 if (!regs[i + 0] && !regs[i + 1] &&
6472                     !regs[i + 2] && !regs[i + 3])
6473                         continue;
6474
6475                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6476                            i * 4,
6477                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6478         }
6479
6480         kfree(regs);
6481
6482         for (i = 0; i < tp->irq_cnt; i++) {
6483                 struct tg3_napi *tnapi = &tp->napi[i];
6484
6485                 /* SW status block */
6486                 netdev_err(tp->dev,
6487                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6488                            i,
6489                            tnapi->hw_status->status,
6490                            tnapi->hw_status->status_tag,
6491                            tnapi->hw_status->rx_jumbo_consumer,
6492                            tnapi->hw_status->rx_consumer,
6493                            tnapi->hw_status->rx_mini_consumer,
6494                            tnapi->hw_status->idx[0].rx_producer,
6495                            tnapi->hw_status->idx[0].tx_consumer);
6496
6497                 netdev_err(tp->dev,
6498                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6499                            i,
6500                            tnapi->last_tag, tnapi->last_irq_tag,
6501                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6502                            tnapi->rx_rcb_ptr,
6503                            tnapi->prodring.rx_std_prod_idx,
6504                            tnapi->prodring.rx_std_cons_idx,
6505                            tnapi->prodring.rx_jmb_prod_idx,
6506                            tnapi->prodring.rx_jmb_cons_idx);
6507         }
6508 }
6509
6510 /* This is called whenever we suspect that the system chipset is re-
6511  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6512  * is bogus tx completions. We try to recover by setting the
6513  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6514  * in the workqueue.
6515  */
6516 static void tg3_tx_recover(struct tg3 *tp)
6517 {
6518         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6519                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6520
6521         netdev_warn(tp->dev,
6522                     "The system may be re-ordering memory-mapped I/O "
6523                     "cycles to the network device, attempting to recover. "
6524                     "Please report the problem to the driver maintainer "
6525                     "and include system chipset information.\n");
6526
6527         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6528 }
6529
6530 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6531 {
6532         /* Tell compiler to fetch tx indices from memory. */
6533         barrier();
6534         return tnapi->tx_pending -
6535                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6536 }
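     /* Worked example with assumed values: on a 512-entry TX ring with
      * tx_pending = 512, tx_prod = 10 and tx_cons = 500, the in-flight
      * count is (10 - 500) & 511 = 22, so tg3_tx_avail() returns 490.
      */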
6537
6538 /* Tigon3 never reports partial packet sends.  So we do not
6539  * need special logic to handle SKBs that have not had all
6540  * of their frags sent yet, like SunGEM does.
6541  */
6542 static void tg3_tx(struct tg3_napi *tnapi)
6543 {
6544         struct tg3 *tp = tnapi->tp;
6545         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6546         u32 sw_idx = tnapi->tx_cons;
6547         struct netdev_queue *txq;
6548         int index = tnapi - tp->napi;
6549         unsigned int pkts_compl = 0, bytes_compl = 0;
6550
6551         if (tg3_flag(tp, ENABLE_TSS))
6552                 index--;
6553
6554         txq = netdev_get_tx_queue(tp->dev, index);
6555
6556         while (sw_idx != hw_idx) {
6557                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6558                 struct sk_buff *skb = ri->skb;
6559                 int i, tx_bug = 0;
6560
6561                 if (unlikely(skb == NULL)) {
6562                         tg3_tx_recover(tp);
6563                         return;
6564                 }
6565
6566                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6567                         struct skb_shared_hwtstamps timestamp;
6568                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6569                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6570
6571                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6572
6573                         skb_tstamp_tx(skb, &timestamp);
6574                 }
6575
6576                 pci_unmap_single(tp->pdev,
6577                                  dma_unmap_addr(ri, mapping),
6578                                  skb_headlen(skb),
6579                                  PCI_DMA_TODEVICE);
6580
6581                 ri->skb = NULL;
6582
6583                 while (ri->fragmented) {
6584                         ri->fragmented = false;
6585                         sw_idx = NEXT_TX(sw_idx);
6586                         ri = &tnapi->tx_buffers[sw_idx];
6587                 }
6588
6589                 sw_idx = NEXT_TX(sw_idx);
6590
6591                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6592                         ri = &tnapi->tx_buffers[sw_idx];
6593                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6594                                 tx_bug = 1;
6595
6596                         pci_unmap_page(tp->pdev,
6597                                        dma_unmap_addr(ri, mapping),
6598                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6599                                        PCI_DMA_TODEVICE);
6600
6601                         while (ri->fragmented) {
6602                                 ri->fragmented = false;
6603                                 sw_idx = NEXT_TX(sw_idx);
6604                                 ri = &tnapi->tx_buffers[sw_idx];
6605                         }
6606
6607                         sw_idx = NEXT_TX(sw_idx);
6608                 }
6609
6610                 pkts_compl++;
6611                 bytes_compl += skb->len;
6612
6613                 dev_consume_skb_any(skb);
6614
6615                 if (unlikely(tx_bug)) {
6616                         tg3_tx_recover(tp);
6617                         return;
6618                 }
6619         }
6620
6621         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6622
6623         tnapi->tx_cons = sw_idx;
6624
6625         /* Need to make the tx_cons update visible to tg3_start_xmit()
6626          * before checking for netif_queue_stopped().  Without the
6627          * memory barrier, there is a small possibility that tg3_start_xmit()
6628          * will miss it and cause the queue to be stopped forever.
6629          */
6630         smp_mb();
6631
6632         if (unlikely(netif_tx_queue_stopped(txq) &&
6633                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6634                 __netif_tx_lock(txq, smp_processor_id());
6635                 if (netif_tx_queue_stopped(txq) &&
6636                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6637                         netif_tx_wake_queue(txq);
6638                 __netif_tx_unlock(txq);
6639         }
6640 }
6641
6642 static void tg3_frag_free(bool is_frag, void *data)
6643 {
6644         if (is_frag)
6645                 skb_free_frag(data);
6646         else
6647                 kfree(data);
6648 }
6649
6650 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6651 {
6652         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6653                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6654
6655         if (!ri->data)
6656                 return;
6657
6658         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6659                          map_sz, PCI_DMA_FROMDEVICE);
6660         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6661         ri->data = NULL;
6662 }
6663
6664
6665 /* Returns size of skb allocated or < 0 on error.
6666  *
6667  * We only need to fill in the address because the other members
6668  * of the RX descriptor are invariant, see tg3_init_rings.
6669  *
6670  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6671  * posting buffers we only dirty the first cache line of the RX
6672  * descriptor (containing the address).  Whereas for the RX status
6673  * buffers the cpu only reads the last cacheline of the RX descriptor
6674  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6675  */
6676 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6677                              u32 opaque_key, u32 dest_idx_unmasked,
6678                              unsigned int *frag_size)
6679 {
6680         struct tg3_rx_buffer_desc *desc;
6681         struct ring_info *map;
6682         u8 *data;
6683         dma_addr_t mapping;
6684         int skb_size, data_size, dest_idx;
6685
6686         switch (opaque_key) {
6687         case RXD_OPAQUE_RING_STD:
6688                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6689                 desc = &tpr->rx_std[dest_idx];
6690                 map = &tpr->rx_std_buffers[dest_idx];
6691                 data_size = tp->rx_pkt_map_sz;
6692                 break;
6693
6694         case RXD_OPAQUE_RING_JUMBO:
6695                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6696                 desc = &tpr->rx_jmb[dest_idx].std;
6697                 map = &tpr->rx_jmb_buffers[dest_idx];
6698                 data_size = TG3_RX_JMB_MAP_SZ;
6699                 break;
6700
6701         default:
6702                 return -EINVAL;
6703         }
6704
6705         /* Do not overwrite any of the map or rp information
6706          * until we are sure we can commit to a new buffer.
6707          *
6708          * Callers depend upon this behavior and assume that
6709          * we leave everything unchanged if we fail.
6710          */
6711         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6712                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6713         if (skb_size <= PAGE_SIZE) {
6714                 data = netdev_alloc_frag(skb_size);
6715                 *frag_size = skb_size;
6716         } else {
6717                 data = kmalloc(skb_size, GFP_ATOMIC);
6718                 *frag_size = 0;
6719         }
6720         if (!data)
6721                 return -ENOMEM;
6722
6723         mapping = pci_map_single(tp->pdev,
6724                                  data + TG3_RX_OFFSET(tp),
6725                                  data_size,
6726                                  PCI_DMA_FROMDEVICE);
6727         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6728                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6729                 return -EIO;
6730         }
6731
6732         map->data = data;
6733         dma_unmap_addr_set(map, mapping, mapping);
6734
6735         desc->addr_hi = ((u64)mapping >> 32);
6736         desc->addr_lo = ((u64)mapping & 0xffffffff);
6737
6738         return data_size;
6739 }
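     /* Sizing sketch under assumed values: with data_size = 1536,
      * TG3_RX_OFFSET(tp) = 2 and 64-byte cache lines, skb_size works out
      * to SKB_DATA_ALIGN(1538) + SKB_DATA_ALIGN(sizeof(struct
      * skb_shared_info)) = 1600 + ~320 bytes, comfortably below a 4 KiB
      * page, so the page-frag path is taken.
      */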
6740
6741 /* We only need to move the address over because the other
6742  * members of the RX descriptor are invariant.  See notes above
6743  * tg3_alloc_rx_data for full details.
6744  */
6745 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6746                            struct tg3_rx_prodring_set *dpr,
6747                            u32 opaque_key, int src_idx,
6748                            u32 dest_idx_unmasked)
6749 {
6750         struct tg3 *tp = tnapi->tp;
6751         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6752         struct ring_info *src_map, *dest_map;
6753         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6754         int dest_idx;
6755
6756         switch (opaque_key) {
6757         case RXD_OPAQUE_RING_STD:
6758                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6759                 dest_desc = &dpr->rx_std[dest_idx];
6760                 dest_map = &dpr->rx_std_buffers[dest_idx];
6761                 src_desc = &spr->rx_std[src_idx];
6762                 src_map = &spr->rx_std_buffers[src_idx];
6763                 break;
6764
6765         case RXD_OPAQUE_RING_JUMBO:
6766                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6767                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6768                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6769                 src_desc = &spr->rx_jmb[src_idx].std;
6770                 src_map = &spr->rx_jmb_buffers[src_idx];
6771                 break;
6772
6773         default:
6774                 return;
6775         }
6776
6777         dest_map->data = src_map->data;
6778         dma_unmap_addr_set(dest_map, mapping,
6779                            dma_unmap_addr(src_map, mapping));
6780         dest_desc->addr_hi = src_desc->addr_hi;
6781         dest_desc->addr_lo = src_desc->addr_lo;
6782
6783         /* Ensure that the update to the skb happens after the physical
6784          * addresses have been transferred to the new BD location.
6785          */
6786         smp_wmb();
6787
6788         src_map->data = NULL;
6789 }
6790
6791 /* The RX ring scheme is composed of multiple rings which post fresh
6792  * buffers to the chip, and one special ring the chip uses to report
6793  * status back to the host.
6794  *
6795  * The special ring reports the status of received packets to the
6796  * host.  The chip does not write into the original descriptor the
6797  * RX buffer was obtained from.  The chip simply takes the original
6798  * descriptor as provided by the host, updates the status and length
6799  * field, then writes this into the next status ring entry.
6800  *
6801  * Each ring the host uses to post buffers to the chip is described
6802  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6803  * it is first placed into the on-chip ram.  When the packet's length
6804  * is known, it walks down the TG3_BDINFO entries to select the ring.
6805  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6806  * which is within the range of the new packet's length is chosen.
6807  *
6808  * The "separate ring for rx status" scheme may sound queer, but it makes
6809  * sense from a cache coherency perspective.  If only the host writes
6810  * to the buffer post rings, and only the chip writes to the rx status
6811  * rings, then cache lines never move beyond shared-modified state.
6812  * If both the host and chip were to write into the same ring, cache line
6813  * eviction could occur since both entities want it in an exclusive state.
6814  */
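     /* In short, the flow described above is:
      *
      *   host --posts buffer BDs--> std/jumbo producer rings ---> chip
      *   chip --status + length---> return (status) ring ---> tg3_rx()
      */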
6815 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6816 {
6817         struct tg3 *tp = tnapi->tp;
6818         u32 work_mask, rx_std_posted = 0;
6819         u32 std_prod_idx, jmb_prod_idx;
6820         u32 sw_idx = tnapi->rx_rcb_ptr;
6821         u16 hw_idx;
6822         int received;
6823         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6824
6825         hw_idx = *(tnapi->rx_rcb_prod_idx);
6826         /*
6827          * We need to order the read of hw_idx and the read of
6828          * the opaque cookie.
6829          */
6830         rmb();
6831         work_mask = 0;
6832         received = 0;
6833         std_prod_idx = tpr->rx_std_prod_idx;
6834         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6835         while (sw_idx != hw_idx && budget > 0) {
6836                 struct ring_info *ri;
6837                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6838                 unsigned int len;
6839                 struct sk_buff *skb;
6840                 dma_addr_t dma_addr;
6841                 u32 opaque_key, desc_idx, *post_ptr;
6842                 u8 *data;
6843                 u64 tstamp = 0;
6844
6845                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6846                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6847                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6848                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6849                         dma_addr = dma_unmap_addr(ri, mapping);
6850                         data = ri->data;
6851                         post_ptr = &std_prod_idx;
6852                         rx_std_posted++;
6853                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6854                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6855                         dma_addr = dma_unmap_addr(ri, mapping);
6856                         data = ri->data;
6857                         post_ptr = &jmb_prod_idx;
6858                 } else
6859                         goto next_pkt_nopost;
6860
6861                 work_mask |= opaque_key;
6862
6863                 if (desc->err_vlan & RXD_ERR_MASK) {
6864                 drop_it:
6865                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6866                                        desc_idx, *post_ptr);
6867                 drop_it_no_recycle:
6868                         /* Other statistics kept track of by card. */
6869                         tp->rx_dropped++;
6870                         goto next_pkt;
6871                 }
6872
6873                 prefetch(data + TG3_RX_OFFSET(tp));
6874                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6875                       ETH_FCS_LEN;
6876
6877                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6878                      RXD_FLAG_PTPSTAT_PTPV1 ||
6879                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6880                      RXD_FLAG_PTPSTAT_PTPV2) {
6881                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6882                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6883                 }
6884
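                     /* Two receive paths: large packets keep their DMA
                      * buffer and are wrapped in an skb via build_skb()
                      * (no copy); small packets are copied into a fresh
                      * skb so the original buffer can be recycled to the
                      * ring.
                      */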
6885                 if (len > TG3_RX_COPY_THRESH(tp)) {
6886                         int skb_size;
6887                         unsigned int frag_size;
6888
6889                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6890                                                     *post_ptr, &frag_size);
6891                         if (skb_size < 0)
6892                                 goto drop_it;
6893
6894                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6895                                          PCI_DMA_FROMDEVICE);
6896
6897                         /* Ensure that the update to the data happens
6898                          * after the usage of the old DMA mapping.
6899                          */
6900                         smp_wmb();
6901
6902                         ri->data = NULL;
6903
6904                         skb = build_skb(data, frag_size);
6905                         if (!skb) {
6906                                 tg3_frag_free(frag_size != 0, data);
6907                                 goto drop_it_no_recycle;
6908                         }
6909                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6910                 } else {
6911                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6912                                        desc_idx, *post_ptr);
6913
6914                         skb = netdev_alloc_skb(tp->dev,
6915                                                len + TG3_RAW_IP_ALIGN);
6916                         if (skb == NULL)
6917                                 goto drop_it_no_recycle;
6918
6919                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6920                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6921                         memcpy(skb->data,
6922                                data + TG3_RX_OFFSET(tp),
6923                                len);
6924                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6925                 }
6926
6927                 skb_put(skb, len);
6928                 if (tstamp)
6929                         tg3_hwclock_to_timestamp(tp, tstamp,
6930                                                  skb_hwtstamps(skb));
6931
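                     /* A hardware TCP/UDP checksum result of 0xffff (all
                      * ones) indicates the packet checksummed correctly,
                      * so the stack may skip software verification.
                      */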
6932                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6933                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6934                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6935                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6936                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6937                 else
6938                         skb_checksum_none_assert(skb);
6939
6940                 skb->protocol = eth_type_trans(skb, tp->dev);
6941
6942                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6943                     skb->protocol != htons(ETH_P_8021Q) &&
6944                     skb->protocol != htons(ETH_P_8021AD)) {
6945                         dev_kfree_skb_any(skb);
6946                         goto drop_it_no_recycle;
6947                 }
6948
6949                 if (desc->type_flags & RXD_FLAG_VLAN &&
6950                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6951                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6952                                                desc->err_vlan & RXD_VLAN_MASK);
6953
6954                 napi_gro_receive(&tnapi->napi, skb);
6955
6956                 received++;
6957                 budget--;
6958
6959 next_pkt:
6960                 (*post_ptr)++;
6961
6962                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6963                         tpr->rx_std_prod_idx = std_prod_idx &
6964                                                tp->rx_std_ring_mask;
6965                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6966                                      tpr->rx_std_prod_idx);
6967                         work_mask &= ~RXD_OPAQUE_RING_STD;
6968                         rx_std_posted = 0;
6969                 }
6970 next_pkt_nopost:
6971                 sw_idx++;
6972                 sw_idx &= tp->rx_ret_ring_mask;
6973
6974                 /* Refresh hw_idx to see if there is new work */
6975                 if (sw_idx == hw_idx) {
6976                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6977                         rmb();
6978                 }
6979         }
6980
6981         /* ACK the status ring. */
6982         tnapi->rx_rcb_ptr = sw_idx;
6983         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6984
6985         /* Refill RX ring(s). */
6986         if (!tg3_flag(tp, ENABLE_RSS)) {
6987                 /* Sync BD data before updating mailbox */
6988                 wmb();
6989
6990                 if (work_mask & RXD_OPAQUE_RING_STD) {
6991                         tpr->rx_std_prod_idx = std_prod_idx &
6992                                                tp->rx_std_ring_mask;
6993                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6994                                      tpr->rx_std_prod_idx);
6995                 }
6996                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6997                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6998                                                tp->rx_jmb_ring_mask;
6999                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7000                                      tpr->rx_jmb_prod_idx);
7001                 }
7002                 mmiowb();
7003         } else if (work_mask) {
7004                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7005                  * updated before the producer indices can be updated.
7006                  */
7007                 smp_wmb();
7008
7009                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7010                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7011
7012                 if (tnapi != &tp->napi[1]) {
7013                         tp->rx_refill = true;
7014                         napi_schedule(&tp->napi[1].napi);
7015                 }
7016         }
7017
7018         return received;
7019 }
7020
7021 static void tg3_poll_link(struct tg3 *tp)
7022 {
7023         /* handle link change and other phy events */
7024         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7025                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7026
7027                 if (sblk->status & SD_STATUS_LINK_CHG) {
7028                         sblk->status = SD_STATUS_UPDATED |
7029                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7030                         spin_lock(&tp->lock);
7031                         if (tg3_flag(tp, USE_PHYLIB)) {
7032                                 tw32_f(MAC_STATUS,
7033                                      (MAC_STATUS_SYNC_CHANGED |
7034                                       MAC_STATUS_CFG_CHANGED |
7035                                       MAC_STATUS_MI_COMPLETION |
7036                                       MAC_STATUS_LNKSTATE_CHANGED));
7037                                 udelay(40);
7038                         } else
7039                                 tg3_setup_phy(tp, false);
7040                         spin_unlock(&tp->lock);
7041                 }
7042         }
7043 }
7044
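     /* Copy posted buffers from the source producer ring set (spr) to the
      * destination set (dpr) in contiguous runs, handling index
      * wraparound; sets -ENOSPC if a destination slot is still occupied.
      */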
7045 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7046                                 struct tg3_rx_prodring_set *dpr,
7047                                 struct tg3_rx_prodring_set *spr)
7048 {
7049         u32 si, di, cpycnt, src_prod_idx;
7050         int i, err = 0;
7051
7052         while (1) {
7053                 src_prod_idx = spr->rx_std_prod_idx;
7054
7055                 /* Make sure updates to the rx_std_buffers[] entries and the
7056                  * standard producer index are seen in the correct order.
7057                  */
7058                 smp_rmb();
7059
7060                 if (spr->rx_std_cons_idx == src_prod_idx)
7061                         break;
7062
7063                 if (spr->rx_std_cons_idx < src_prod_idx)
7064                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7065                 else
7066                         cpycnt = tp->rx_std_ring_mask + 1 -
7067                                  spr->rx_std_cons_idx;
7068
7069                 cpycnt = min(cpycnt,
7070                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7071
7072                 si = spr->rx_std_cons_idx;
7073                 di = dpr->rx_std_prod_idx;
7074
7075                 for (i = di; i < di + cpycnt; i++) {
7076                         if (dpr->rx_std_buffers[i].data) {
7077                                 cpycnt = i - di;
7078                                 err = -ENOSPC;
7079                                 break;
7080                         }
7081                 }
7082
7083                 if (!cpycnt)
7084                         break;
7085
7086                 /* Ensure that updates to the rx_std_buffers ring and the
7087                  * shadowed hardware producer ring from tg3_recycle_skb() are
7088                  * ordered correctly WRT the skb check above.
7089                  */
7090                 smp_rmb();
7091
7092                 memcpy(&dpr->rx_std_buffers[di],
7093                        &spr->rx_std_buffers[si],
7094                        cpycnt * sizeof(struct ring_info));
7095
7096                 for (i = 0; i < cpycnt; i++, di++, si++) {
7097                         struct tg3_rx_buffer_desc *sbd, *dbd;
7098                         sbd = &spr->rx_std[si];
7099                         dbd = &dpr->rx_std[di];
7100                         dbd->addr_hi = sbd->addr_hi;
7101                         dbd->addr_lo = sbd->addr_lo;
7102                 }
7103
7104                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7105                                        tp->rx_std_ring_mask;
7106                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7107                                        tp->rx_std_ring_mask;
7108         }
7109
7110         while (1) {
7111                 src_prod_idx = spr->rx_jmb_prod_idx;
7112
7113                 /* Make sure updates to the rx_jmb_buffers[] entries and
7114                  * the jumbo producer index are seen in the correct order.
7115                  */
7116                 smp_rmb();
7117
7118                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7119                         break;
7120
7121                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7122                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7123                 else
7124                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7125                                  spr->rx_jmb_cons_idx;
7126
7127                 cpycnt = min(cpycnt,
7128                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7129
7130                 si = spr->rx_jmb_cons_idx;
7131                 di = dpr->rx_jmb_prod_idx;
7132
7133                 for (i = di; i < di + cpycnt; i++) {
7134                         if (dpr->rx_jmb_buffers[i].data) {
7135                                 cpycnt = i - di;
7136                                 err = -ENOSPC;
7137                                 break;
7138                         }
7139                 }
7140
7141                 if (!cpycnt)
7142                         break;
7143
7144                 /* Ensure that updates to the rx_jmb_buffers ring and the
7145                  * shadowed hardware producer ring from tg3_recycle_skb() are
7146                  * ordered correctly WRT the skb check above.
7147                  */
7148                 smp_rmb();
7149
7150                 memcpy(&dpr->rx_jmb_buffers[di],
7151                        &spr->rx_jmb_buffers[si],
7152                        cpycnt * sizeof(struct ring_info));
7153
7154                 for (i = 0; i < cpycnt; i++, di++, si++) {
7155                         struct tg3_rx_buffer_desc *sbd, *dbd;
7156                         sbd = &spr->rx_jmb[si].std;
7157                         dbd = &dpr->rx_jmb[di].std;
7158                         dbd->addr_hi = sbd->addr_hi;
7159                         dbd->addr_lo = sbd->addr_lo;
7160                 }
7161
7162                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7163                                        tp->rx_jmb_ring_mask;
7164                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7165                                        tp->rx_jmb_ring_mask;
7166         }
7167
7168         return err;
7169 }
7170
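/* Common NAPI poll body: reap tx completions first, then process rx
 * within the remaining budget.  When RSS is enabled, vector 1 also
 * funnels every per-vector producer ring back into the default ring
 * and updates the hardware producer mailboxes.
 */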
7171 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7172 {
7173         struct tg3 *tp = tnapi->tp;
7174
7175         /* run TX completion thread */
7176         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7177                 tg3_tx(tnapi);
7178                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7179                         return work_done;
7180         }
7181
7182         if (!tnapi->rx_rcb_prod_idx)
7183                 return work_done;
7184
7185         /* run RX thread, within the bounds set by NAPI.
7186          * All RX "locking" is done by ensuring outside
7187          * code synchronizes with tg3->napi.poll()
7188          */
7189         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7190                 work_done += tg3_rx(tnapi, budget - work_done);
7191
7192         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7193                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7194                 int i, err = 0;
7195                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7196                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7197
7198                 tp->rx_refill = false;
7199                 for (i = 1; i <= tp->rxq_cnt; i++)
7200                         err |= tg3_rx_prodring_xfer(tp, dpr,
7201                                                     &tp->napi[i].prodring);
7202
7203                 wmb();
7204
7205                 if (std_prod_idx != dpr->rx_std_prod_idx)
7206                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7207                                      dpr->rx_std_prod_idx);
7208
7209                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7210                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7211                                      dpr->rx_jmb_prod_idx);
7212
7213                 mmiowb();
7214
7215                 if (err)
7216                         tw32_f(HOSTCC_MODE, tp->coal_now);
7217         }
7218
7219         return work_done;
7220 }
7221
7222 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7223 {
7224         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7225                 schedule_work(&tp->reset_task);
7226 }
7227
7228 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7229 {
7230         cancel_work_sync(&tp->reset_task);
7231         tg3_flag_clear(tp, RESET_TASK_PENDING);
7232         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7233 }
7234
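/* NAPI poll handler for the MSI-X vectors, which re-enable interrupts
 * by writing the last status tag seen back to the vector's interrupt
 * mailbox.
 */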
7235 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7236 {
7237         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7238         struct tg3 *tp = tnapi->tp;
7239         int work_done = 0;
7240         struct tg3_hw_status *sblk = tnapi->hw_status;
7241
7242         while (1) {
7243                 work_done = tg3_poll_work(tnapi, work_done, budget);
7244
7245                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7246                         goto tx_recovery;
7247
7248                 if (unlikely(work_done >= budget))
7249                         break;
7250
7251                 /* tnapi->last_tag is used when re-enabling interrupts
7252                  * below to tell the hw how much work has been processed,
7253                  * so we must read it before checking for more work.
7254                  */
7255                 tnapi->last_tag = sblk->status_tag;
7256                 tnapi->last_irq_tag = tnapi->last_tag;
7257                 rmb();
7258
7259                 /* check for RX/TX work to do */
7260                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7261                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7262
7263                         /* This test here is not race free, but will reduce
7264                          * the number of interrupts by looping again.
7265                          */
7266                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7267                                 continue;
7268
7269                         napi_complete_done(napi, work_done);
7270                         /* Reenable interrupts. */
7271                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7272
7273                         /* This test here is synchronized by napi_schedule()
7274                          * and napi_complete() to close the race condition.
7275                          */
7276                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7277                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7278                                                   HOSTCC_MODE_ENABLE |
7279                                                   tnapi->coal_now);
7280                         }
7281                         mmiowb();
7282                         break;
7283                 }
7284         }
7285
7286         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7287         return work_done;
7288
7289 tx_recovery:
7290         /* work_done is guaranteed to be less than budget. */
7291         napi_complete(napi);
7292         tg3_reset_task_schedule(tp);
7293         return work_done;
7294 }
7295
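/* Called when the status block carries SD_STATUS_ERROR: inspect the
 * flow-attention, MSI and DMA status registers, and only dump state
 * and schedule a chip reset if a real error is found.
 */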
7296 static void tg3_process_error(struct tg3 *tp)
7297 {
7298         u32 val;
7299         bool real_error = false;
7300
7301         if (tg3_flag(tp, ERROR_PROCESSED))
7302                 return;
7303
7304         /* Check Flow Attention register */
7305         val = tr32(HOSTCC_FLOW_ATTN);
7306         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7307                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7308                 real_error = true;
7309         }
7310
7311         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7312                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7313                 real_error = true;
7314         }
7315
7316         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7317                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7318                 real_error = true;
7319         }
7320
7321         if (!real_error)
7322                 return;
7323
7324         tg3_dump_state(tp);
7325
7326         tg3_flag_set(tp, ERROR_PROCESSED);
7327         tg3_reset_task_schedule(tp);
7328 }
7329
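/* NAPI poll handler for the default (INTx/MSI) vector, which must
 * also pick up error attentions and link-change events posted to
 * status block 0.
 */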
7330 static int tg3_poll(struct napi_struct *napi, int budget)
7331 {
7332         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7333         struct tg3 *tp = tnapi->tp;
7334         int work_done = 0;
7335         struct tg3_hw_status *sblk = tnapi->hw_status;
7336
7337         while (1) {
7338                 if (sblk->status & SD_STATUS_ERROR)
7339                         tg3_process_error(tp);
7340
7341                 tg3_poll_link(tp);
7342
7343                 work_done = tg3_poll_work(tnapi, work_done, budget);
7344
7345                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7346                         goto tx_recovery;
7347
7348                 if (unlikely(work_done >= budget))
7349                         break;
7350
7351                 if (tg3_flag(tp, TAGGED_STATUS)) {
7352                         /* tnapi->last_tag is used in tg3_int_reenable() below
7353                          * to tell the hw how much work has been processed,
7354                          * so we must read it before checking for more work.
7355                          */
7356                         tnapi->last_tag = sblk->status_tag;
7357                         tnapi->last_irq_tag = tnapi->last_tag;
7358                         rmb();
7359                 } else
7360                         sblk->status &= ~SD_STATUS_UPDATED;
7361
7362                 if (likely(!tg3_has_work(tnapi))) {
7363                         napi_complete_done(napi, work_done);
7364                         tg3_int_reenable(tnapi);
7365                         break;
7366                 }
7367         }
7368
7369         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7370         return work_done;
7371
7372 tx_recovery:
7373         /* work_done is guaranteed to be less than budget. */
7374         napi_complete(napi);
7375         tg3_reset_task_schedule(tp);
7376         return work_done;
7377 }
7378
7379 static void tg3_napi_disable(struct tg3 *tp)
7380 {
7381         int i;
7382
7383         for (i = tp->irq_cnt - 1; i >= 0; i--)
7384                 napi_disable(&tp->napi[i].napi);
7385 }
7386
7387 static void tg3_napi_enable(struct tg3 *tp)
7388 {
7389         int i;
7390
7391         for (i = 0; i < tp->irq_cnt; i++)
7392                 napi_enable(&tp->napi[i].napi);
7393 }
7394
7395 static void tg3_napi_init(struct tg3 *tp)
7396 {
7397         int i;
7398
7399         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7400         for (i = 1; i < tp->irq_cnt; i++)
7401                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7402 }
7403
7404 static void tg3_napi_fini(struct tg3 *tp)
7405 {
7406         int i;
7407
7408         for (i = 0; i < tp->irq_cnt; i++)
7409                 netif_napi_del(&tp->napi[i].napi);
7410 }
7411
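/* Quiesce the receive and transmit paths: disable NAPI, mark the
 * carrier off and stop the tx queues.  netif_trans_update() prevents
 * a spurious tx watchdog timeout while the device is held stopped.
 */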
7412 static inline void tg3_netif_stop(struct tg3 *tp)
7413 {
7414         netif_trans_update(tp->dev);    /* prevent tx timeout */
7415         tg3_napi_disable(tp);
7416         netif_carrier_off(tp->dev);
7417         netif_tx_disable(tp->dev);
7418 }
7419
7420 /* tp->lock must be held */
7421 static inline void tg3_netif_start(struct tg3 *tp)
7422 {
7423         tg3_ptp_resume(tp);
7424
7425         /* NOTE: unconditional netif_tx_wake_all_queues is only
7426          * appropriate so long as all callers are assured to
7427          * have free tx slots (such as after tg3_init_hw)
7428          */
7429         netif_tx_wake_all_queues(tp->dev);
7430
7431         if (tp->link_up)
7432                 netif_carrier_on(tp->dev);
7433
7434         tg3_napi_enable(tp);
7435         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7436         tg3_enable_ints(tp);
7437 }
7438
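/* Drop tp->lock and wait for any in-flight interrupt handlers to
 * finish.  irq_sync is set first, so handlers that fire meanwhile
 * see it via tg3_irq_sync() and return without scheduling NAPI.
 */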
7439 static void tg3_irq_quiesce(struct tg3 *tp)
7440         __releases(tp->lock)
7441         __acquires(tp->lock)
7442 {
7443         int i;
7444
7445         BUG_ON(tp->irq_sync);
7446
7447         tp->irq_sync = 1;
7448         smp_mb();
7449
7450         spin_unlock_bh(&tp->lock);
7451
7452         for (i = 0; i < tp->irq_cnt; i++)
7453                 synchronize_irq(tp->napi[i].irq_vec);
7454
7455         spin_lock_bh(&tp->lock);
7456 }
7457
7458 /* Fully shut down all tg3 driver activity elsewhere in the system.
7459  * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
7460  * Most of the time this is not necessary, except when shutting down
7461  * the device.
7462  */
7463 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7464 {
7465         spin_lock_bh(&tp->lock);
7466         if (irq_sync)
7467                 tg3_irq_quiesce(tp);
7468 }
7469
7470 static inline void tg3_full_unlock(struct tg3 *tp)
7471 {
7472         spin_unlock_bh(&tp->lock);
7473 }
7474
7475 /* One-shot MSI handler - Chip automatically disables interrupt
7476  * after sending MSI so driver doesn't have to do it.
7477  */
7478 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7479 {
7480         struct tg3_napi *tnapi = dev_id;
7481         struct tg3 *tp = tnapi->tp;
7482
7483         prefetch(tnapi->hw_status);
7484         if (tnapi->rx_rcb)
7485                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7486
7487         if (likely(!tg3_irq_sync(tp)))
7488                 napi_schedule(&tnapi->napi);
7489
7490         return IRQ_HANDLED;
7491 }
7492
7493 /* MSI ISR - No need to check for interrupt sharing and no need to
7494  * flush status block and interrupt mailbox. PCI ordering rules
7495  * guarantee that MSI will arrive after the status block.
7496  */
7497 static irqreturn_t tg3_msi(int irq, void *dev_id)
7498 {
7499         struct tg3_napi *tnapi = dev_id;
7500         struct tg3 *tp = tnapi->tp;
7501
7502         prefetch(tnapi->hw_status);
7503         if (tnapi->rx_rcb)
7504                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7505         /*
7506          * Writing any value to intr-mbox-0 clears PCI INTA# and
7507          * chip-internal interrupt pending events.
7508          * Writing non-zero to intr-mbox-0 additionally tells the
7509          * NIC to stop sending us irqs, engaging "in-intr-handler"
7510          * event coalescing.
7511          */
7512         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7513         if (likely(!tg3_irq_sync(tp)))
7514                 napi_schedule(&tnapi->napi);
7515
7516         return IRQ_RETVAL(1);
7517 }
7518
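/* Legacy (non-tagged) INTx ISR.  The line may be shared, so the
 * status block and the PCI state register are consulted to decide
 * whether the interrupt is ours before scheduling NAPI.
 */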
7519 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7520 {
7521         struct tg3_napi *tnapi = dev_id;
7522         struct tg3 *tp = tnapi->tp;
7523         struct tg3_hw_status *sblk = tnapi->hw_status;
7524         unsigned int handled = 1;
7525
7526         /* In INTx mode, it is possible for the interrupt to arrive at
7527          * the CPU before the status block posted with it is visible.
7528          * Reading the PCI State register will confirm whether the
7529          * interrupt is ours and will flush the status block.
7530          */
7531         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7532                 if (tg3_flag(tp, CHIP_RESETTING) ||
7533                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7534                         handled = 0;
7535                         goto out;
7536                 }
7537         }
7538
7539         /*
7540          * Writing any value to intr-mbox-0 clears PCI INTA# and
7541          * chip-internal interrupt pending events.
7542          * Writing non-zero to intr-mbox-0 additionally tells the
7543          * NIC to stop sending us irqs, engaging "in-intr-handler"
7544          * event coalescing.
7545          *
7546          * Flush the mailbox to de-assert the IRQ immediately to prevent
7547          * spurious interrupts.  The flush impacts performance but
7548          * excessive spurious interrupts can be worse in some cases.
7549          */
7550         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7551         if (tg3_irq_sync(tp))
7552                 goto out;
7553         sblk->status &= ~SD_STATUS_UPDATED;
7554         if (likely(tg3_has_work(tnapi))) {
7555                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7556                 napi_schedule(&tnapi->napi);
7557         } else {
7558                 /* No work, shared interrupt perhaps?  Re-enable
7559                  * interrupts, and flush that PCI write.
7560                  */
7561                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7562                                0x00000000);
7563         }
7564 out:
7565         return IRQ_RETVAL(handled);
7566 }
7567
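/* INTx/MSI ISR for chips running with tagged status.  A status tag
 * equal to the last one handled means no new work was posted, so a
 * shared interrupt can be reported as unhandled.
 */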
7568 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7569 {
7570         struct tg3_napi *tnapi = dev_id;
7571         struct tg3 *tp = tnapi->tp;
7572         struct tg3_hw_status *sblk = tnapi->hw_status;
7573         unsigned int handled = 1;
7574
7575         /* In INTx mode, it is possible for the interrupt to arrive at
7576          * the CPU before the status block posted with it is visible.
7577          * Reading the PCI State register will confirm whether the
7578          * interrupt is ours and will flush the status block.
7579          */
7580         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7581                 if (tg3_flag(tp, CHIP_RESETTING) ||
7582                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7583                         handled = 0;
7584                         goto out;
7585                 }
7586         }
7587
7588         /*
7589          * Writing any value to intr-mbox-0 clears PCI INTA# and
7590          * chip-internal interrupt pending events.
7591          * Writing non-zero to intr-mbox-0 additionally tells the
7592          * NIC to stop sending us irqs, engaging "in-intr-handler"
7593          * event coalescing.
7594          *
7595          * Flush the mailbox to de-assert the IRQ immediately to prevent
7596          * spurious interrupts.  The flush impacts performance but
7597          * excessive spurious interrupts can be worse in some cases.
7598          */
7599         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7600
7601         /*
7602          * In a shared interrupt configuration, sometimes other devices'
7603          * interrupts will scream.  We record the current status tag here
7604          * so that the above check can report that the screaming interrupts
7605          * are unhandled.  Eventually they will be silenced.
7606          */
7607         tnapi->last_irq_tag = sblk->status_tag;
7608
7609         if (tg3_irq_sync(tp))
7610                 goto out;
7611
7612         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7613
7614         napi_schedule(&tnapi->napi);
7615
7616 out:
7617         return IRQ_RETVAL(handled);
7618 }
7619
7620 /* ISR for interrupt test */
7621 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7622 {
7623         struct tg3_napi *tnapi = dev_id;
7624         struct tg3 *tp = tnapi->tp;
7625         struct tg3_hw_status *sblk = tnapi->hw_status;
7626
7627         if ((sblk->status & SD_STATUS_UPDATED) ||
7628             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7629                 tg3_disable_ints(tp);
7630                 return IRQ_RETVAL(1);
7631         }
7632         return IRQ_RETVAL(0);
7633 }
7634
7635 #ifdef CONFIG_NET_POLL_CONTROLLER
7636 static void tg3_poll_controller(struct net_device *dev)
7637 {
7638         int i;
7639         struct tg3 *tp = netdev_priv(dev);
7640
7641         if (tg3_irq_sync(tp))
7642                 return;
7643
7644         for (i = 0; i < tp->irq_cnt; i++)
7645                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7646 }
7647 #endif
7648
7649 static void tg3_tx_timeout(struct net_device *dev)
7650 {
7651         struct tg3 *tp = netdev_priv(dev);
7652
7653         if (netif_msg_tx_err(tp)) {
7654                 netdev_err(dev, "transmit timed out, resetting\n");
7655                 tg3_dump_state(tp);
7656         }
7657
7658         tg3_reset_task_schedule(tp);
7659 }
7660
7661 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7662 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7663 {
7664         u32 base = (u32) mapping & 0xffffffff;
7665
7666         return base + len + 8 < base;
7667 }
7668
7669 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7670  * of any 4GB boundaries: 4G, 8G, etc.
7671  */
7672 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7673                                            u32 len, u32 mss)
7674 {
7675         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7676                 u32 base = (u32) mapping & 0xffffffff;
7677
7678                 return ((base + len + (mss & 0x3fff)) < base);
7679         }
7680         return 0;
7681 }
7682
7683 /* Test for DMA addresses > 40-bit */
7684 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7685                                           int len)
7686 {
7687 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7688         if (tg3_flag(tp, 40BIT_DMA_BUG))
7689                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7690         return 0;
7691 #else
7692         return 0;
7693 #endif
7694 }
7695
7696 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7697                                  dma_addr_t mapping, u32 len, u32 flags,
7698                                  u32 mss, u32 vlan)
7699 {
7700         txbd->addr_hi = ((u64) mapping >> 32);
7701         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7702         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7703         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7704 }
7705
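/* Fill tx BDs for one mapped buffer, splitting it into chunks no
 * larger than tp->dma_limit where the chip requires it.  Returns
 * true if the mapping trips one of the hardware DMA bugs, in which
 * case the caller must fall back to the copy workaround.
 */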
7706 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7707                             dma_addr_t map, u32 len, u32 flags,
7708                             u32 mss, u32 vlan)
7709 {
7710         struct tg3 *tp = tnapi->tp;
7711         bool hwbug = false;
7712
7713         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7714                 hwbug = true;
7715
7716         if (tg3_4g_overflow_test(map, len))
7717                 hwbug = true;
7718
7719         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7720                 hwbug = true;
7721
7722         if (tg3_40bit_overflow_test(tp, map, len))
7723                 hwbug = true;
7724
7725         if (tp->dma_limit) {
7726                 u32 prvidx = *entry;
7727                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7728                 while (len > tp->dma_limit && *budget) {
7729                         u32 frag_len = tp->dma_limit;
7730                         len -= tp->dma_limit;
7731
7732                         /* Avoid the 8-byte DMA problem */
7733                         if (len <= 8) {
7734                                 len += tp->dma_limit / 2;
7735                                 frag_len = tp->dma_limit / 2;
7736                         }
7737
7738                         tnapi->tx_buffers[*entry].fragmented = true;
7739
7740                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7741                                       frag_len, tmp_flag, mss, vlan);
7742                         *budget -= 1;
7743                         prvidx = *entry;
7744                         *entry = NEXT_TX(*entry);
7745
7746                         map += frag_len;
7747                 }
7748
7749                 if (len) {
7750                         if (*budget) {
7751                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7752                                               len, flags, mss, vlan);
7753                                 *budget -= 1;
7754                                 *entry = NEXT_TX(*entry);
7755                         } else {
7756                                 hwbug = true;
7757                                 tnapi->tx_buffers[prvidx].fragmented = false;
7758                         }
7759                 }
7760         } else {
7761                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7762                               len, flags, mss, vlan);
7763                 *entry = NEXT_TX(*entry);
7764         }
7765
7766         return hwbug;
7767 }
7768
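/* Unmap the linear head and the first 'last + 1' fragments of a tx
 * skb starting at ring position 'entry', skipping over any extra
 * descriptors that were inserted when a fragment had to be split.
 */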
7769 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7770 {
7771         int i;
7772         struct sk_buff *skb;
7773         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7774
7775         skb = txb->skb;
7776         txb->skb = NULL;
7777
7778         pci_unmap_single(tnapi->tp->pdev,
7779                          dma_unmap_addr(txb, mapping),
7780                          skb_headlen(skb),
7781                          PCI_DMA_TODEVICE);
7782
7783         while (txb->fragmented) {
7784                 txb->fragmented = false;
7785                 entry = NEXT_TX(entry);
7786                 txb = &tnapi->tx_buffers[entry];
7787         }
7788
7789         for (i = 0; i <= last; i++) {
7790                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7791
7792                 entry = NEXT_TX(entry);
7793                 txb = &tnapi->tx_buffers[entry];
7794
7795                 pci_unmap_page(tnapi->tp->pdev,
7796                                dma_unmap_addr(txb, mapping),
7797                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7798
7799                 while (txb->fragmented) {
7800                         txb->fragmented = false;
7801                         entry = NEXT_TX(entry);
7802                         txb = &tnapi->tx_buffers[entry];
7803                 }
7804         }
7805 }
7806
7807 /* Work around 4GB and 40-bit hardware DMA bugs. */
7808 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7809                                        struct sk_buff **pskb,
7810                                        u32 *entry, u32 *budget,
7811                                        u32 base_flags, u32 mss, u32 vlan)
7812 {
7813         struct tg3 *tp = tnapi->tp;
7814         struct sk_buff *new_skb, *skb = *pskb;
7815         dma_addr_t new_addr = 0;
7816         int ret = 0;
7817
7818         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7819                 new_skb = skb_copy(skb, GFP_ATOMIC);
7820         else {
7821                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7822
7823                 new_skb = skb_copy_expand(skb,
7824                                           skb_headroom(skb) + more_headroom,
7825                                           skb_tailroom(skb), GFP_ATOMIC);
7826         }
7827
7828         if (!new_skb) {
7829                 ret = -1;
7830         } else {
7831                 /* New SKB is guaranteed to be linear. */
7832                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7833                                           PCI_DMA_TODEVICE);
7834                 /* Make sure the mapping succeeded */
7835                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7836                         dev_kfree_skb_any(new_skb);
7837                         ret = -1;
7838                 } else {
7839                         u32 save_entry = *entry;
7840
7841                         base_flags |= TXD_FLAG_END;
7842
7843                         tnapi->tx_buffers[*entry].skb = new_skb;
7844                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7845                                            mapping, new_addr);
7846
7847                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7848                                             new_skb->len, base_flags,
7849                                             mss, vlan)) {
7850                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7851                                 dev_kfree_skb_any(new_skb);
7852                                 ret = -1;
7853                         }
7854                 }
7855         }
7856
7857         dev_consume_skb_any(skb);
7858         *pskb = new_skb;
7859         return ret;
7860 }
7861
7862 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7863 {
7864         /* Check that the GSO fallback can succeed: gso_segs, each of
7865          * which may need up to three descriptors, must fit well within
7866          * the current ring size.
7867          */
7868         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7868 }
7869
7870 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7871
7872 /* Use GSO to work around TSO packets that meet the HW bug conditions
7873  * indicated in tg3_tx_frag_set().
7874  */
7875 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7876                        struct netdev_queue *txq, struct sk_buff *skb)
7877 {
7878         struct sk_buff *segs, *nskb;
7879         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7880
7881         /* Estimate the number of fragments in the worst case */
7882         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7883                 netif_tx_stop_queue(txq);
7884
7885                 /* netif_tx_stop_queue() must be done before checking
7886                  * the tx index in tg3_tx_avail() below, because in
7887                  * tg3_tx(), we update tx index before checking for
7888                  * netif_tx_queue_stopped().
7889                  */
7890                 smp_mb();
7891                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7892                         return NETDEV_TX_BUSY;
7893
7894                 netif_tx_wake_queue(txq);
7895         }
7896
7897         segs = skb_gso_segment(skb, tp->dev->features &
7898                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7899         if (IS_ERR(segs) || !segs)
7900                 goto tg3_tso_bug_end;
7901
7902         do {
7903                 nskb = segs;
7904                 segs = segs->next;
7905                 nskb->next = NULL;
7906                 tg3_start_xmit(nskb, tp->dev);
7907         } while (segs);
7908
7909 tg3_tso_bug_end:
7910         dev_consume_skb_any(skb);
7911
7912         return NETDEV_TX_OK;
7913 }
7914
7915 /* hard_start_xmit for all devices */
7916 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7917 {
7918         struct tg3 *tp = netdev_priv(dev);
7919         u32 len, entry, base_flags, mss, vlan = 0;
7920         u32 budget;
7921         int i = -1, would_hit_hwbug;
7922         dma_addr_t mapping;
7923         struct tg3_napi *tnapi;
7924         struct netdev_queue *txq;
7925         unsigned int last;
7926         struct iphdr *iph = NULL;
7927         struct tcphdr *tcph = NULL;
7928         __sum16 tcp_csum = 0, ip_csum = 0;
7929         __be16 ip_tot_len = 0;
7930
7931         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7932         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7933         if (tg3_flag(tp, ENABLE_TSS))
7934                 tnapi++;
7935
7936         budget = tg3_tx_avail(tnapi);
7937
7938         /* We are running in BH disabled context with netif_tx_lock
7939          * and TX reclaim runs via tp->napi.poll inside of a software
7940          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7941          * no IRQ context deadlocks to worry about either.  Rejoice!
7942          */
7943         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7944                 if (!netif_tx_queue_stopped(txq)) {
7945                         netif_tx_stop_queue(txq);
7946
7947                         /* This is a hard error, log it. */
7948                         netdev_err(dev,
7949                                    "BUG! Tx Ring full when queue awake!\n");
7950                 }
7951                 return NETDEV_TX_BUSY;
7952         }
7953
7954         entry = tnapi->tx_prod;
7955         base_flags = 0;
7956
7957         mss = skb_shinfo(skb)->gso_size;
7958         if (mss) {
7959                 u32 tcp_opt_len, hdr_len;
7960
7961                 if (skb_cow_head(skb, 0))
7962                         goto drop;
7963
7964                 iph = ip_hdr(skb);
7965                 tcp_opt_len = tcp_optlen(skb);
7966
7967                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7968
7969                 /* HW/FW cannot correctly segment packets that have been
7970                  * vlan encapsulated.
7971                  */
7972                 if (skb->protocol == htons(ETH_P_8021Q) ||
7973                     skb->protocol == htons(ETH_P_8021AD)) {
7974                         if (tg3_tso_bug_gso_check(tnapi, skb))
7975                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7976                         goto drop;
7977                 }
7978
7979                 if (!skb_is_gso_v6(skb)) {
7980                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7981                             tg3_flag(tp, TSO_BUG)) {
7982                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7983                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7984                                 goto drop;
7985                         }
7986                         ip_csum = iph->check;
7987                         ip_tot_len = iph->tot_len;
7988                         iph->check = 0;
7989                         iph->tot_len = htons(mss + hdr_len);
7990                 }
7991
7992                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7993                                TXD_FLAG_CPU_POST_DMA);
7994
7995                 tcph = tcp_hdr(skb);
7996                 tcp_csum = tcph->check;
7997
7998                 if (tg3_flag(tp, HW_TSO_1) ||
7999                     tg3_flag(tp, HW_TSO_2) ||
8000                     tg3_flag(tp, HW_TSO_3)) {
8001                         tcph->check = 0;
8002                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8003                 } else {
8004                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8005                                                          0, IPPROTO_TCP, 0);
8006                 }
8007
8008                 if (tg3_flag(tp, HW_TSO_3)) {
8009                         mss |= (hdr_len & 0xc) << 12;
8010                         if (hdr_len & 0x10)
8011                                 base_flags |= 0x00000010;
8012                         base_flags |= (hdr_len & 0x3e0) << 5;
8013                 } else if (tg3_flag(tp, HW_TSO_2))
8014                         mss |= hdr_len << 9;
8015                 else if (tg3_flag(tp, HW_TSO_1) ||
8016                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8017                         if (tcp_opt_len || iph->ihl > 5) {
8018                                 int tsflags;
8019
8020                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8021                                 mss |= (tsflags << 11);
8022                         }
8023                 } else {
8024                         if (tcp_opt_len || iph->ihl > 5) {
8025                                 int tsflags;
8026
8027                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8028                                 base_flags |= tsflags << 12;
8029                         }
8030                 }
8031         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8032                 /* HW/FW cannot correctly checksum packets that have been
8033                  * vlan encapsulated.
8034                  */
8035                 if (skb->protocol == htons(ETH_P_8021Q) ||
8036                     skb->protocol == htons(ETH_P_8021AD)) {
8037                         if (skb_checksum_help(skb))
8038                                 goto drop;
8039                 } else  {
8040                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8041                 }
8042         }
8043
8044         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8045             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8046                 base_flags |= TXD_FLAG_JMB_PKT;
8047
8048         if (skb_vlan_tag_present(skb)) {
8049                 base_flags |= TXD_FLAG_VLAN;
8050                 vlan = skb_vlan_tag_get(skb);
8051         }
8052
8053         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8054             tg3_flag(tp, TX_TSTAMP_EN)) {
8055                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8056                 base_flags |= TXD_FLAG_HWTSTAMP;
8057         }
8058
8059         len = skb_headlen(skb);
8060
8061         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8062         if (pci_dma_mapping_error(tp->pdev, mapping))
8063                 goto drop;
8064
8066         tnapi->tx_buffers[entry].skb = skb;
8067         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8068
8069         would_hit_hwbug = 0;
8070
8071         if (tg3_flag(tp, 5701_DMA_BUG))
8072                 would_hit_hwbug = 1;
8073
8074         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8075                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8076                             mss, vlan)) {
8077                 would_hit_hwbug = 1;
8078         } else if (skb_shinfo(skb)->nr_frags > 0) {
8079                 u32 tmp_mss = mss;
8080
8081                 if (!tg3_flag(tp, HW_TSO_1) &&
8082                     !tg3_flag(tp, HW_TSO_2) &&
8083                     !tg3_flag(tp, HW_TSO_3))
8084                         tmp_mss = 0;
8085
8086                 /* Now loop through additional data
8087                  * fragments, and queue them.
8088                  */
8089                 last = skb_shinfo(skb)->nr_frags - 1;
8090                 for (i = 0; i <= last; i++) {
8091                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8092
8093                         len = skb_frag_size(frag);
8094                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8095                                                    len, DMA_TO_DEVICE);
8096
8097                         tnapi->tx_buffers[entry].skb = NULL;
8098                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8099                                            mapping);
8100                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8101                                 goto dma_error;
8102
8103                         if (!budget ||
8104                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8105                                             len, base_flags |
8106                                             ((i == last) ? TXD_FLAG_END : 0),
8107                                             tmp_mss, vlan)) {
8108                                 would_hit_hwbug = 1;
8109                                 break;
8110                         }
8111                 }
8112         }
8113
8114         if (would_hit_hwbug) {
8115                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8116
8117                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8118                         /* If it's a TSO packet, do GSO instead of
8119                          * allocating and copying to a large linear SKB
8120                          */
8121                         if (ip_tot_len) {
8122                                 iph->check = ip_csum;
8123                                 iph->tot_len = ip_tot_len;
8124                         }
8125                         tcph->check = tcp_csum;
8126                         return tg3_tso_bug(tp, tnapi, txq, skb);
8127                 }
8128
8129                 /* If the workaround fails due to memory/mapping
8130                  * failure, silently drop this packet.
8131                  */
8132                 entry = tnapi->tx_prod;
8133                 budget = tg3_tx_avail(tnapi);
8134                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8135                                                 base_flags, mss, vlan))
8136                         goto drop_nofree;
8137         }
8138
8139         skb_tx_timestamp(skb);
8140         netdev_tx_sent_queue(txq, skb->len);
8141
8142         /* Sync BD data before updating mailbox */
8143         wmb();
8144
8145         tnapi->tx_prod = entry;
8146         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8147                 netif_tx_stop_queue(txq);
8148
8149                 /* netif_tx_stop_queue() must be done before checking
8150                  * the tx index in tg3_tx_avail() below, because in
8151                  * tg3_tx(), we update tx index before checking for
8152                  * netif_tx_queue_stopped().
8153                  */
8154                 smp_mb();
8155                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8156                         netif_tx_wake_queue(txq);
8157         }
8158
8159         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8160                 /* Packets are ready, update Tx producer idx on card. */
8161                 tw32_tx_mbox(tnapi->prodmbox, entry);
8162                 mmiowb();
8163         }
8164
8165         return NETDEV_TX_OK;
8166
8167 dma_error:
8168         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8169         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8170 drop:
8171         dev_kfree_skb_any(skb);
8172 drop_nofree:
8173         tp->tx_dropped++;
8174         return NETDEV_TX_OK;
8175 }
8176
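/* Switch internal MAC loopback on or off by rewriting MAC_MODE,
 * picking MII or GMII port mode to match the PHY's abilities.
 */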
8177 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8178 {
8179         if (enable) {
8180                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8181                                   MAC_MODE_PORT_MODE_MASK);
8182
8183                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8184
8185                 if (!tg3_flag(tp, 5705_PLUS))
8186                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8187
8188                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8189                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8190                 else
8191                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8192         } else {
8193                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8194
8195                 if (tg3_flag(tp, 5705_PLUS) ||
8196                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8197                     tg3_asic_rev(tp) == ASIC_REV_5700)
8198                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8199         }
8200
8201         tw32(MAC_MODE, tp->mac_mode);
8202         udelay(40);
8203 }
8204
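/* Put the PHY into loopback at the requested speed, either internal
 * via BMCR_LOOPBACK or external when extlpbk is set, with extra
 * test-register handling for FET PHYs such as those on the 5785.
 */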
8205 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8206 {
8207         u32 val, bmcr, mac_mode, ptest = 0;
8208
8209         tg3_phy_toggle_apd(tp, false);
8210         tg3_phy_toggle_automdix(tp, false);
8211
8212         if (extlpbk && tg3_phy_set_extloopbk(tp))
8213                 return -EIO;
8214
8215         bmcr = BMCR_FULLDPLX;
8216         switch (speed) {
8217         case SPEED_10:
8218                 break;
8219         case SPEED_100:
8220                 bmcr |= BMCR_SPEED100;
8221                 break;
8222         case SPEED_1000:
8223         default:
8224                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8225                         speed = SPEED_100;
8226                         bmcr |= BMCR_SPEED100;
8227                 } else {
8228                         speed = SPEED_1000;
8229                         bmcr |= BMCR_SPEED1000;
8230                 }
8231         }
8232
8233         if (extlpbk) {
8234                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8235                         tg3_readphy(tp, MII_CTRL1000, &val);
8236                         val |= CTL1000_AS_MASTER |
8237                                CTL1000_ENABLE_MASTER;
8238                         tg3_writephy(tp, MII_CTRL1000, val);
8239                 } else {
8240                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8241                                 MII_TG3_FET_PTEST_TRIM_2;
8242                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8243                 }
8244         } else
8245                 bmcr |= BMCR_LOOPBACK;
8246
8247         tg3_writephy(tp, MII_BMCR, bmcr);
8248
8249         /* The write needs to be flushed for the FETs */
8250         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8251                 tg3_readphy(tp, MII_BMCR, &bmcr);
8252
8253         udelay(40);
8254
8255         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8256             tg3_asic_rev(tp) == ASIC_REV_5785) {
8257                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8258                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8259                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8260
8261                 /* The write needs to be flushed for the AC131 */
8262                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8263         }
8264
8265         /* Reset to prevent losing 1st rx packet intermittently */
8266         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8267             tg3_flag(tp, 5780_CLASS)) {
8268                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8269                 udelay(10);
8270                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8271         }
8272
8273         mac_mode = tp->mac_mode &
8274                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8275         if (speed == SPEED_1000)
8276                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8277         else
8278                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8279
8280         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8281                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8282
8283                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8284                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8285                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8286                         mac_mode |= MAC_MODE_LINK_POLARITY;
8287
8288                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8289                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8290         }
8291
8292         tw32(MAC_MODE, mac_mode);
8293         udelay(40);
8294
8295         return 0;
8296 }
8297
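/* React to NETIF_F_LOOPBACK feature changes by toggling internal MAC
 * loopback under tp->lock.
 */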
8298 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8299 {
8300         struct tg3 *tp = netdev_priv(dev);
8301
8302         if (features & NETIF_F_LOOPBACK) {
8303                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8304                         return;
8305
8306                 spin_lock_bh(&tp->lock);
8307                 tg3_mac_loopback(tp, true);
8308                 netif_carrier_on(tp->dev);
8309                 spin_unlock_bh(&tp->lock);
8310                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8311         } else {
8312                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8313                         return;
8314
8315                 spin_lock_bh(&tp->lock);
8316                 tg3_mac_loopback(tp, false);
8317                 /* Force link status check */
8318                 tg3_setup_phy(tp, true);
8319                 spin_unlock_bh(&tp->lock);
8320                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8321         }
8322 }
8323
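/* 5780-class chips cannot do TSO on jumbo frames, so mask off all
 * TSO feature bits once the MTU exceeds the standard Ethernet
 * payload size.
 */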
8324 static netdev_features_t tg3_fix_features(struct net_device *dev,
8325         netdev_features_t features)
8326 {
8327         struct tg3 *tp = netdev_priv(dev);
8328
8329         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8330                 features &= ~NETIF_F_ALL_TSO;
8331
8332         return features;
8333 }
8334
8335 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8336 {
8337         netdev_features_t changed = dev->features ^ features;
8338
8339         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8340                 tg3_set_loopback(dev, features);
8341
8342         return 0;
8343 }
8344
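/* Free the rx data buffers of a producer ring set.  Per-vector rings
 * only own the slots between their consumer and producer indices;
 * the default ring (napi[0]) owns every slot.
 */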
8345 static void tg3_rx_prodring_free(struct tg3 *tp,
8346                                  struct tg3_rx_prodring_set *tpr)
8347 {
8348         int i;
8349
8350         if (tpr != &tp->napi[0].prodring) {
8351                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8352                      i = (i + 1) & tp->rx_std_ring_mask)
8353                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8354                                         tp->rx_pkt_map_sz);
8355
8356                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8357                         for (i = tpr->rx_jmb_cons_idx;
8358                              i != tpr->rx_jmb_prod_idx;
8359                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8360                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8361                                                 TG3_RX_JMB_MAP_SZ);
8362                         }
8363                 }
8364
8365                 return;
8366         }
8367
8368         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8369                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8370                                 tp->rx_pkt_map_sz);
8371
8372         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8373                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8374                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8375                                         TG3_RX_JMB_MAP_SZ);
8376         }
8377 }
8378
8379 /* Initialize rx rings for packet processing.
8380  *
8381  * The chip has been shut down and the driver detached from
8382  * the networking stack, so no interrupts or new tx packets will
8383  * end up in the driver.  tp->{tx,}lock are held and thus
8384  * we may not sleep.
8385  */
8386 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8387                                  struct tg3_rx_prodring_set *tpr)
8388 {
8389         u32 i, rx_pkt_dma_sz;
8390
8391         tpr->rx_std_cons_idx = 0;
8392         tpr->rx_std_prod_idx = 0;
8393         tpr->rx_jmb_cons_idx = 0;
8394         tpr->rx_jmb_prod_idx = 0;
8395
8396         if (tpr != &tp->napi[0].prodring) {
8397                 memset(&tpr->rx_std_buffers[0], 0,
8398                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8399                 if (tpr->rx_jmb_buffers)
8400                         memset(&tpr->rx_jmb_buffers[0], 0,
8401                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8402                 goto done;
8403         }
8404
8405         /* Zero out all descriptors. */
8406         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8407
8408         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8409         if (tg3_flag(tp, 5780_CLASS) &&
8410             tp->dev->mtu > ETH_DATA_LEN)
8411                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8412         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8413
8414         /* Initialize invariants of the rings; we only set this
8415          * stuff once.  This works because the card does not
8416          * write into the rx buffer posting rings.
8417          */
8418         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8419                 struct tg3_rx_buffer_desc *rxd;
8420
8421                 rxd = &tpr->rx_std[i];
8422                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8423                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8424                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8425                                (i << RXD_OPAQUE_INDEX_SHIFT));
8426         }
8427
8428         /* Now allocate fresh SKBs for each rx ring. */
8429         for (i = 0; i < tp->rx_pending; i++) {
8430                 unsigned int frag_size;
8431
8432                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8433                                       &frag_size) < 0) {
8434                         netdev_warn(tp->dev,
8435                                     "Using a smaller RX standard ring. Only "
8436                                     "%d out of %d buffers were allocated "
8437                                     "successfully\n", i, tp->rx_pending);
8438                         if (i == 0)
8439                                 goto initfail;
8440                         tp->rx_pending = i;
8441                         break;
8442                 }
8443         }
8444
8445         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8446                 goto done;
8447
8448         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8449
8450         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8451                 goto done;
8452
8453         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8454                 struct tg3_rx_buffer_desc *rxd;
8455
8456                 rxd = &tpr->rx_jmb[i].std;
8457                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8458                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8459                                   RXD_FLAG_JUMBO;
8460                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8461                        (i << RXD_OPAQUE_INDEX_SHIFT));
8462         }
8463
8464         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8465                 unsigned int frag_size;
8466
8467                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8468                                       &frag_size) < 0) {
8469                         netdev_warn(tp->dev,
8470                                     "Using a smaller RX jumbo ring. Only %d "
8471                                     "out of %d buffers were allocated "
8472                                     "successfully\n", i, tp->rx_jumbo_pending);
8473                         if (i == 0)
8474                                 goto initfail;
8475                         tp->rx_jumbo_pending = i;
8476                         break;
8477                 }
8478         }
8479
8480 done:
8481         return 0;
8482
8483 initfail:
8484         tg3_rx_prodring_free(tp, tpr);
8485         return -ENOMEM;
8486 }
8487
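/* Counterpart of tg3_rx_prodring_init(): release the buffer-tracking
 * arrays and the DMA-coherent descriptor rings.
 */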
8488 static void tg3_rx_prodring_fini(struct tg3 *tp,
8489                                  struct tg3_rx_prodring_set *tpr)
8490 {
8491         kfree(tpr->rx_std_buffers);
8492         tpr->rx_std_buffers = NULL;
8493         kfree(tpr->rx_jmb_buffers);
8494         tpr->rx_jmb_buffers = NULL;
8495         if (tpr->rx_std) {
8496                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8497                                   tpr->rx_std, tpr->rx_std_mapping);
8498                 tpr->rx_std = NULL;
8499         }
8500         if (tpr->rx_jmb) {
8501                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8502                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8503                 tpr->rx_jmb = NULL;
8504         }
8505 }
8506
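/* Allocate the buffer-tracking arrays and DMA-coherent descriptor
 * rings for one producer ring set; jumbo resources are set up only
 * on chips with a separate jumbo ring.
 */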
8507 static int tg3_rx_prodring_init(struct tg3 *tp,
8508                                 struct tg3_rx_prodring_set *tpr)
8509 {
8510         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8511                                       GFP_KERNEL);
8512         if (!tpr->rx_std_buffers)
8513                 return -ENOMEM;
8514
8515         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8516                                          TG3_RX_STD_RING_BYTES(tp),
8517                                          &tpr->rx_std_mapping,
8518                                          GFP_KERNEL);
8519         if (!tpr->rx_std)
8520                 goto err_out;
8521
8522         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8523                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8524                                               GFP_KERNEL);
8525                 if (!tpr->rx_jmb_buffers)
8526                         goto err_out;
8527
8528                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8529                                                  TG3_RX_JMB_RING_BYTES(tp),
8530                                                  &tpr->rx_jmb_mapping,
8531                                                  GFP_KERNEL);
8532                 if (!tpr->rx_jmb)
8533                         goto err_out;
8534         }
8535
8536         return 0;
8537
8538 err_out:
8539         tg3_rx_prodring_fini(tp, tpr);
8540         return -ENOMEM;
8541 }
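
/* Editor's note: each producer ring set pairs a kzalloc'd shadow array
 * (tpr->rx_std_buffers, holding the driver-side buffer pointers and DMA
 * addresses) with a dma_alloc_coherent() descriptor ring (tpr->rx_std)
 * that the NIC reads directly.  A hedged sketch of the unwind contract
 * implied above, not new driver code:
 *
 *	if (tg3_rx_prodring_init(tp, tpr))	// partial failure is fine;
 *		return -ENOMEM;			// _init already ran _fini
 *	...
 *	tg3_rx_prodring_fini(tp, tpr);		// safe on half-built sets:
 *						// kfree(NULL) and the NULL
 *						// checks make it idempotent
 */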
8542
8543 /* Free up pending packets in all rx/tx rings.
8544  *
8545  * The chip has been shut down and the driver detached from
8546  * the networking stack, so no interrupts or new tx packets will
8547  * end up in the driver.  tp->{tx,}lock is not held and we are not
8548  * in an interrupt context and thus may sleep.
8549  */
8550 static void tg3_free_rings(struct tg3 *tp)
8551 {
8552         int i, j;
8553
8554         for (j = 0; j < tp->irq_cnt; j++) {
8555                 struct tg3_napi *tnapi = &tp->napi[j];
8556
8557                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8558
8559                 if (!tnapi->tx_buffers)
8560                         continue;
8561
8562                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8563                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8564
8565                         if (!skb)
8566                                 continue;
8567
8568                         tg3_tx_skb_unmap(tnapi, i,
8569                                          skb_shinfo(skb)->nr_frags - 1);
8570
8571                         dev_consume_skb_any(skb);
8572                 }
8573                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8574         }
8575 }
8576
8577 /* Initialize tx/rx rings for packet processing.
8578  *
8579  * The chip has been shut down and the driver detached from
8580  * the networking stack, so no interrupts or new tx packets will
8581  * end up in the driver.  tp->{tx,}lock are held and thus
8582  * we may not sleep.
8583  */
8584 static int tg3_init_rings(struct tg3 *tp)
8585 {
8586         int i;
8587
8588         /* Free up all the SKBs. */
8589         tg3_free_rings(tp);
8590
8591         for (i = 0; i < tp->irq_cnt; i++) {
8592                 struct tg3_napi *tnapi = &tp->napi[i];
8593
8594                 tnapi->last_tag = 0;
8595                 tnapi->last_irq_tag = 0;
8596                 tnapi->hw_status->status = 0;
8597                 tnapi->hw_status->status_tag = 0;
8598                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8599
8600                 tnapi->tx_prod = 0;
8601                 tnapi->tx_cons = 0;
8602                 if (tnapi->tx_ring)
8603                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8604
8605                 tnapi->rx_rcb_ptr = 0;
8606                 if (tnapi->rx_rcb)
8607                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8608
8609                 if (tnapi->prodring.rx_std &&
8610                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8611                         tg3_free_rings(tp);
8612                         return -ENOMEM;
8613                 }
8614         }
8615
8616         return 0;
8617 }
8618
8619 static void tg3_mem_tx_release(struct tg3 *tp)
8620 {
8621         int i;
8622
8623         for (i = 0; i < tp->irq_max; i++) {
8624                 struct tg3_napi *tnapi = &tp->napi[i];
8625
8626                 if (tnapi->tx_ring) {
8627                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8628                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8629                         tnapi->tx_ring = NULL;
8630                 }
8631
8632                 kfree(tnapi->tx_buffers);
8633                 tnapi->tx_buffers = NULL;
8634         }
8635 }
8636
8637 static int tg3_mem_tx_acquire(struct tg3 *tp)
8638 {
8639         int i;
8640         struct tg3_napi *tnapi = &tp->napi[0];
8641
8642         /* If multivector TSS is enabled, vector 0 does not handle
8643          * tx interrupts.  Don't allocate any resources for it.
8644          */
8645         if (tg3_flag(tp, ENABLE_TSS))
8646                 tnapi++;
8647
8648         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8649                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8650                                             sizeof(struct tg3_tx_ring_info),
8651                                             GFP_KERNEL);
8652                 if (!tnapi->tx_buffers)
8653                         goto err_out;
8654
8655                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8656                                                     TG3_TX_RING_BYTES,
8657                                                     &tnapi->tx_desc_mapping,
8658                                                     GFP_KERNEL);
8659                 if (!tnapi->tx_ring)
8660                         goto err_out;
8661         }
8662
8663         return 0;
8664
8665 err_out:
8666         tg3_mem_tx_release(tp);
8667         return -ENOMEM;
8668 }
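
/* Editor's note: with multivector TSS the tx rings are shifted up by one
 * napi vector, since vector 0 carries no tx work.  The mapping implied by
 * the loop above (illustrative only):
 *
 *	tx queue q  ->  tp->napi[q + 1]   if tg3_flag(tp, ENABLE_TSS)
 *	tx queue q  ->  tp->napi[q]       otherwise
 */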
8669
8670 static void tg3_mem_rx_release(struct tg3 *tp)
8671 {
8672         int i;
8673
8674         for (i = 0; i < tp->irq_max; i++) {
8675                 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8678
8679                 if (!tnapi->rx_rcb)
8680                         continue;
8681
8682                 dma_free_coherent(&tp->pdev->dev,
8683                                   TG3_RX_RCB_RING_BYTES(tp),
8684                                   tnapi->rx_rcb,
8685                                   tnapi->rx_rcb_mapping);
8686                 tnapi->rx_rcb = NULL;
8687         }
8688 }
8689
8690 static int tg3_mem_rx_acquire(struct tg3 *tp)
8691 {
8692         unsigned int i, limit;
8693
8694         limit = tp->rxq_cnt;
8695
8696         /* If RSS is enabled, we need a (dummy) producer ring
8697          * set on vector zero.  This is the true hw prodring.
8698          */
8699         if (tg3_flag(tp, ENABLE_RSS))
8700                 limit++;
8701
8702         for (i = 0; i < limit; i++) {
8703                 struct tg3_napi *tnapi = &tp->napi[i];
8704
8705                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8706                         goto err_out;
8707
8708                 /* If multivector RSS is enabled, vector 0
8709                  * does not handle rx or tx interrupts.
8710                  * Don't allocate any resources for it.
8711                  */
8712                 if (!i && tg3_flag(tp, ENABLE_RSS))
8713                         continue;
8714
8715                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8716                                                    TG3_RX_RCB_RING_BYTES(tp),
8717                                                    &tnapi->rx_rcb_mapping,
8718                                                    GFP_KERNEL);
8719                 if (!tnapi->rx_rcb)
8720                         goto err_out;
8721         }
8722
8723         return 0;
8724
8725 err_out:
8726         tg3_mem_rx_release(tp);
8727         return -ENOMEM;
8728 }
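
/* Editor's note: with RSS, "limit" is rxq_cnt + 1 because vector 0 owns
 * the one true hardware producer ring but no rx return ring (rx_rcb); the
 * per-vector return rings live on vectors 1..rxq_cnt.  The layout this
 * loop builds, sketched:
 *
 *	napi[0]:           prodring only (hw refill source, no rx_rcb)
 *	napi[1..rxq_cnt]:  prodring + rx_rcb return ring
 */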
8729
8730 /*
8731  * Must only be invoked with interrupt sources disabled and
8732  * the hardware shut down.
8733  */
8734 static void tg3_free_consistent(struct tg3 *tp)
8735 {
8736         int i;
8737
8738         for (i = 0; i < tp->irq_cnt; i++) {
8739                 struct tg3_napi *tnapi = &tp->napi[i];
8740
8741                 if (tnapi->hw_status) {
8742                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8743                                           tnapi->hw_status,
8744                                           tnapi->status_mapping);
8745                         tnapi->hw_status = NULL;
8746                 }
8747         }
8748
8749         tg3_mem_rx_release(tp);
8750         tg3_mem_tx_release(tp);
8751
8752         /* tp->hw_stats can be referenced safely:
8753          *     1. under rtnl_lock
8754          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8755          */
8756         if (tp->hw_stats) {
8757                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8758                                   tp->hw_stats, tp->stats_mapping);
8759                 tp->hw_stats = NULL;
8760         }
8761 }
8762
8763 /*
8764  * Must only be invoked with interrupt sources disabled and
8765  * the hardware shut down.  Can sleep.
8766  */
8767 static int tg3_alloc_consistent(struct tg3 *tp)
8768 {
8769         int i;
8770
8771         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8772                                           sizeof(struct tg3_hw_stats),
8773                                           &tp->stats_mapping, GFP_KERNEL);
8774         if (!tp->hw_stats)
8775                 goto err_out;
8776
8777         for (i = 0; i < tp->irq_cnt; i++) {
8778                 struct tg3_napi *tnapi = &tp->napi[i];
8779                 struct tg3_hw_status *sblk;
8780
8781                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8782                                                       TG3_HW_STATUS_SIZE,
8783                                                       &tnapi->status_mapping,
8784                                                       GFP_KERNEL);
8785                 if (!tnapi->hw_status)
8786                         goto err_out;
8787
8788                 sblk = tnapi->hw_status;
8789
8790                 if (tg3_flag(tp, ENABLE_RSS)) {
8791                         u16 *prodptr = NULL;
8792
8793                         /*
8794                          * When RSS is enabled, the status block format changes
8795                          * slightly.  The "rx_jumbo_consumer", "reserved",
8796                          * and "rx_mini_consumer" members get mapped to the
8797                          * other three rx return ring producer indexes.
8798                          */
8799                         switch (i) {
8800                         case 1:
8801                                 prodptr = &sblk->idx[0].rx_producer;
8802                                 break;
8803                         case 2:
8804                                 prodptr = &sblk->rx_jumbo_consumer;
8805                                 break;
8806                         case 3:
8807                                 prodptr = &sblk->reserved;
8808                                 break;
8809                         case 4:
8810                                 prodptr = &sblk->rx_mini_consumer;
8811                                 break;
8812                         }
8813                         tnapi->rx_rcb_prod_idx = prodptr;
8814                 } else {
8815                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8816                 }
8817         }
8818
8819         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8820                 goto err_out;
8821
8822         return 0;
8823
8824 err_out:
8825         tg3_free_consistent(tp);
8826         return -ENOMEM;
8827 }
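
/* Editor's note: the RSS status-block remapping above reuses otherwise
 * idle status block fields as extra rx return ring producer indexes:
 *
 *	vector 1 -> sblk->idx[0].rx_producer   (the normal location)
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 *
 * tnapi->rx_rcb_prod_idx then gives each poll loop a uniform pointer to
 * its own producer index regardless of which field backs it.
 */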
8828
8829 #define MAX_WAIT_CNT 1000
8830
8831 /* To stop a block, clear the enable bit and poll till it
8832  * clears.  tp->lock is held.
8833  */
8834 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8835 {
8836         unsigned int i;
8837         u32 val;
8838
8839         if (tg3_flag(tp, 5705_PLUS)) {
8840                 switch (ofs) {
8841                 case RCVLSC_MODE:
8842                 case DMAC_MODE:
8843                 case MBFREE_MODE:
8844                 case BUFMGR_MODE:
8845                 case MEMARB_MODE:
8846                         /* We can't enable/disable these bits on the
8847                          * 5705/5750, so just report success.
8848                          */
8849                         return 0;
8850
8851                 default:
8852                         break;
8853                 }
8854         }
8855
8856         val = tr32(ofs);
8857         val &= ~enable_bit;
8858         tw32_f(ofs, val);
8859
8860         for (i = 0; i < MAX_WAIT_CNT; i++) {
8861                 if (pci_channel_offline(tp->pdev)) {
8862                         dev_err(&tp->pdev->dev,
8863                                 "tg3_stop_block device offline, "
8864                                 "ofs=%lx enable_bit=%x\n",
8865                                 ofs, enable_bit);
8866                         return -ENODEV;
8867                 }
8868
8869                 udelay(100);
8870                 val = tr32(ofs);
8871                 if ((val & enable_bit) == 0)
8872                         break;
8873         }
8874
8875         if (i == MAX_WAIT_CNT && !silent) {
8876                 dev_err(&tp->pdev->dev,
8877                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8878                         ofs, enable_bit);
8879                 return -ENODEV;
8880         }
8881
8882         return 0;
8883 }
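
/* Editor's note: the poll above bounds the wait at MAX_WAIT_CNT (1000)
 * iterations of udelay(100), i.e. roughly 100 ms per block, and checks
 * pci_channel_offline() on each pass so a dead device fails fast with
 * -ENODEV instead of burning the full timeout.
 */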
8884
8885 /* tp->lock is held. */
8886 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8887 {
8888         int i, err;
8889
8890         tg3_disable_ints(tp);
8891
8892         if (pci_channel_offline(tp->pdev)) {
8893                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8894                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8895                 err = -ENODEV;
8896                 goto err_no_dev;
8897         }
8898
8899         tp->rx_mode &= ~RX_MODE_ENABLE;
8900         tw32_f(MAC_RX_MODE, tp->rx_mode);
8901         udelay(10);
8902
8903         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8904         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8905         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8907         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8908         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8909
8910         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8911         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8913         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8914         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8915         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8916         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8917
8918         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8919         tw32_f(MAC_MODE, tp->mac_mode);
8920         udelay(40);
8921
8922         tp->tx_mode &= ~TX_MODE_ENABLE;
8923         tw32_f(MAC_TX_MODE, tp->tx_mode);
8924
8925         for (i = 0; i < MAX_WAIT_CNT; i++) {
8926                 udelay(100);
8927                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8928                         break;
8929         }
8930         if (i >= MAX_WAIT_CNT) {
8931                 dev_err(&tp->pdev->dev,
8932                         "%s timed out, TX_MODE_ENABLE will not clear "
8933                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8934                 err |= -ENODEV;
8935         }
8936
8937         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8938         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8939         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8940
8941         tw32(FTQ_RESET, 0xffffffff);
8942         tw32(FTQ_RESET, 0x00000000);
8943
8944         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8945         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8946
8947 err_no_dev:
8948         for (i = 0; i < tp->irq_cnt; i++) {
8949                 struct tg3_napi *tnapi = &tp->napi[i];
8950                 if (tnapi->hw_status)
8951                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8952         }
8953
8954         return err;
8955 }
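
/* Editor's note: tg3_abort_hw() quiesces the chip roughly front to back:
 * the receive path first (MAC_RX_MODE, then the RCV* blocks), then the
 * send blocks and read DMA, then the MAC and MAC_TX_MODE, and finally
 * host coalescing, write DMA, the FTQs, the buffer manager and the
 * memory arbiter.  The status blocks are cleared last (even on the
 * -ENODEV path) so stale indexes cannot leak into the next init.
 */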
8956
8957 /* Save PCI command register before chip reset */
8958 static void tg3_save_pci_state(struct tg3 *tp)
8959 {
8960         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8961 }
8962
8963 /* Restore PCI state after chip reset */
8964 static void tg3_restore_pci_state(struct tg3 *tp)
8965 {
8966         u32 val;
8967
8968         /* Re-enable indirect register accesses. */
8969         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8970                                tp->misc_host_ctrl);
8971
8972         /* Set MAX PCI retry to zero. */
8973         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8974         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8975             tg3_flag(tp, PCIX_MODE))
8976                 val |= PCISTATE_RETRY_SAME_DMA;
8977         /* Allow reads and writes to the APE register and memory space. */
8978         if (tg3_flag(tp, ENABLE_APE))
8979                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8980                        PCISTATE_ALLOW_APE_SHMEM_WR |
8981                        PCISTATE_ALLOW_APE_PSPACE_WR;
8982         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8983
8984         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8985
8986         if (!tg3_flag(tp, PCI_EXPRESS)) {
8987                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8988                                       tp->pci_cacheline_sz);
8989                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8990                                       tp->pci_lat_timer);
8991         }
8992
8993         /* Make sure PCI-X relaxed ordering bit is clear. */
8994         if (tg3_flag(tp, PCIX_MODE)) {
8995                 u16 pcix_cmd;
8996
8997                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8998                                      &pcix_cmd);
8999                 pcix_cmd &= ~PCI_X_CMD_ERO;
9000                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9001                                       pcix_cmd);
9002         }
9003
9004         if (tg3_flag(tp, 5780_CLASS)) {
9005
9006                 /* Chip reset on 5780 will reset the MSI enable bit,
9007                  * so we need to restore it.
9008                  */
9009                 if (tg3_flag(tp, USING_MSI)) {
9010                         u16 ctrl;
9011
9012                         pci_read_config_word(tp->pdev,
9013                                              tp->msi_cap + PCI_MSI_FLAGS,
9014                                              &ctrl);
9015                         pci_write_config_word(tp->pdev,
9016                                               tp->msi_cap + PCI_MSI_FLAGS,
9017                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9018                         val = tr32(MSGINT_MODE);
9019                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9020                 }
9021         }
9022 }
9023
9024 static void tg3_override_clk(struct tg3 *tp)
9025 {
9026         u32 val;
9027
9028         switch (tg3_asic_rev(tp)) {
9029         case ASIC_REV_5717:
9030                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9031                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9032                      TG3_CPMU_MAC_ORIDE_ENABLE);
9033                 break;
9034
9035         case ASIC_REV_5719:
9036         case ASIC_REV_5720:
9037                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9038                 break;
9039
9040         default:
9041                 return;
9042         }
9043 }
9044
9045 static void tg3_restore_clk(struct tg3 *tp)
9046 {
9047         u32 val;
9048
9049         switch (tg3_asic_rev(tp)) {
9050         case ASIC_REV_5717:
9051                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9052                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9053                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9054                 break;
9055
9056         case ASIC_REV_5719:
9057         case ASIC_REV_5720:
9058                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9059                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9060                 break;
9061
9062         default:
9063                 return;
9064         }
9065 }
9066
9067 /* tp->lock is held. */
9068 static int tg3_chip_reset(struct tg3 *tp)
9069         __releases(tp->lock)
9070         __acquires(tp->lock)
9071 {
9072         u32 val;
9073         void (*write_op)(struct tg3 *, u32, u32);
9074         int i, err;
9075
9076         if (!pci_device_is_present(tp->pdev))
9077                 return -ENODEV;
9078
9079         tg3_nvram_lock(tp);
9080
9081         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9082
9083         /* No matching tg3_nvram_unlock() after this because
9084          * chip reset below will undo the nvram lock.
9085          */
9086         tp->nvram_lock_cnt = 0;
9087
9088         /* GRC_MISC_CFG core clock reset will clear the memory
9089          * enable bit in PCI register 4 and the MSI enable bit
9090          * on some chips, so we save relevant registers here.
9091          */
9092         tg3_save_pci_state(tp);
9093
9094         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9095             tg3_flag(tp, 5755_PLUS))
9096                 tw32(GRC_FASTBOOT_PC, 0);
9097
9098         /*
9099          * We must avoid the readl() that normally takes place.
9100          * It locks machines, causes machine checks, and other
9101          * fun things.  So, temporarily disable the 5701
9102          * hardware workaround, while we do the reset.
9103          * hardware workaround while we do the reset.
9104         write_op = tp->write32;
9105         if (write_op == tg3_write_flush_reg32)
9106                 tp->write32 = tg3_write32;
9107
9108         /* Prevent the irq handler from reading or writing PCI registers
9109          * during chip reset when the memory enable bit in the PCI command
9110          * register may be cleared.  The chip does not generate interrupt
9111          * at this time, but the irq handler may still be called due to irq
9112          * register may be cleared.  The chip does not generate interrupts
9113          */
9114         tg3_flag_set(tp, CHIP_RESETTING);
9115         for (i = 0; i < tp->irq_cnt; i++) {
9116                 struct tg3_napi *tnapi = &tp->napi[i];
9117                 if (tnapi->hw_status) {
9118                         tnapi->hw_status->status = 0;
9119                         tnapi->hw_status->status_tag = 0;
9120                 }
9121                 tnapi->last_tag = 0;
9122                 tnapi->last_irq_tag = 0;
9123         }
9124         smp_mb();
9125
9126         tg3_full_unlock(tp);
9127
9128         for (i = 0; i < tp->irq_cnt; i++)
9129                 synchronize_irq(tp->napi[i].irq_vec);
9130
9131         tg3_full_lock(tp, 0);
9132
9133         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9134                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9135                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9136         }
9137
9138         /* do the reset */
9139         val = GRC_MISC_CFG_CORECLK_RESET;
9140
9141         if (tg3_flag(tp, PCI_EXPRESS)) {
9142                 /* Force PCIe 1.0a mode */
9143                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9144                     !tg3_flag(tp, 57765_PLUS) &&
9145                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9146                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9147                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9148
9149                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9150                         tw32(GRC_MISC_CFG, (1 << 29));
9151                         val |= (1 << 29);
9152                 }
9153         }
9154
9155         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9156                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9157                 tw32(GRC_VCPU_EXT_CTRL,
9158                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9159         }
9160
9161         /* Set the clock to the highest frequency to avoid timeouts. In link-
9162          * aware mode the clock can run slow enough that the bootcode does not
9163          * complete within the expected time. Override the clock so the
9164          * bootcode finishes sooner, then restore it.
9165          */
9166         tg3_override_clk(tp);
9167
9168         /* Manage gphy power for all CPMU absent PCIe devices. */
9169         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9170                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9171
9172         tw32(GRC_MISC_CFG, val);
9173
9174         /* restore 5701 hardware bug workaround write method */
9175         tp->write32 = write_op;
9176
9177         /* Unfortunately, we have to delay before the PCI read back.
9178          * Some 575X chips will not even respond to a PCI cfg access
9179          * when the reset command is given to the chip.
9180          *
9181          * How do these hardware designers expect things to work
9182          * properly if the PCI write is posted for a long period
9183          * of time?  It is always necessary to have some method by
9184          * which a register read back can occur to push out the
9185          * write that does the reset.
9186          *
9187          * For most tg3 variants the trick below was working.
9188          * Ho hum...
9189          */
9190         udelay(120);
9191
9192         /* Flush PCI posted writes.  The normal MMIO registers
9193          * are inaccessible at this time so this is the only
9194          * way to do this reliably (actually, this is no longer
9195          * the case, see above).  I tried to use indirect
9196          * register read/write but this upset some 5701 variants.
9197          */
9198         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9199
9200         udelay(120);
9201
9202         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9203                 u16 val16;
9204
9205                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9206                         int j;
9207                         u32 cfg_val;
9208
9209                         /* Wait for link training to complete.  */
9210                         for (j = 0; j < 5000; j++)
9211                                 udelay(100);
9212
9213                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9214                         pci_write_config_dword(tp->pdev, 0xc4,
9215                                                cfg_val | (1 << 15));
9216                 }
9217
9218                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9219                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9220                 /*
9221                  * Older PCIe devices only support the 128 byte
9222                  * MPS setting.  Enforce the restriction.
9223                  */
9224                 if (!tg3_flag(tp, CPMU_PRESENT))
9225                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9226                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9227
9228                 /* Clear error status */
9229                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9230                                       PCI_EXP_DEVSTA_CED |
9231                                       PCI_EXP_DEVSTA_NFED |
9232                                       PCI_EXP_DEVSTA_FED |
9233                                       PCI_EXP_DEVSTA_URD);
9234         }
9235
9236         tg3_restore_pci_state(tp);
9237
9238         tg3_flag_clear(tp, CHIP_RESETTING);
9239         tg3_flag_clear(tp, ERROR_PROCESSED);
9240
9241         val = 0;
9242         if (tg3_flag(tp, 5780_CLASS))
9243                 val = tr32(MEMARB_MODE);
9244         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9245
9246         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9247                 tg3_stop_fw(tp);
9248                 tw32(0x5000, 0x400);
9249         }
9250
9251         if (tg3_flag(tp, IS_SSB_CORE)) {
9252                 /*
9253                  * BCM4785: To avoid repercussions from using a potentially
9254                  * defective internal ROM, stop the Rx RISC CPU, which is not
9255                  * required for normal operation.
9256                  */
9257                 tg3_stop_fw(tp);
9258                 tg3_halt_cpu(tp, RX_CPU_BASE);
9259         }
9260
9261         err = tg3_poll_fw(tp);
9262         if (err)
9263                 return err;
9264
9265         tw32(GRC_MODE, tp->grc_mode);
9266
9267         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9268                 val = tr32(0xc4);
9269
9270                 tw32(0xc4, val | (1 << 15));
9271         }
9272
9273         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9274             tg3_asic_rev(tp) == ASIC_REV_5705) {
9275                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9276                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9277                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9278                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9279         }
9280
9281         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9282                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9283                 val = tp->mac_mode;
9284         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9285                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9286                 val = tp->mac_mode;
9287         } else
9288                 val = 0;
9289
9290         tw32_f(MAC_MODE, val);
9291         udelay(40);
9292
9293         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9294
9295         tg3_mdio_start(tp);
9296
9297         if (tg3_flag(tp, PCI_EXPRESS) &&
9298             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9299             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9300             !tg3_flag(tp, 57765_PLUS)) {
9301                 val = tr32(0x7c00);
9302
9303                 tw32(0x7c00, val | (1 << 25));
9304         }
9305
9306         tg3_restore_clk(tp);
9307
9308         /* Increase the core clock speed to fix tx timeout issue for 5762
9309          * with 100Mbps link speed.
9310          */
9311         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9312                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9313                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9314                      TG3_CPMU_MAC_ORIDE_ENABLE);
9315         }
9316
9317         /* Reprobe ASF enable state.  */
9318         tg3_flag_clear(tp, ENABLE_ASF);
9319         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9320                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9321
9322         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9323         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9324         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9325                 u32 nic_cfg;
9326
9327                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9328                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9329                         tg3_flag_set(tp, ENABLE_ASF);
9330                         tp->last_event_jiffies = jiffies;
9331                         if (tg3_flag(tp, 5750_PLUS))
9332                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9333
9334                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9335                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9336                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9337                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9338                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9339                 }
9340         }
9341
9342         return 0;
9343 }
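
/* Editor's note: a condensed view of the reset sequence above:
 *
 *	1. take the NVRAM and GRC APE locks, save PCI state
 *	2. mark CHIP_RESETTING, zero the status blocks, sync IRQs
 *	3. hit GRC_MISC_CFG_CORECLK_RESET (with the clock override)
 *	4. delay, then flush the posted write with a PCI cfg read
 *	5. restore PCI state, re-enable the memory arbiter
 *	6. wait for bootcode (tg3_poll_fw), restore GRC/MAC modes
 *	7. reprobe the ASF configuration from NIC SRAM
 */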
9344
9345 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9346 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9347 static void __tg3_set_rx_mode(struct net_device *);
9348
9349 /* tp->lock is held. */
9350 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9351 {
9352         int err;
9353
9354         tg3_stop_fw(tp);
9355
9356         tg3_write_sig_pre_reset(tp, kind);
9357
9358         tg3_abort_hw(tp, silent);
9359         err = tg3_chip_reset(tp);
9360
9361         __tg3_set_mac_addr(tp, false);
9362
9363         tg3_write_sig_legacy(tp, kind);
9364         tg3_write_sig_post_reset(tp, kind);
9365
9366         if (tp->hw_stats) {
9367                 /* Save the stats across chip resets... */
9368                 tg3_get_nstats(tp, &tp->net_stats_prev);
9369                 tg3_get_estats(tp, &tp->estats_prev);
9370
9371                 /* And make sure the next sample is new data */
9372                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9373         }
9374
9375         return err;
9376 }
9377
9378 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9379 {
9380         struct tg3 *tp = netdev_priv(dev);
9381         struct sockaddr *addr = p;
9382         int err = 0;
9383         bool skip_mac_1 = false;
9384
9385         if (!is_valid_ether_addr(addr->sa_data))
9386                 return -EADDRNOTAVAIL;
9387
9388         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9389
9390         if (!netif_running(dev))
9391                 return 0;
9392
9393         if (tg3_flag(tp, ENABLE_ASF)) {
9394                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9395
9396                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9397                 addr0_low = tr32(MAC_ADDR_0_LOW);
9398                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9399                 addr1_low = tr32(MAC_ADDR_1_LOW);
9400
9401                 /* Skip MAC addr 1 if ASF is using it. */
9402                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9403                     !(addr1_high == 0 && addr1_low == 0))
9404                         skip_mac_1 = true;
9405         }
9406         spin_lock_bh(&tp->lock);
9407         __tg3_set_mac_addr(tp, skip_mac_1);
9408         __tg3_set_rx_mode(dev);
9409         spin_unlock_bh(&tp->lock);
9410
9411         return err;
9412 }
9413
9414 /* tp->lock is held. */
9415 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9416                            dma_addr_t mapping, u32 maxlen_flags,
9417                            u32 nic_addr)
9418 {
9419         tg3_write_mem(tp,
9420                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9421                       ((u64) mapping >> 32));
9422         tg3_write_mem(tp,
9423                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9424                       ((u64) mapping & 0xffffffff));
9425         tg3_write_mem(tp,
9426                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9427                        maxlen_flags);
9428
9429         if (!tg3_flag(tp, 5705_PLUS))
9430                 tg3_write_mem(tp,
9431                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9432                               nic_addr);
9433 }
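
/* Editor's note: each ring control block ("bdinfo") in NIC SRAM holds a
 * 64-bit host ring address (written above as separate HIGH/LOW words),
 * a packed maxlen/flags word and, on pre-5705 parts only, a NIC-local
 * ring address.  A representative caller, as tg3_tx_rcbs_init() below
 * does for each tx ring:
 *
 *	tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 */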
9434
9435
9436 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9437 {
9438         int i = 0;
9439
9440         if (!tg3_flag(tp, ENABLE_TSS)) {
9441                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9442                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9443                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9444         } else {
9445                 tw32(HOSTCC_TXCOL_TICKS, 0);
9446                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9447                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9448
9449                 for (; i < tp->txq_cnt; i++) {
9450                         u32 reg;
9451
9452                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9453                         tw32(reg, ec->tx_coalesce_usecs);
9454                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9455                         tw32(reg, ec->tx_max_coalesced_frames);
9456                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9457                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9458                 }
9459         }
9460
9461         for (; i < tp->irq_max - 1; i++) {
9462                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9463                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9464                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9465         }
9466 }
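
/* Editor's note: the per-vector host coalescing registers sit at a fixed
 * 0x18-byte stride, so for MSI-X vector n (n >= 1) the loops above
 * effectively compute, e.g.:
 *
 *	reg = HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 *
 * Unused vectors are explicitly zeroed so stale coalescing parameters
 * never apply to rings that are not active.
 */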
9467
9468 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9469 {
9470         int i = 0;
9471         u32 limit = tp->rxq_cnt;
9472
9473         if (!tg3_flag(tp, ENABLE_RSS)) {
9474                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9475                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9476                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9477                 limit--;
9478         } else {
9479                 tw32(HOSTCC_RXCOL_TICKS, 0);
9480                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9481                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9482         }
9483
9484         for (; i < limit; i++) {
9485                 u32 reg;
9486
9487                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9488                 tw32(reg, ec->rx_coalesce_usecs);
9489                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9490                 tw32(reg, ec->rx_max_coalesced_frames);
9491                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9492                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9493         }
9494
9495         for (; i < tp->irq_max - 1; i++) {
9496                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9497                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9498                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9499         }
9500 }
9501
9502 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9503 {
9504         tg3_coal_tx_init(tp, ec);
9505         tg3_coal_rx_init(tp, ec);
9506
9507         if (!tg3_flag(tp, 5705_PLUS)) {
9508                 u32 val = ec->stats_block_coalesce_usecs;
9509
9510                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9511                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9512
9513                 if (!tp->link_up)
9514                         val = 0;
9515
9516                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9517         }
9518 }
9519
9520 /* tp->lock is held. */
9521 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9522 {
9523         u32 txrcb, limit;
9524
9525         /* Disable all transmit rings but the first. */
9526         if (!tg3_flag(tp, 5705_PLUS))
9527                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9528         else if (tg3_flag(tp, 5717_PLUS))
9529                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9530         else if (tg3_flag(tp, 57765_CLASS) ||
9531                  tg3_asic_rev(tp) == ASIC_REV_5762)
9532                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9533         else
9534                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9535
9536         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9537              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9538                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9539                               BDINFO_FLAGS_DISABLED);
9540 }
9541
9542 /* tp->lock is held. */
9543 static void tg3_tx_rcbs_init(struct tg3 *tp)
9544 {
9545         int i = 0;
9546         u32 txrcb = NIC_SRAM_SEND_RCB;
9547
9548         if (tg3_flag(tp, ENABLE_TSS))
9549                 i++;
9550
9551         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9552                 struct tg3_napi *tnapi = &tp->napi[i];
9553
9554                 if (!tnapi->tx_ring)
9555                         continue;
9556
9557                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9558                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9559                                NIC_SRAM_TX_BUFFER_DESC);
9560         }
9561 }
9562
9563 /* tp->lock is held. */
9564 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9565 {
9566         u32 rxrcb, limit;
9567
9568         /* Disable all receive return rings but the first. */
9569         if (tg3_flag(tp, 5717_PLUS))
9570                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9571         else if (!tg3_flag(tp, 5705_PLUS))
9572                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9573         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9574                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9575                  tg3_flag(tp, 57765_CLASS))
9576                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9577         else
9578                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9579
9580         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9581              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9582                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9583                               BDINFO_FLAGS_DISABLED);
9584 }
9585
9586 /* tp->lock is held. */
9587 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9588 {
9589         int i = 0;
9590         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9591
9592         if (tg3_flag(tp, ENABLE_RSS))
9593                 i++;
9594
9595         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9596                 struct tg3_napi *tnapi = &tp->napi[i];
9597
9598                 if (!tnapi->rx_rcb)
9599                         continue;
9600
9601                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9602                                (tp->rx_ret_ring_mask + 1) <<
9603                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9604         }
9605 }
9606
9607 /* tp->lock is held. */
9608 static void tg3_rings_reset(struct tg3 *tp)
9609 {
9610         int i;
9611         u32 stblk;
9612         struct tg3_napi *tnapi = &tp->napi[0];
9613
9614         tg3_tx_rcbs_disable(tp);
9615
9616         tg3_rx_ret_rcbs_disable(tp);
9617
9618         /* Disable interrupts */
9619         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9620         tp->napi[0].chk_msi_cnt = 0;
9621         tp->napi[0].last_rx_cons = 0;
9622         tp->napi[0].last_tx_cons = 0;
9623
9624         /* Zero mailbox registers. */
9625         if (tg3_flag(tp, SUPPORT_MSIX)) {
9626                 for (i = 1; i < tp->irq_max; i++) {
9627                         tp->napi[i].tx_prod = 0;
9628                         tp->napi[i].tx_cons = 0;
9629                         if (tg3_flag(tp, ENABLE_TSS))
9630                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9631                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9632                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9633                         tp->napi[i].chk_msi_cnt = 0;
9634                         tp->napi[i].last_rx_cons = 0;
9635                         tp->napi[i].last_tx_cons = 0;
9636                 }
9637                 if (!tg3_flag(tp, ENABLE_TSS))
9638                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9639         } else {
9640                 tp->napi[0].tx_prod = 0;
9641                 tp->napi[0].tx_cons = 0;
9642                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9643                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9644         }
9645
9646         /* Make sure the NIC-based send BD rings are disabled. */
9647         if (!tg3_flag(tp, 5705_PLUS)) {
9648                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9649                 for (i = 0; i < 16; i++)
9650                         tw32_tx_mbox(mbox + i * 8, 0);
9651         }
9652
9653         /* Clear status block in ram. */
9654         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9655
9656         /* Set status block DMA address */
9657         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9658              ((u64) tnapi->status_mapping >> 32));
9659         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9660              ((u64) tnapi->status_mapping & 0xffffffff));
9661
9662         stblk = HOSTCC_STATBLCK_RING1;
9663
9664         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9665                 u64 mapping = (u64)tnapi->status_mapping;
9666                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9667                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9668                 stblk += 8;
9669
9670                 /* Clear status block in ram. */
9671                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9672         }
9673
9674         tg3_tx_rcbs_init(tp);
9675         tg3_rx_ret_rcbs_init(tp);
9676 }
9677
9678 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9679 {
9680         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9681
9682         if (!tg3_flag(tp, 5750_PLUS) ||
9683             tg3_flag(tp, 5780_CLASS) ||
9684             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9685             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9686             tg3_flag(tp, 57765_PLUS))
9687                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9688         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9689                  tg3_asic_rev(tp) == ASIC_REV_5787)
9690                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9691         else
9692                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9693
9694         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9695         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9696
9697         val = min(nic_rep_thresh, host_rep_thresh);
9698         tw32(RCVBDI_STD_THRESH, val);
9699
9700         if (tg3_flag(tp, 57765_PLUS))
9701                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9702
9703         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9704                 return;
9705
9706         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9707
9708         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9709
9710         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9711         tw32(RCVBDI_JUMBO_THRESH, val);
9712
9713         if (tg3_flag(tp, 57765_PLUS))
9714                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9715 }
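
/* Editor's note: the standard-ring threshold above is the smaller of the
 * NIC-side limit (half the on-chip BD cache, capped by rx_std_max_post)
 * and the host-side limit (one eighth of rx_pending, at least 1), so the
 * chip requests replenishment before either side can underrun.
 */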
9716
9717 static inline u32 calc_crc(unsigned char *buf, int len)
9718 {
9719         u32 reg;
9720         u32 tmp;
9721         int j, k;
9722
9723         reg = 0xffffffff;
9724
9725         for (j = 0; j < len; j++) {
9726                 reg ^= buf[j];
9727
9728                 for (k = 0; k < 8; k++) {
9729                         tmp = reg & 0x01;
9730
9731                         reg >>= 1;
9732
9733                         if (tmp)
9734                                 reg ^= CRC32_POLY_LE;
9735                 }
9736         }
9737
9738         return ~reg;
9739 }
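
/* Editor's note: calc_crc() is the standard bit-reversed (little-endian)
 * Ethernet CRC-32, processed one bit at a time with CRC32_POLY_LE
 * (0xedb88320).  Assuming the generic kernel CRC helpers, it should be
 * equivalent to the sketch below -- noted for clarity, not a change to
 * the driver:
 *
 *	#include <linux/crc32.h>
 *
 *	u32 crc = ~crc32_le(~0, buf, len);
 */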
9740
9741 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9742 {
9743         /* accept or reject all multicast frames */
9744         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9745         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9746         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9747         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9748 }
9749
9750 static void __tg3_set_rx_mode(struct net_device *dev)
9751 {
9752         struct tg3 *tp = netdev_priv(dev);
9753         u32 rx_mode;
9754
9755         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9756                                   RX_MODE_KEEP_VLAN_TAG);
9757
9758 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9759         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9760          * flag clear.
9761          */
9762         if (!tg3_flag(tp, ENABLE_ASF))
9763                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9764 #endif
9765
9766         if (dev->flags & IFF_PROMISC) {
9767                 /* Promiscuous mode. */
9768                 rx_mode |= RX_MODE_PROMISC;
9769         } else if (dev->flags & IFF_ALLMULTI) {
9770                 /* Accept all multicast. */
9771                 tg3_set_multi(tp, 1);
9772         } else if (netdev_mc_empty(dev)) {
9773                 /* Reject all multicast. */
9774                 tg3_set_multi(tp, 0);
9775         } else {
9776                 /* Accept one or more multicast(s). */
9777                 struct netdev_hw_addr *ha;
9778                 u32 mc_filter[4] = { 0, };
9779                 u32 regidx;
9780                 u32 bit;
9781                 u32 crc;
9782
9783                 netdev_for_each_mc_addr(ha, dev) {
9784                         crc = calc_crc(ha->addr, ETH_ALEN);
9785                         bit = ~crc & 0x7f;
9786                         regidx = (bit & 0x60) >> 5;
9787                         bit &= 0x1f;
9788                         mc_filter[regidx] |= (1 << bit);
9789                 }
9790
9791                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9792                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9793                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9794                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9795         }
9796
9797         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9798                 rx_mode |= RX_MODE_PROMISC;
9799         } else if (!(dev->flags & IFF_PROMISC)) {
9800                 /* Add all entries to the MAC address filter list */
9801                 int i = 0;
9802                 struct netdev_hw_addr *ha;
9803
9804                 netdev_for_each_uc_addr(ha, dev) {
9805                         __tg3_set_one_mac_addr(tp, ha->addr,
9806                                                i + TG3_UCAST_ADDR_IDX(tp));
9807                         i++;
9808                 }
9809         }
9810
9811         if (rx_mode != tp->rx_mode) {
9812                 tp->rx_mode = rx_mode;
9813                 tw32_f(MAC_RX_MODE, rx_mode);
9814                 udelay(10);
9815         }
9816 }
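
/* Editor's note: the multicast hash above implements a 128-bin filter
 * spread across the four 32-bit MAC_HASH_REG_* registers.  Bits 6:5 of
 * the (inverted) CRC pick the register, bits 4:0 pick the bit within it.
 * Worked example: crc = 0xffffff80 gives bit = ~crc & 0x7f = 0x7f, so
 * regidx = 3 and bit 31, i.e. mc_filter[3] |= 0x80000000.
 */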
9817
9818 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9819 {
9820         int i;
9821
9822         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9823                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9824 }
9825
9826 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9827 {
9828         int i;
9829
9830         if (!tg3_flag(tp, SUPPORT_MSIX))
9831                 return;
9832
9833         if (tp->rxq_cnt == 1) {
9834                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9835                 return;
9836         }
9837
9838         /* Validate table against current IRQ count */
9839         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9840                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9841                         break;
9842         }
9843
9844         if (i != TG3_RSS_INDIR_TBL_SIZE)
9845                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9846 }
9847
9848 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9849 {
9850         int i = 0;
9851         u32 reg = MAC_RSS_INDIR_TBL_0;
9852
9853         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9854                 u32 val = tp->rss_ind_tbl[i];
9855                 i++;
9856                 for (; i % 8; i++) {
9857                         val <<= 4;
9858                         val |= tp->rss_ind_tbl[i];
9859                 }
9860                 tw32(reg, val);
9861                 reg += 4;
9862         }
9863 }
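
/* Editor's note: the loop above packs eight 4-bit indirection entries per
 * 32-bit register, first entry in the most significant nibble, covering
 * TG3_RSS_INDIR_TBL_SIZE / 8 registers from MAC_RSS_INDIR_TBL_0 upward.
 * Example: entries {1,2,3,0,1,2,3,0} are written as 0x12301230.
 */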
9864
9865 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9866 {
9867         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9868                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9869         else
9870                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9871 }
9872
9873 /* tp->lock is held. */
9874 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9875 {
9876         u32 val, rdmac_mode;
9877         int i, err, limit;
9878         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9879
9880         tg3_disable_ints(tp);
9881
9882         tg3_stop_fw(tp);
9883
9884         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9885
9886         if (tg3_flag(tp, INIT_COMPLETE))
9887                 tg3_abort_hw(tp, true);
9888
9889         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9890             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9891                 tg3_phy_pull_config(tp);
9892                 tg3_eee_pull_config(tp, NULL);
9893                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9894         }
9895
9896         /* Enable MAC control of LPI */
9897         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9898                 tg3_setup_eee(tp);
9899
9900         if (reset_phy)
9901                 tg3_phy_reset(tp);
9902
9903         err = tg3_chip_reset(tp);
9904         if (err)
9905                 return err;
9906
9907         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9908
9909         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9910                 val = tr32(TG3_CPMU_CTRL);
9911                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9912                 tw32(TG3_CPMU_CTRL, val);
9913
9914                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9915                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9916                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9917                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9918
9919                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9920                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9921                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9922                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9923
9924                 val = tr32(TG3_CPMU_HST_ACC);
9925                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9926                 val |= CPMU_HST_ACC_MACCLK_6_25;
9927                 tw32(TG3_CPMU_HST_ACC, val);
9928         }
9929
9930         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9931                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9932                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9933                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9934                 tw32(PCIE_PWR_MGMT_THRESH, val);
9935
9936                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9937                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9938
9939                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9940
9941                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9942                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9943         }
9944
9945         if (tg3_flag(tp, L1PLLPD_EN)) {
9946                 u32 grc_mode = tr32(GRC_MODE);
9947
9948                 /* Access the lower 1K of PL PCIE block registers. */
9949                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9950                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9951
9952                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9953                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9954                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9955
9956                 tw32(GRC_MODE, grc_mode);
9957         }
9958
9959         if (tg3_flag(tp, 57765_CLASS)) {
9960                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9961                         u32 grc_mode = tr32(GRC_MODE);
9962
9963                         /* Access the lower 1K of PL PCIE block registers. */
9964                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9965                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9966
9967                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9968                                    TG3_PCIE_PL_LO_PHYCTL5);
9969                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9970                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9971
9972                         tw32(GRC_MODE, grc_mode);
9973                 }
9974
9975                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9976                         u32 grc_mode;
9977
9978                         /* Fix transmit hangs */
9979                         val = tr32(TG3_CPMU_PADRNG_CTL);
9980                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9981                         tw32(TG3_CPMU_PADRNG_CTL, val);
9982
9983                         grc_mode = tr32(GRC_MODE);
9984
9985                         /* Access the lower 1K of DL PCIE block registers. */
9986                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9987                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9988
9989                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9990                                    TG3_PCIE_DL_LO_FTSMAX);
9991                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9992                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9993                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9994
9995                         tw32(GRC_MODE, grc_mode);
9996                 }
9997
9998                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9999                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10000                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10001                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10002         }
10003
10004         /* This works around an issue with Athlon chipsets on
10005          * B3 tigon3 silicon.  This bit has no effect on any
10006          * other revision.  But do not set this on PCI Express
10007          * chips and don't even touch the clocks if the CPMU is present.
10008          */
10009         if (!tg3_flag(tp, CPMU_PRESENT)) {
10010                 if (!tg3_flag(tp, PCI_EXPRESS))
10011                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10012                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10013         }
10014
10015         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10016             tg3_flag(tp, PCIX_MODE)) {
10017                 val = tr32(TG3PCI_PCISTATE);
10018                 val |= PCISTATE_RETRY_SAME_DMA;
10019                 tw32(TG3PCI_PCISTATE, val);
10020         }
10021
10022         if (tg3_flag(tp, ENABLE_APE)) {
10023                 /* Allow reads and writes to the
10024                  * APE register and memory space.
10025                  */
10026                 val = tr32(TG3PCI_PCISTATE);
10027                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10028                        PCISTATE_ALLOW_APE_SHMEM_WR |
10029                        PCISTATE_ALLOW_APE_PSPACE_WR;
10030                 tw32(TG3PCI_PCISTATE, val);
10031         }
10032
10033         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10034                 /* Enable some hw fixes.  */
10035                 val = tr32(TG3PCI_MSI_DATA);
10036                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10037                 tw32(TG3PCI_MSI_DATA, val);
10038         }
10039
10040         /* Descriptor ring init may make accesses to the
10041          * NIC SRAM area to set up the TX descriptors, so we
10042          * can only do this after the hardware has been
10043          * successfully reset.
10044          */
10045         err = tg3_init_rings(tp);
10046         if (err)
10047                 return err;
10048
10049         if (tg3_flag(tp, 57765_PLUS)) {
10050                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10051                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10052                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10053                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10054                 if (!tg3_flag(tp, 57765_CLASS) &&
10055                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10056                     tg3_asic_rev(tp) != ASIC_REV_5762)
10057                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10058                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10059         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10060                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10061                 /* This value is determined during the probe-time DMA
10062                  * engine test, tg3_test_dma.
10063                  */
10064                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10065         }
10066
10067         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10068                           GRC_MODE_4X_NIC_SEND_RINGS |
10069                           GRC_MODE_NO_TX_PHDR_CSUM |
10070                           GRC_MODE_NO_RX_PHDR_CSUM);
10071         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10072
10073         /* Pseudo-header checksum is done by hardware logic and not
10074          * the offload processors, so make the chip do the pseudo-
10075          * header checksums on receive.  For transmit it is more
10076          * convenient to do the pseudo-header checksum in software
10077          * as Linux does that on transmit for us in all cases.
10078          */
10079         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10080
10081         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10082         if (tp->rxptpctl)
10083                 tw32(TG3_RX_PTP_CTL,
10084                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10085
10086         if (tg3_flag(tp, PTP_CAPABLE))
10087                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10088
10089         tw32(GRC_MODE, tp->grc_mode | val);
10090
10091         /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10092          * a south bridge limitation. As a workaround, the driver sets MRRS
10093          * to 2048 instead of the default 4096.
10094          */
10095         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10096             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10097                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10098                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10099         }
10100
10101         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
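        /* Presumably a divide-by-(N + 1): writing 65 here would divide the
         * 66 MHz clock down to a ~1 MHz (1 us) timer tick.
         */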
10102         val = tr32(GRC_MISC_CFG);
10103         val &= ~0xff;
10104         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10105         tw32(GRC_MISC_CFG, val);
10106
10107         /* Initialize MBUF/DESC pool. */
10108         if (tg3_flag(tp, 5750_PLUS)) {
10109                 /* Do nothing.  */
10110         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10111                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10112                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10113                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10114                 else
10115                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10116                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10117                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10118         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10119                 int fw_len;
10120
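                /* Round the firmware image length up to the next 128-byte
                 * (0x80) boundary; the MBUF pool then starts right after the
                 * firmware in NIC SRAM, its size reduced by fw_len plus an
                 * extra 0xa00 bytes.
                 */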
10121                 fw_len = tp->fw_len;
10122                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10123                 tw32(BUFMGR_MB_POOL_ADDR,
10124                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10125                 tw32(BUFMGR_MB_POOL_SIZE,
10126                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10127         }
10128
10129         if (tp->dev->mtu <= ETH_DATA_LEN) {
10130                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10131                      tp->bufmgr_config.mbuf_read_dma_low_water);
10132                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10133                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10134                 tw32(BUFMGR_MB_HIGH_WATER,
10135                      tp->bufmgr_config.mbuf_high_water);
10136         } else {
10137                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10138                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10139                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10140                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10141                 tw32(BUFMGR_MB_HIGH_WATER,
10142                      tp->bufmgr_config.mbuf_high_water_jumbo);
10143         }
10144         tw32(BUFMGR_DMA_LOW_WATER,
10145              tp->bufmgr_config.dma_low_water);
10146         tw32(BUFMGR_DMA_HIGH_WATER,
10147              tp->bufmgr_config.dma_high_water);
10148
10149         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10150         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10151                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10152         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10153             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10154             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10156                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10157         tw32(BUFMGR_MODE, val);
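        /* Poll up to 2000 x 10 us (~20 ms) for the buffer manager to come
         * up before declaring the device unusable.
         */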
10158         for (i = 0; i < 2000; i++) {
10159                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10160                         break;
10161                 udelay(10);
10162         }
10163         if (i >= 2000) {
10164                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10165                 return -ENODEV;
10166         }
10167
10168         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10169                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10170
10171         tg3_setup_rxbd_thresholds(tp);
10172
10173         /* Initialize TG3_BDINFO's at:
10174          *  RCVDBDI_STD_BD:     standard eth size rx ring
10175          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10176          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10177          *
10178          * like so:
10179          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10180          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10181          *                              ring attribute flags
10182          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10183          *
10184          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10185          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10186          *
10187          * The size of each ring is fixed in the firmware, but the location is
10188          * configurable.
10189          */
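        /* Viewed as a struct, each TG3_BDINFO control block would look like
         * this (illustrative sketch only; the driver addresses the fields
         * with the TG3_BDINFO_* offsets directly):
         *
         *      struct tg3_bdinfo {
         *              u32 host_addr_hi;   - HOST_ADDR + TG3_64BIT_REG_HIGH
         *              u32 host_addr_lo;   - HOST_ADDR + TG3_64BIT_REG_LOW
         *              u32 maxlen_flags;   - (rx max buffer size << 16) | flags
         *              u32 nic_addr;       - descriptor base in NIC SRAM
         *      };
         */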
10190         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10191              ((u64) tpr->rx_std_mapping >> 32));
10192         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10193              ((u64) tpr->rx_std_mapping & 0xffffffff));
10194         if (!tg3_flag(tp, 5717_PLUS))
10195                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10196                      NIC_SRAM_RX_BUFFER_DESC);
10197
10198         /* Disable the mini ring */
10199         if (!tg3_flag(tp, 5705_PLUS))
10200                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10201                      BDINFO_FLAGS_DISABLED);
10202
10203         /* Program the jumbo buffer descriptor ring control
10204          * blocks on those devices that have them.
10205          */
10206         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10207             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10208
10209                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10210                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10211                              ((u64) tpr->rx_jmb_mapping >> 32));
10212                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10213                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10214                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10215                               BDINFO_FLAGS_MAXLEN_SHIFT;
10216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10217                              val | BDINFO_FLAGS_USE_EXT_RECV);
10218                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10219                             tg3_flag(tp, 57765_CLASS) ||
10220                             tg3_asic_rev(tp) == ASIC_REV_5762)
10221                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10222                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10223                 } else {
10224                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10225                              BDINFO_FLAGS_DISABLED);
10226                 }
10227
10228                 if (tg3_flag(tp, 57765_PLUS)) {
10229                         val = TG3_RX_STD_RING_SIZE(tp);
10230                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10231                         val |= (TG3_RX_STD_DMA_SZ << 2);
10232                 } else
10233                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10234         } else
10235                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10236
10237         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10238
10239         tpr->rx_std_prod_idx = tp->rx_pending;
10240         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10241
10242         tpr->rx_jmb_prod_idx =
10243                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10244         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10245
10246         tg3_rings_reset(tp);
10247
10248         /* Initialize MAC address and backoff seed. */
10249         __tg3_set_mac_addr(tp, false);
10250
10251         /* MTU + ethernet header + FCS + optional VLAN tag */
10252         tw32(MAC_RX_MTU_SIZE,
10253              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10254
10255         /* The slot time is changed by tg3_setup_phy if we
10256          * run at gigabit with half duplex.
10257          */
10258         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10259               (6 << TX_LENGTHS_IPG_SHIFT) |
10260               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10261
10262         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10263             tg3_asic_rev(tp) == ASIC_REV_5762)
10264                 val |= tr32(MAC_TX_LENGTHS) &
10265                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10266                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10267
10268         tw32(MAC_TX_LENGTHS, val);
10269
10270         /* Receive rules. */
10271         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10272         tw32(RCVLPC_CONFIG, 0x0181);
10273
10274         /* Calculate the RDMAC_MODE setting early; we need it to determine
10275          * the RCVLPC_STATE_ENABLE mask.
10276          */
10277         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10278                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10279                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10280                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10281                       RDMAC_MODE_LNGREAD_ENAB);
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10284                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10285
10286         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10287             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10288             tg3_asic_rev(tp) == ASIC_REV_57780)
10289                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10290                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10291                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10292
10293         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10294             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10295                 if (tg3_flag(tp, TSO_CAPABLE) &&
10296                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10297                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10298                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10299                            !tg3_flag(tp, IS_5788)) {
10300                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10301                 }
10302         }
10303
10304         if (tg3_flag(tp, PCI_EXPRESS))
10305                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10306
10307         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10308                 tp->dma_limit = 0;
10309                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10310                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10311                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10312                 }
10313         }
10314
10315         if (tg3_flag(tp, HW_TSO_1) ||
10316             tg3_flag(tp, HW_TSO_2) ||
10317             tg3_flag(tp, HW_TSO_3))
10318                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10319
10320         if (tg3_flag(tp, 57765_PLUS) ||
10321             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10322             tg3_asic_rev(tp) == ASIC_REV_57780)
10323                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10324
10325         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10326             tg3_asic_rev(tp) == ASIC_REV_5762)
10327                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10328
10329         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10330             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10331             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10332             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10333             tg3_flag(tp, 57765_PLUS)) {
10334                 u32 tgtreg;
10335
10336                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10337                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10338                 else
10339                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10340
10341                 val = tr32(tgtreg);
10342                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10343                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10344                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10345                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10346                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10347                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10348                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10349                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10350                 }
10351                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10352         }
10353
10354         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10355             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10356             tg3_asic_rev(tp) == ASIC_REV_5762) {
10357                 u32 tgtreg;
10358
10359                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10360                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10361                 else
10362                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10363
10364                 val = tr32(tgtreg);
10365                 tw32(tgtreg, val |
10366                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10367                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10368         }
10369
10370         /* Receive/send statistics. */
10371         if (tg3_flag(tp, 5750_PLUS)) {
10372                 val = tr32(RCVLPC_STATS_ENABLE);
10373                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10374                 tw32(RCVLPC_STATS_ENABLE, val);
10375         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10376                    tg3_flag(tp, TSO_CAPABLE)) {
10377                 val = tr32(RCVLPC_STATS_ENABLE);
10378                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10379                 tw32(RCVLPC_STATS_ENABLE, val);
10380         } else {
10381                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10382         }
10383         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10384         tw32(SNDDATAI_STATSENAB, 0xffffff);
10385         tw32(SNDDATAI_STATSCTRL,
10386              (SNDDATAI_SCTRL_ENABLE |
10387               SNDDATAI_SCTRL_FASTUPD));
10388
10389         /* Setup host coalescing engine. */
10390         tw32(HOSTCC_MODE, 0);
10391         for (i = 0; i < 2000; i++) {
10392                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10393                         break;
10394                 udelay(10);
10395         }
10396
10397         __tg3_set_coalesce(tp, &tp->coal);
10398
10399         if (!tg3_flag(tp, 5705_PLUS)) {
10400                 /* Status/statistics block address.  See tg3_timer,
10401                  * the tg3_periodic_fetch_stats call there, and
10402                  * tg3_get_stats to see how this works for 5705/5750 chips.
10403                  */
10404                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10405                      ((u64) tp->stats_mapping >> 32));
10406                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10407                      ((u64) tp->stats_mapping & 0xffffffff));
10408                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10409
10410                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10411
10412                 /* Clear statistics and status block memory areas */
10413                 for (i = NIC_SRAM_STATS_BLK;
10414                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10415                      i += sizeof(u32)) {
10416                         tg3_write_mem(tp, i, 0);
10417                         udelay(40);
10418                 }
10419         }
10420
10421         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10422
10423         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10424         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10425         if (!tg3_flag(tp, 5705_PLUS))
10426                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10427
10428         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10429                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10430                 /* Reset to prevent intermittently losing the first rx packet */
10431                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10432                 udelay(10);
10433         }
10434
10435         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10436                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10437                         MAC_MODE_FHDE_ENABLE;
10438         if (tg3_flag(tp, ENABLE_APE))
10439                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10440         if (!tg3_flag(tp, 5705_PLUS) &&
10441             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10442             tg3_asic_rev(tp) != ASIC_REV_5700)
10443                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10444         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10445         udelay(40);
10446
10447         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10448          * If TG3_FLAG_IS_NIC is zero, we should read the
10449          * register to preserve the GPIO settings for LOMs. The GPIOs,
10450          * whether used as inputs or outputs, are set by boot code after
10451          * reset.
10452          */
10453         if (!tg3_flag(tp, IS_NIC)) {
10454                 u32 gpio_mask;
10455
10456                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10457                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10458                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10459
10460                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10461                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10462                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10463
10464                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10465                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10466
10467                 tp->grc_local_ctrl &= ~gpio_mask;
10468                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10469
10470                 /* GPIO1 must be driven high for eeprom write protect */
10471                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10472                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10473                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10474         }
10475         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10476         udelay(100);
10477
10478         if (tg3_flag(tp, USING_MSIX)) {
10479                 val = tr32(MSGINT_MODE);
10480                 val |= MSGINT_MODE_ENABLE;
10481                 if (tp->irq_cnt > 1)
10482                         val |= MSGINT_MODE_MULTIVEC_EN;
10483                 if (!tg3_flag(tp, 1SHOT_MSI))
10484                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10485                 tw32(MSGINT_MODE, val);
10486         }
10487
10488         if (!tg3_flag(tp, 5705_PLUS)) {
10489                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10490                 udelay(40);
10491         }
10492
10493         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10494                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10495                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10496                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10497                WDMAC_MODE_LNGREAD_ENAB);
10498
10499         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10500             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10501                 if (tg3_flag(tp, TSO_CAPABLE) &&
10502                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10503                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10504                         /* nothing */
10505                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10506                            !tg3_flag(tp, IS_5788)) {
10507                         val |= WDMAC_MODE_RX_ACCEL;
10508                 }
10509         }
10510
10511         /* Enable host coalescing bug fix */
10512         if (tg3_flag(tp, 5755_PLUS))
10513                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10514
10515         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10516                 val |= WDMAC_MODE_BURST_ALL_DATA;
10517
10518         tw32_f(WDMAC_MODE, val);
10519         udelay(40);
10520
10521         if (tg3_flag(tp, PCIX_MODE)) {
10522                 u16 pcix_cmd;
10523
10524                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10525                                      &pcix_cmd);
10526                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10527                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10528                         pcix_cmd |= PCI_X_CMD_READ_2K;
10529                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10530                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10531                         pcix_cmd |= PCI_X_CMD_READ_2K;
10532                 }
10533                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10534                                       pcix_cmd);
10535         }
10536
10537         tw32_f(RDMAC_MODE, rdmac_mode);
10538         udelay(40);
10539
10540         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10541             tg3_asic_rev(tp) == ASIC_REV_5720) {
10542                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10543                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10544                                 break;
10545                 }
10546                 if (i < TG3_NUM_RDMA_CHANNELS) {
10547                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10548                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10549                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10550                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10551                 }
10552         }
10553
10554         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10555         if (!tg3_flag(tp, 5705_PLUS))
10556                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10557
10558         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10559                 tw32(SNDDATAC_MODE,
10560                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10561         else
10562                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10563
10564         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10565         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10566         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10567         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10568                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10569         tw32(RCVDBDI_MODE, val);
10570         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10571         if (tg3_flag(tp, HW_TSO_1) ||
10572             tg3_flag(tp, HW_TSO_2) ||
10573             tg3_flag(tp, HW_TSO_3))
10574                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10575         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10576         if (tg3_flag(tp, ENABLE_TSS))
10577                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10578         tw32(SNDBDI_MODE, val);
10579         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10580
10581         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10582                 err = tg3_load_5701_a0_firmware_fix(tp);
10583                 if (err)
10584                         return err;
10585         }
10586
10587         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10588                 /* Ignore any errors from the firmware download. If the
10589                  * download fails, the device will operate with EEE disabled.
10590                  */
10591                 tg3_load_57766_firmware(tp);
10592         }
10593
10594         if (tg3_flag(tp, TSO_CAPABLE)) {
10595                 err = tg3_load_tso_firmware(tp);
10596                 if (err)
10597                         return err;
10598         }
10599
10600         tp->tx_mode = TX_MODE_ENABLE;
10601
10602         if (tg3_flag(tp, 5755_PLUS) ||
10603             tg3_asic_rev(tp) == ASIC_REV_5906)
10604                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10605
10606         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10607             tg3_asic_rev(tp) == ASIC_REV_5762) {
10608                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10609                 tp->tx_mode &= ~val;
10610                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10611         }
10612
10613         tw32_f(MAC_TX_MODE, tp->tx_mode);
10614         udelay(100);
10615
10616         if (tg3_flag(tp, ENABLE_RSS)) {
10617                 u32 rss_key[10];
10618
10619                 tg3_rss_write_indir_tbl(tp);
10620
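                /* Program the kernel's random 40-byte RSS hash key into the
                 * ten 32-bit MAC_RSS_HASH_KEY registers.
                 */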
10621                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10622
10623                 for (i = 0; i < 10; i++)
10624                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10625         }
10626
10627         tp->rx_mode = RX_MODE_ENABLE;
10628         if (tg3_flag(tp, 5755_PLUS))
10629                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10630
10631         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10632                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10633
10634         if (tg3_flag(tp, ENABLE_RSS))
10635                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10636                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10637                                RX_MODE_RSS_IPV6_HASH_EN |
10638                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10639                                RX_MODE_RSS_IPV4_HASH_EN |
10640                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10641
10642         tw32_f(MAC_RX_MODE, tp->rx_mode);
10643         udelay(10);
10644
10645         tw32(MAC_LED_CTRL, tp->led_ctrl);
10646
10647         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10648         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10649                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10650                 udelay(10);
10651         }
10652         tw32_f(MAC_RX_MODE, tp->rx_mode);
10653         udelay(10);
10654
10655         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10656                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10657                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10658                         /* Set drive transmission level to 1.2V  */
10659                         /* only if the signal pre-emphasis bit is not set  */
10660                         val = tr32(MAC_SERDES_CFG);
10661                         val &= 0xfffff000;
10662                         val |= 0x880;
10663                         tw32(MAC_SERDES_CFG, val);
10664                 }
10665                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10666                         tw32(MAC_SERDES_CFG, 0x616000);
10667         }
10668
10669         /* Prevent chip from dropping frames when flow control
10670          * is enabled.
10671          */
10672         if (tg3_flag(tp, 57765_CLASS))
10673                 val = 1;
10674         else
10675                 val = 2;
10676         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10677
10678         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10679             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10680                 /* Use hardware link auto-negotiation */
10681                 tg3_flag_set(tp, HW_AUTONEG);
10682         }
10683
10684         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10685             tg3_asic_rev(tp) == ASIC_REV_5714) {
10686                 u32 tmp;
10687
10688                 tmp = tr32(SERDES_RX_CTRL);
10689                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10690                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10691                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10692                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10693         }
10694
10695         if (!tg3_flag(tp, USE_PHYLIB)) {
10696                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10697                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10698
10699                 err = tg3_setup_phy(tp, false);
10700                 if (err)
10701                         return err;
10702
10703                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10704                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10705                         u32 tmp;
10706
10707                         /* Clear CRC stats. */
10708                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10709                                 tg3_writephy(tp, MII_TG3_TEST1,
10710                                              tmp | MII_TG3_TEST1_CRC_EN);
10711                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10712                         }
10713                 }
10714         }
10715
10716         __tg3_set_rx_mode(tp->dev);
10717
10718         /* Initialize receive rules. */
10719         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10720         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10721         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10722         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10723
10724         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10725                 limit = 8;
10726         else
10727                 limit = 16;
10728         if (tg3_flag(tp, ENABLE_ASF))
10729                 limit -= 4;
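        /* Zero the rule/value pairs from slot (limit - 1) down to slot 4;
         * the fall-throughs below are intentional.  When ASF is enabled the
         * top four slots are skipped (left to the firmware), and slots 0
         * and 1 were programmed above.
         */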
10730         switch (limit) {
10731         case 16:
10732                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10733                 /* fall through */
10734         case 15:
10735                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10736                 /* fall through */
10737         case 14:
10738                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10739                 /* fall through */
10740         case 13:
10741                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10742                 /* fall through */
10743         case 12:
10744                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10745                 /* fall through */
10746         case 11:
10747                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10748                 /* fall through */
10749         case 10:
10750                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10751                 /* fall through */
10752         case 9:
10753                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10754                 /* fall through */
10755         case 8:
10756                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10757                 /* fall through */
10758         case 7:
10759                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10760                 /* fall through */
10761         case 6:
10762                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10763                 /* fall through */
10764         case 5:
10765                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10766                 /* fall through */
10767         case 4:
10768                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10769         case 3:
10770                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10771         case 2:
10772         case 1:
10773
10774         default:
10775                 break;
10776         }
10777
10778         if (tg3_flag(tp, ENABLE_APE))
10779                 /* Write our heartbeat update interval to APE. */
10780                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10781                                 APE_HOST_HEARTBEAT_INT_5SEC);
10782
10783         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10784
10785         return 0;
10786 }
10787
10788 /* Called at device open time to get the chip ready for
10789  * packet processing.  Invoked with tp->lock held.
10790  */
10791 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10792 {
10793         /* Chip may have been just powered on. If so, the boot code may still
10794          * be running initialization. Wait for it to finish to avoid races in
10795          * accessing the hardware.
10796          */
10797         tg3_enable_register_access(tp);
10798         tg3_poll_fw(tp);
10799
10800         tg3_switch_clocks(tp);
10801
10802         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10803
10804         return tg3_reset_hw(tp, reset_phy);
10805 }
10806
10807 #ifdef CONFIG_TIGON3_HWMON
10808 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10809 {
10810         int i;
10811
10812         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10813                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10814
10815                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10816                 off += len;
10817
10818                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10819                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10820                         memset(ocir, 0, TG3_OCIR_LEN);
10821         }
10822 }
10823
10824 /* sysfs attributes for hwmon */
10825 static ssize_t tg3_show_temp(struct device *dev,
10826                              struct device_attribute *devattr, char *buf)
10827 {
10828         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10829         struct tg3 *tp = dev_get_drvdata(dev);
10830         u32 temperature;
10831
10832         spin_lock_bh(&tp->lock);
10833         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10834                                 sizeof(temperature));
10835         spin_unlock_bh(&tp->lock);
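        /* The APE value appears to be whole degrees Celsius; hwmon sysfs
         * expects millidegrees, hence the multiply by 1000.
         */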
10836         return sprintf(buf, "%u\n", temperature * 1000);
10837 }
10838
10839
10840 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10841                           TG3_TEMP_SENSOR_OFFSET);
10842 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10843                           TG3_TEMP_CAUTION_OFFSET);
10844 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10845                           TG3_TEMP_MAX_OFFSET);
10846
10847 static struct attribute *tg3_attrs[] = {
10848         &sensor_dev_attr_temp1_input.dev_attr.attr,
10849         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10850         &sensor_dev_attr_temp1_max.dev_attr.attr,
10851         NULL
10852 };
10853 ATTRIBUTE_GROUPS(tg3);
10854
10855 static void tg3_hwmon_close(struct tg3 *tp)
10856 {
10857         if (tp->hwmon_dev) {
10858                 hwmon_device_unregister(tp->hwmon_dev);
10859                 tp->hwmon_dev = NULL;
10860         }
10861 }
10862
10863 static void tg3_hwmon_open(struct tg3 *tp)
10864 {
10865         int i;
10866         u32 size = 0;
10867         struct pci_dev *pdev = tp->pdev;
10868         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10869
10870         tg3_sd_scan_scratchpad(tp, ocirs);
10871
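        /* Sum the header and data lengths of the active OCIR records; if
         * the scratchpad yielded none, skip hwmon registration entirely.
         */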
10872         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10873                 if (!ocirs[i].src_data_length)
10874                         continue;
10875
10876                 size += ocirs[i].src_hdr_length;
10877                 size += ocirs[i].src_data_length;
10878         }
10879
10880         if (!size)
10881                 return;
10882
10883         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10884                                                           tp, tg3_groups);
10885         if (IS_ERR(tp->hwmon_dev)) {
10886                 tp->hwmon_dev = NULL;
10887                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10888         }
10889 }
10890 #else
10891 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10892 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10893 #endif /* CONFIG_TIGON3_HWMON */
10894
10895
10896 #define TG3_STAT_ADD32(PSTAT, REG) \
10897 do {    u32 __val = tr32(REG); \
10898         (PSTAT)->low += __val; \
10899         if ((PSTAT)->low < __val) \
10900                 (PSTAT)->high += 1; \
10901 } while (0)
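
/* Illustrative sketch (not called anywhere in the driver): TG3_STAT_ADD32
 * extends the chip's 32-bit wrap-around counters into the 64-bit
 * {high, low} pairs of the stats block.  Unsigned overflow of the low
 * word is detected by "low < val" after the add, which propagates the
 * carry:
 */
static inline u64 tg3_stat_add32_example(u32 high, u32 low, u32 val)
{
        low += val;
        if (low < val)          /* low word wrapped: carry into high */
                high++;
        return ((u64)high << 32) | low;
}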
10902
10903 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10904 {
10905         struct tg3_hw_stats *sp = tp->hw_stats;
10906
10907         if (!tp->link_up)
10908                 return;
10909
10910         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10911         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10912         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10913         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10914         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10915         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10916         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10917         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10918         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10919         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10920         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10921         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10922         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10923         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10924                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10925                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10926                 u32 val;
10927
10928                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10929                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10930                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10931                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10932         }
10933
10934         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10935         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10936         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10937         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10938         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10939         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10940         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10941         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10942         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10943         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10944         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10945         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10946         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10947         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10948
10949         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10950         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10951             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10952             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10953             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10954                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10955         } else {
10956                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10957                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10958                 if (val) {
10959                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10960                         sp->rx_discards.low += val;
10961                         if (sp->rx_discards.low < val)
10962                                 sp->rx_discards.high += 1;
10963                 }
10964                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10965         }
10966         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10967 }
10968
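/* Work around occasionally lost MSIs: if a NAPI context still reports
 * pending work but neither its rx nor tx consumer index has moved since
 * the previous tick, re-invoke the MSI handler by hand (after one grace
 * pass counted by chk_msi_cnt).
 */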
10969 static void tg3_chk_missed_msi(struct tg3 *tp)
10970 {
10971         u32 i;
10972
10973         for (i = 0; i < tp->irq_cnt; i++) {
10974                 struct tg3_napi *tnapi = &tp->napi[i];
10975
10976                 if (tg3_has_work(tnapi)) {
10977                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10978                             tnapi->last_tx_cons == tnapi->tx_cons) {
10979                                 if (tnapi->chk_msi_cnt < 1) {
10980                                         tnapi->chk_msi_cnt++;
10981                                         return;
10982                                 }
10983                                 tg3_msi(0, tnapi);
10984                         }
10985                 }
10986                 tnapi->chk_msi_cnt = 0;
10987                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10988                 tnapi->last_tx_cons = tnapi->tx_cons;
10989         }
10990 }
10991
10992 static void tg3_timer(struct timer_list *t)
10993 {
10994         struct tg3 *tp = from_timer(tp, t, timer);
10995
10996         spin_lock(&tp->lock);
10997
10998         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10999                 spin_unlock(&tp->lock);
11000                 goto restart_timer;
11001         }
11002
11003         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11004             tg3_flag(tp, 57765_CLASS))
11005                 tg3_chk_missed_msi(tp);
11006
11007         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11008                 /* BCM4785: Flush posted writes from GbE to host memory. */
11009                 tr32(HOSTCC_MODE);
11010         }
11011
11012         if (!tg3_flag(tp, TAGGED_STATUS)) {
11013                 /* All of this garbage is because, when using non-tagged
11014                  * IRQ status, the mailbox/status_block protocol the chip
11015                  * uses with the CPU is race prone.
11016                  */
11017                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11018                         tw32(GRC_LOCAL_CTRL,
11019                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11020                 } else {
11021                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11022                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11023                 }
11024
11025                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11026                         spin_unlock(&tp->lock);
11027                         tg3_reset_task_schedule(tp);
11028                         goto restart_timer;
11029                 }
11030         }
11031
11032         /* This part only runs once per second. */
11033         if (!--tp->timer_counter) {
11034                 if (tg3_flag(tp, 5705_PLUS))
11035                         tg3_periodic_fetch_stats(tp);
11036
11037                 if (tp->setlpicnt && !--tp->setlpicnt)
11038                         tg3_phy_eee_enable(tp);
11039
11040                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11041                         u32 mac_stat;
11042                         int phy_event;
11043
11044                         mac_stat = tr32(MAC_STATUS);
11045
11046                         phy_event = 0;
11047                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11048                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11049                                         phy_event = 1;
11050                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11051                                 phy_event = 1;
11052
11053                         if (phy_event)
11054                                 tg3_setup_phy(tp, false);
11055                 } else if (tg3_flag(tp, POLL_SERDES)) {
11056                         u32 mac_stat = tr32(MAC_STATUS);
11057                         int need_setup = 0;
11058
11059                         if (tp->link_up &&
11060                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11061                                 need_setup = 1;
11062                         }
11063                         if (!tp->link_up &&
11064                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11065                                          MAC_STATUS_SIGNAL_DET))) {
11066                                 need_setup = 1;
11067                         }
11068                         if (need_setup) {
11069                                 if (!tp->serdes_counter) {
11070                                         tw32_f(MAC_MODE,
11071                                              (tp->mac_mode &
11072                                               ~MAC_MODE_PORT_MODE_MASK));
11073                                         udelay(40);
11074                                         tw32_f(MAC_MODE, tp->mac_mode);
11075                                         udelay(40);
11076                                 }
11077                                 tg3_setup_phy(tp, false);
11078                         }
11079                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11080                            tg3_flag(tp, 5780_CLASS)) {
11081                         tg3_serdes_parallel_detect(tp);
11082                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11083                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11084                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11085                                          TG3_CPMU_STATUS_LINK_MASK);
11086
11087                         if (link_up != tp->link_up)
11088                                 tg3_setup_phy(tp, false);
11089                 }
11090
11091                 tp->timer_counter = tp->timer_multiplier;
11092         }
11093
11094         /* Heartbeat is only sent once every 2 seconds.
11095          *
11096          * The heartbeat is to tell the ASF firmware that the host
11097          * driver is still alive.  In the event that the OS crashes,
11098          * ASF needs to reset the hardware to free up the FIFO space
11099          * that may be filled with rx packets destined for the host.
11100          * If the FIFO is full, ASF will no longer function properly.
11101          *
11102          * Unintended resets have been reported on real-time kernels
11103          * where the timer doesn't run on time.  Netpoll will also have
11104          * the same problem.
11105          *
11106          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11107          * to check the ring condition when the heartbeat is expiring
11108          * before doing the reset.  This will prevent most unintended
11109          * resets.
11110          */
11111         if (!--tp->asf_counter) {
11112                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11113                         tg3_wait_for_event_ack(tp);
11114
11115                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11116                                       FWCMD_NICDRV_ALIVE3);
11117                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11118                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11119                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11120
11121                         tg3_generate_fw_event(tp);
11122                 }
11123                 tp->asf_counter = tp->asf_multiplier;
11124         }
11125
11126         /* Update the APE heartbeat every 5 seconds. */
11127         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11128
11129         spin_unlock(&tp->lock);
11130
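        /* The timer re-arms itself on every pass; only tg3_timer_stop()'s
         * del_timer_sync() breaks the cycle.
         */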
11131 restart_timer:
11132         tp->timer.expires = jiffies + tp->timer_offset;
11133         add_timer(&tp->timer);
11134 }
11135
11136 static void tg3_timer_init(struct tg3 *tp)
11137 {
11138         if (tg3_flag(tp, TAGGED_STATUS) &&
11139             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11140             !tg3_flag(tp, 57765_CLASS))
11141                 tp->timer_offset = HZ;
11142         else
11143                 tp->timer_offset = HZ / 10;
11144
11145         BUG_ON(tp->timer_offset > HZ);
11146
11147         tp->timer_multiplier = (HZ / tp->timer_offset);
11148         tp->asf_multiplier = (HZ / tp->timer_offset) *
11149                              TG3_FW_UPDATE_FREQ_SEC;
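        /* Example with HZ = 1000: non-tagged-status chips use a 100 ms
         * period (HZ / 10), so timer_multiplier = 10 and the once-per-second
         * work in tg3_timer() runs every tenth tick; asf_multiplier then
         * spaces ASF heartbeats TG3_FW_UPDATE_FREQ_SEC seconds apart.
         */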
11150
11151         timer_setup(&tp->timer, tg3_timer, 0);
11152 }
11153
11154 static void tg3_timer_start(struct tg3 *tp)
11155 {
11156         tp->asf_counter   = tp->asf_multiplier;
11157         tp->timer_counter = tp->timer_multiplier;
11158
11159         tp->timer.expires = jiffies + tp->timer_offset;
11160         add_timer(&tp->timer);
11161 }
11162
11163 static void tg3_timer_stop(struct tg3 *tp)
11164 {
11165         del_timer_sync(&tp->timer);
11166 }
11167
11168 /* Restart hardware after configuration changes, self-test, etc.
11169  * Invoked with tp->lock held; on failure the lock is dropped and
11170  * re-acquired around dev_close() (see the sparse annotations below). */
11171 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11172         __releases(tp->lock)
11173         __acquires(tp->lock)
11174 {
11175         int err;
11176
11177         err = tg3_init_hw(tp, reset_phy);
11178         if (err) {
11179                 netdev_err(tp->dev,
11180                            "Failed to re-initialize device, aborting\n");
11181                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11182                 tg3_full_unlock(tp);
11183                 tg3_timer_stop(tp);
11184                 tp->irq_sync = 0;
11185                 tg3_napi_enable(tp);
11186                 dev_close(tp->dev);
11187                 tg3_full_lock(tp, 0);
11188         }
11189         return err;
11190 }
11191
11192 static void tg3_reset_task(struct work_struct *work)
11193 {
11194         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11195         int err;
11196
11197         rtnl_lock();
11198         tg3_full_lock(tp, 0);
11199
11200         if (!netif_running(tp->dev)) {
11201                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11202                 tg3_full_unlock(tp);
11203                 rtnl_unlock();
11204                 return;
11205         }
11206
11207         tg3_full_unlock(tp);
11208
11209         tg3_phy_stop(tp);
11210
11211         tg3_netif_stop(tp);
11212
11213         tg3_full_lock(tp, 1);
11214
11215         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11216                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11217                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11218                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11219                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11220         }
11221
11222         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11223         err = tg3_init_hw(tp, true);
11224         if (err)
11225                 goto out;
11226
11227         tg3_netif_start(tp);
11228
11229 out:
11230         tg3_full_unlock(tp);
11231
11232         if (!err)
11233                 tg3_phy_start(tp);
11234
11235         tg3_flag_clear(tp, RESET_TASK_PENDING);
11236         rtnl_unlock();
11237 }
11238
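/* Request the IRQ for one NAPI vector.  Multi-vector setups get a
 * per-vector name such as "<dev>-txrx-<n>" based on which rings the
 * vector services; the handler is chosen by MSI/MSI-X and tagged
 * status support, and only legacy INTx is requested as shared.
 */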
11239 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11240 {
11241         irq_handler_t fn;
11242         unsigned long flags;
11243         char *name;
11244         struct tg3_napi *tnapi = &tp->napi[irq_num];
11245
11246         if (tp->irq_cnt == 1) {
11247                 name = tp->dev->name;
11248         } else {
11249                 name = &tnapi->irq_lbl[0];
11250                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11251                         snprintf(name, IFNAMSIZ,
11252                                  "%s-txrx-%d", tp->dev->name, irq_num);
11253                 else if (tnapi->tx_buffers)
11254                         snprintf(name, IFNAMSIZ,
11255                                  "%s-tx-%d", tp->dev->name, irq_num);
11256                 else if (tnapi->rx_rcb)
11257                         snprintf(name, IFNAMSIZ,
11258                                  "%s-rx-%d", tp->dev->name, irq_num);
11259                 else
11260                         snprintf(name, IFNAMSIZ,
11261                                  "%s-%d", tp->dev->name, irq_num);
11262                 name[IFNAMSIZ-1] = 0;
11263         }
11264
11265         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11266                 fn = tg3_msi;
11267                 if (tg3_flag(tp, 1SHOT_MSI))
11268                         fn = tg3_msi_1shot;
11269                 flags = 0;
11270         } else {
11271                 fn = tg3_interrupt;
11272                 if (tg3_flag(tp, TAGGED_STATUS))
11273                         fn = tg3_interrupt_tagged;
11274                 flags = IRQF_SHARED;
11275         }
11276
11277         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11278 }
11279
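/* Verify that interrupt delivery works: temporarily install a test
 * ISR, force an interrupt via the coalescing engine, and poll for up
 * to ~50 ms for evidence that it arrived before restoring the normal
 * handler.
 */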
11280 static int tg3_test_interrupt(struct tg3 *tp)
11281 {
11282         struct tg3_napi *tnapi = &tp->napi[0];
11283         struct net_device *dev = tp->dev;
11284         int err, i, intr_ok = 0;
11285         u32 val;
11286
11287         if (!netif_running(dev))
11288                 return -ENODEV;
11289
11290         tg3_disable_ints(tp);
11291
11292         free_irq(tnapi->irq_vec, tnapi);
11293
11294         /*
11295          * Turn off MSI one shot mode.  Otherwise this test has no
11296          * way to observe whether the interrupt was delivered.
11297          */
11298         if (tg3_flag(tp, 57765_PLUS)) {
11299                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11300                 tw32(MSGINT_MODE, val);
11301         }
11302
11303         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11304                           IRQF_SHARED, dev->name, tnapi);
11305         if (err)
11306                 return err;
11307
11308         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11309         tg3_enable_ints(tp);
11310
11311         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11312                tnapi->coal_now);
11313
11314         for (i = 0; i < 5; i++) {
11315                 u32 int_mbox, misc_host_ctrl;
11316
11317                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11318                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11319
11320                 if ((int_mbox != 0) ||
11321                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11322                         intr_ok = 1;
11323                         break;
11324                 }
11325
11326                 if (tg3_flag(tp, 57765_PLUS) &&
11327                     tnapi->hw_status->status_tag != tnapi->last_tag)
11328                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11329
11330                 msleep(10);
11331         }
11332
11333         tg3_disable_ints(tp);
11334
11335         free_irq(tnapi->irq_vec, tnapi);
11336
11337         err = tg3_request_irq(tp, 0);
11338
11339         if (err)
11340                 return err;
11341
11342         if (intr_ok) {
11343                 /* Reenable MSI one shot mode. */
11344                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11345                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11346                         tw32(MSGINT_MODE, val);
11347                 }
11348                 return 0;
11349         }
11350
11351         return -EIO;
11352 }
11353
11354 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
11355  * mode is successfully restored.
11356  */
11357 static int tg3_test_msi(struct tg3 *tp)
11358 {
11359         int err;
11360         u16 pci_cmd;
11361
11362         if (!tg3_flag(tp, USING_MSI))
11363                 return 0;
11364
11365         /* Turn off SERR reporting in case MSI terminates with Master
11366          * Abort.
11367          */
11368         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11369         pci_write_config_word(tp->pdev, PCI_COMMAND,
11370                               pci_cmd & ~PCI_COMMAND_SERR);
11371
11372         err = tg3_test_interrupt(tp);
11373
11374         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11375
11376         if (!err)
11377                 return 0;
11378
11379         /* other failures */
11380         if (err != -EIO)
11381                 return err;
11382
11383         /* MSI test failed, go back to INTx mode */
11384         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11385                     "to INTx mode. Please report this failure to the PCI "
11386                     "maintainer and include system chipset information\n");
11387
11388         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11389
11390         pci_disable_msi(tp->pdev);
11391
11392         tg3_flag_clear(tp, USING_MSI);
11393         tp->napi[0].irq_vec = tp->pdev->irq;
11394
11395         err = tg3_request_irq(tp, 0);
11396         if (err)
11397                 return err;
11398
11399         /* Need to reset the chip because the MSI cycle may have terminated
11400          * with Master Abort.
11401          */
11402         tg3_full_lock(tp, 1);
11403
11404         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11405         err = tg3_init_hw(tp, true);
11406
11407         tg3_full_unlock(tp);
11408
11409         if (err)
11410                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11411
11412         return err;
11413 }
11414
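/* Load the firmware image named in tp->fw_needed and sanity-check the
 * length advertised in its header (which includes BSS) against the
 * size of the blob actually read.
 */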
11415 static int tg3_request_firmware(struct tg3 *tp)
11416 {
11417         const struct tg3_firmware_hdr *fw_hdr;
11418
11419         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11420                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11421                            tp->fw_needed);
11422                 return -ENOENT;
11423         }
11424
11425         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11426
11427         /* Firmware blob starts with version numbers, followed by
11428          * start address and _full_ length including BSS sections
11429          * (which must be at least as long as the actual data, of course).
11430          */
11431
11432         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11433         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11434                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11435                            tp->fw_len, tp->fw_needed);
11436                 release_firmware(tp->fw);
11437                 tp->fw = NULL;
11438                 return -EINVAL;
11439         }
11440
11441         /* We no longer need firmware; we have it. */
11442         tp->fw_needed = NULL;
11443         return 0;
11444 }
11445
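/* Number of interrupt vectors to request: the larger of the RX and TX
 * queue counts, plus one extra vector for link interrupts in
 * multiqueue MSI-X mode.
 */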
11446 static u32 tg3_irq_count(struct tg3 *tp)
11447 {
11448         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11449
11450         if (irq_cnt > 1) {
11451                 /* We want as many rx rings enabled as there are cpus.
11452                  * In multiqueue MSI-X mode, the first MSI-X vector
11453                  * only deals with link interrupts, etc., so we add
11454                  * one to the number of vectors we are requesting.
11455                  */
11456                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11457         }
11458
11459         return irq_cnt;
11460 }
11461
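/* Size the RX/TX queue counts, request a range of MSI-X vectors, and
 * trim the queue counts if fewer vectors are granted.  Returns true
 * when MSI-X is usable, enabling RSS (and TSS for multiple TX queues)
 * when more than one vector is in use; false means fall back to MSI.
 */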
11462 static bool tg3_enable_msix(struct tg3 *tp)
11463 {
11464         int i, rc;
11465         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11466
11467         tp->txq_cnt = tp->txq_req;
11468         tp->rxq_cnt = tp->rxq_req;
11469         if (!tp->rxq_cnt)
11470                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11471         if (tp->rxq_cnt > tp->rxq_max)
11472                 tp->rxq_cnt = tp->rxq_max;
11473
11474         /* Disable multiple TX rings by default.  Simple round-robin hardware
11475          * scheduling of the TX rings can cause starvation of rings with
11476          * small packets when other rings have TSO or jumbo packets.
11477          */
11478         if (!tp->txq_req)
11479                 tp->txq_cnt = 1;
11480
11481         tp->irq_cnt = tg3_irq_count(tp);
11482
11483         for (i = 0; i < tp->irq_max; i++) {
11484                 msix_ent[i].entry  = i;
11485                 msix_ent[i].vector = 0;
11486         }
11487
11488         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11489         if (rc < 0) {
11490                 return false;
11491         } else if (rc < tp->irq_cnt) {
11492                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11493                               tp->irq_cnt, rc);
11494                 tp->irq_cnt = rc;
11495                 tp->rxq_cnt = max(rc - 1, 1);
11496                 if (tp->txq_cnt)
11497                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11498         }
11499
11500         for (i = 0; i < tp->irq_max; i++)
11501                 tp->napi[i].irq_vec = msix_ent[i].vector;
11502
11503         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11504                 pci_disable_msix(tp->pdev);
11505                 return false;
11506         }
11507
11508         if (tp->irq_cnt == 1)
11509                 return true;
11510
11511         tg3_flag_set(tp, ENABLE_RSS);
11512
11513         if (tp->txq_cnt > 1)
11514                 tg3_flag_set(tp, ENABLE_TSS);
11515
11516         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11517
11518         return true;
11519 }
11520
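/* Pick the interrupt mode, preferring MSI-X, then MSI, then legacy
 * INTx, and program MSGINT_MODE to match.  The single-vector fallback
 * also collapses the device to one RX and one TX queue.
 */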
11521 static void tg3_ints_init(struct tg3 *tp)
11522 {
11523         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11524             !tg3_flag(tp, TAGGED_STATUS)) {
11525                 /* All MSI supporting chips should support tagged
11526                  * status; warn and fall back to INTx otherwise.
11527                  */
11528                 netdev_warn(tp->dev,
11529                             "MSI without TAGGED_STATUS? Not using MSI\n");
11530                 goto defcfg;
11531         }
11532
11533         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11534                 tg3_flag_set(tp, USING_MSIX);
11535         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11536                 tg3_flag_set(tp, USING_MSI);
11537
11538         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11539                 u32 msi_mode = tr32(MSGINT_MODE);
11540                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11541                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11542                 if (!tg3_flag(tp, 1SHOT_MSI))
11543                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11544                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11545         }
11546 defcfg:
11547         if (!tg3_flag(tp, USING_MSIX)) {
11548                 tp->irq_cnt = 1;
11549                 tp->napi[0].irq_vec = tp->pdev->irq;
11550         }
11551
11552         if (tp->irq_cnt == 1) {
11553                 tp->txq_cnt = 1;
11554                 tp->rxq_cnt = 1;
11555                 netif_set_real_num_tx_queues(tp->dev, 1);
11556                 netif_set_real_num_rx_queues(tp->dev, 1);
11557         }
11558 }
11559
11560 static void tg3_ints_fini(struct tg3 *tp)
11561 {
11562         if (tg3_flag(tp, USING_MSIX))
11563                 pci_disable_msix(tp->pdev);
11564         else if (tg3_flag(tp, USING_MSI))
11565                 pci_disable_msi(tp->pdev);
11566         tg3_flag_clear(tp, USING_MSI);
11567         tg3_flag_clear(tp, USING_MSIX);
11568         tg3_flag_clear(tp, ENABLE_RSS);
11569         tg3_flag_clear(tp, ENABLE_TSS);
11570 }
11571
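/* Bring-up path shared by tg3_open() and reconfiguration: set up
 * interrupts, NAPI contexts and DMA memory, initialize the hardware,
 * optionally verify MSI delivery, then start the timer and TX queues.
 * On failure everything is unwound in reverse order.
 */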
11572 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11573                      bool init)
11574 {
11575         struct net_device *dev = tp->dev;
11576         int i, err;
11577
11578         /*
11579          * Set up interrupts first so we know how
11580          * many NAPI resources to allocate
11581          */
11582         tg3_ints_init(tp);
11583
11584         tg3_rss_check_indir_tbl(tp);
11585
11586         /* The placement of this call is tied
11587          * to the setup and use of Host TX descriptors.
11588          */
11589         err = tg3_alloc_consistent(tp);
11590         if (err)
11591                 goto out_ints_fini;
11592
11593         tg3_napi_init(tp);
11594
11595         tg3_napi_enable(tp);
11596
11597         for (i = 0; i < tp->irq_cnt; i++) {
11598                 err = tg3_request_irq(tp, i);
11599                 if (err) {
11600                         for (i--; i >= 0; i--) {
11601                                 struct tg3_napi *tnapi = &tp->napi[i];
11602
11603                                 free_irq(tnapi->irq_vec, tnapi);
11604                         }
11605                         goto out_napi_fini;
11606                 }
11607         }
11608
11609         tg3_full_lock(tp, 0);
11610
11611         if (init)
11612                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11613
11614         err = tg3_init_hw(tp, reset_phy);
11615         if (err) {
11616                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11617                 tg3_free_rings(tp);
11618         }
11619
11620         tg3_full_unlock(tp);
11621
11622         if (err)
11623                 goto out_free_irq;
11624
11625         if (test_irq && tg3_flag(tp, USING_MSI)) {
11626                 err = tg3_test_msi(tp);
11627
11628                 if (err) {
11629                         tg3_full_lock(tp, 0);
11630                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11631                         tg3_free_rings(tp);
11632                         tg3_full_unlock(tp);
11633
11634                         goto out_napi_fini;
11635                 }
11636
11637                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11638                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11639
11640                         tw32(PCIE_TRANSACTION_CFG,
11641                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11642                 }
11643         }
11644
11645         tg3_phy_start(tp);
11646
11647         tg3_hwmon_open(tp);
11648
11649         tg3_full_lock(tp, 0);
11650
11651         tg3_timer_start(tp);
11652         tg3_flag_set(tp, INIT_COMPLETE);
11653         tg3_enable_ints(tp);
11654
11655         tg3_ptp_resume(tp);
11656
11657         tg3_full_unlock(tp);
11658
11659         netif_tx_start_all_queues(dev);
11660
11661         /*
11662          * Reset the loopback feature if it was turned on while the device
11663          * was down, to make sure that it is configured properly now.
11664          */
11665         if (dev->features & NETIF_F_LOOPBACK)
11666                 tg3_set_loopback(dev, dev->features);
11667
11668         return 0;
11669
11670 out_free_irq:
11671         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11672                 struct tg3_napi *tnapi = &tp->napi[i];
11673                 free_irq(tnapi->irq_vec, tnapi);
11674         }
11675
11676 out_napi_fini:
11677         tg3_napi_disable(tp);
11678         tg3_napi_fini(tp);
11679         tg3_free_consistent(tp);
11680
11681 out_ints_fini:
11682         tg3_ints_fini(tp);
11683
11684         return err;
11685 }
11686
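/* Teardown counterpart of tg3_start(): cancel the reset task, stop
 * the timer and NAPI, halt the chip, and release IRQs, rings and DMA
 * memory.
 */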
11687 static void tg3_stop(struct tg3 *tp)
11688 {
11689         int i;
11690
11691         tg3_reset_task_cancel(tp);
11692         tg3_netif_stop(tp);
11693
11694         tg3_timer_stop(tp);
11695
11696         tg3_hwmon_close(tp);
11697
11698         tg3_phy_stop(tp);
11699
11700         tg3_full_lock(tp, 1);
11701
11702         tg3_disable_ints(tp);
11703
11704         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11705         tg3_free_rings(tp);
11706         tg3_flag_clear(tp, INIT_COMPLETE);
11707
11708         tg3_full_unlock(tp);
11709
11710         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11711                 struct tg3_napi *tnapi = &tp->napi[i];
11712                 free_irq(tnapi->irq_vec, tnapi);
11713         }
11714
11715         tg3_ints_fini(tp);
11716
11717         tg3_napi_fini(tp);
11718
11719         tg3_free_consistent(tp);
11720 }
11721
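/* .ndo_open handler: load any required firmware (downgrading TSO or
 * EEE capability where that failure is tolerable), power up the chip,
 * and bring the device up via tg3_start().
 */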
11722 static int tg3_open(struct net_device *dev)
11723 {
11724         struct tg3 *tp = netdev_priv(dev);
11725         int err;
11726
11727         if (tp->pcierr_recovery) {
11728                 netdev_err(dev, "Failed to open device. PCI error recovery "
11729                            "in progress\n");
11730                 return -EAGAIN;
11731         }
11732
11733         if (tp->fw_needed) {
11734                 err = tg3_request_firmware(tp);
11735                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11736                         if (err) {
11737                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11738                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11739                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11740                                 netdev_warn(tp->dev, "EEE capability restored\n");
11741                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11742                         }
11743                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11744                         if (err)
11745                                 return err;
11746                 } else if (err) {
11747                         netdev_warn(tp->dev, "TSO capability disabled\n");
11748                         tg3_flag_clear(tp, TSO_CAPABLE);
11749                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11750                         netdev_notice(tp->dev, "TSO capability restored\n");
11751                         tg3_flag_set(tp, TSO_CAPABLE);
11752                 }
11753         }
11754
11755         tg3_carrier_off(tp);
11756
11757         err = tg3_power_up(tp);
11758         if (err)
11759                 return err;
11760
11761         tg3_full_lock(tp, 0);
11762
11763         tg3_disable_ints(tp);
11764         tg3_flag_clear(tp, INIT_COMPLETE);
11765
11766         tg3_full_unlock(tp);
11767
11768         err = tg3_start(tp,
11769                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11770                         true, true);
11771         if (err) {
11772                 tg3_frob_aux_power(tp, false);
11773                 pci_set_power_state(tp->pdev, PCI_D3hot);
11774         }
11775
11776         return err;
11777 }
11778
11779 static int tg3_close(struct net_device *dev)
11780 {
11781         struct tg3 *tp = netdev_priv(dev);
11782
11783         if (tp->pcierr_recovery) {
11784                 netdev_err(dev, "Failed to close device. PCI error recovery "
11785                            "in progress\n");
11786                 return -EAGAIN;
11787         }
11788
11789         tg3_stop(tp);
11790
11791         if (pci_device_is_present(tp->pdev)) {
11792                 tg3_power_down_prepare(tp);
11793
11794                 tg3_carrier_off(tp);
11795         }
11796         return 0;
11797 }
11798
11799 static inline u64 get_stat64(tg3_stat64_t *val)
11800 {
11801         return ((u64)val->high << 32) | ((u64)val->low);
11802 }
11803
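/* On 5700/5701 with a copper PHY the CRC counter lives in the PHY and
 * clears on read, so accumulate it into tp->phy_crc_errors; all other
 * chips report CRC errors through the MAC's rx_fcs_errors statistic.
 */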
11804 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11805 {
11806         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11807
11808         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11809             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11810              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11811                 u32 val;
11812
11813                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11814                         tg3_writephy(tp, MII_TG3_TEST1,
11815                                      val | MII_TG3_TEST1_CRC_EN);
11816                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11817                 } else
11818                         val = 0;
11819
11820                 tp->phy_crc_errors += val;
11821
11822                 return tp->phy_crc_errors;
11823         }
11824
11825         return get_stat64(&hw_stats->rx_fcs_errors);
11826 }
11827
11828 #define ESTAT_ADD(member) \
11829         estats->member =        old_estats->member + \
11830                                 get_stat64(&hw_stats->member)
11831
11832 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11833 {
11834         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11835         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11836
11837         ESTAT_ADD(rx_octets);
11838         ESTAT_ADD(rx_fragments);
11839         ESTAT_ADD(rx_ucast_packets);
11840         ESTAT_ADD(rx_mcast_packets);
11841         ESTAT_ADD(rx_bcast_packets);
11842         ESTAT_ADD(rx_fcs_errors);
11843         ESTAT_ADD(rx_align_errors);
11844         ESTAT_ADD(rx_xon_pause_rcvd);
11845         ESTAT_ADD(rx_xoff_pause_rcvd);
11846         ESTAT_ADD(rx_mac_ctrl_rcvd);
11847         ESTAT_ADD(rx_xoff_entered);
11848         ESTAT_ADD(rx_frame_too_long_errors);
11849         ESTAT_ADD(rx_jabbers);
11850         ESTAT_ADD(rx_undersize_packets);
11851         ESTAT_ADD(rx_in_length_errors);
11852         ESTAT_ADD(rx_out_length_errors);
11853         ESTAT_ADD(rx_64_or_less_octet_packets);
11854         ESTAT_ADD(rx_65_to_127_octet_packets);
11855         ESTAT_ADD(rx_128_to_255_octet_packets);
11856         ESTAT_ADD(rx_256_to_511_octet_packets);
11857         ESTAT_ADD(rx_512_to_1023_octet_packets);
11858         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11859         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11860         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11861         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11862         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11863
11864         ESTAT_ADD(tx_octets);
11865         ESTAT_ADD(tx_collisions);
11866         ESTAT_ADD(tx_xon_sent);
11867         ESTAT_ADD(tx_xoff_sent);
11868         ESTAT_ADD(tx_flow_control);
11869         ESTAT_ADD(tx_mac_errors);
11870         ESTAT_ADD(tx_single_collisions);
11871         ESTAT_ADD(tx_mult_collisions);
11872         ESTAT_ADD(tx_deferred);
11873         ESTAT_ADD(tx_excessive_collisions);
11874         ESTAT_ADD(tx_late_collisions);
11875         ESTAT_ADD(tx_collide_2times);
11876         ESTAT_ADD(tx_collide_3times);
11877         ESTAT_ADD(tx_collide_4times);
11878         ESTAT_ADD(tx_collide_5times);
11879         ESTAT_ADD(tx_collide_6times);
11880         ESTAT_ADD(tx_collide_7times);
11881         ESTAT_ADD(tx_collide_8times);
11882         ESTAT_ADD(tx_collide_9times);
11883         ESTAT_ADD(tx_collide_10times);
11884         ESTAT_ADD(tx_collide_11times);
11885         ESTAT_ADD(tx_collide_12times);
11886         ESTAT_ADD(tx_collide_13times);
11887         ESTAT_ADD(tx_collide_14times);
11888         ESTAT_ADD(tx_collide_15times);
11889         ESTAT_ADD(tx_ucast_packets);
11890         ESTAT_ADD(tx_mcast_packets);
11891         ESTAT_ADD(tx_bcast_packets);
11892         ESTAT_ADD(tx_carrier_sense_errors);
11893         ESTAT_ADD(tx_discards);
11894         ESTAT_ADD(tx_errors);
11895
11896         ESTAT_ADD(dma_writeq_full);
11897         ESTAT_ADD(dma_write_prioq_full);
11898         ESTAT_ADD(rxbds_empty);
11899         ESTAT_ADD(rx_discards);
11900         ESTAT_ADD(rx_errors);
11901         ESTAT_ADD(rx_threshold_hit);
11902
11903         ESTAT_ADD(dma_readq_full);
11904         ESTAT_ADD(dma_read_prioq_full);
11905         ESTAT_ADD(tx_comp_queue_full);
11906
11907         ESTAT_ADD(ring_set_send_prod_index);
11908         ESTAT_ADD(ring_status_update);
11909         ESTAT_ADD(nic_irqs);
11910         ESTAT_ADD(nic_avoided_irqs);
11911         ESTAT_ADD(nic_tx_threshold_hit);
11912
11913         ESTAT_ADD(mbuf_lwm_thresh_hit);
11914 }
11915
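/* Fill the standard rtnl_link_stats64 counters from the hardware
 * statistics block, adding in the totals saved across the last reset
 * (tp->net_stats_prev).
 */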
11916 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11917 {
11918         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11919         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11920
11921         stats->rx_packets = old_stats->rx_packets +
11922                 get_stat64(&hw_stats->rx_ucast_packets) +
11923                 get_stat64(&hw_stats->rx_mcast_packets) +
11924                 get_stat64(&hw_stats->rx_bcast_packets);
11925
11926         stats->tx_packets = old_stats->tx_packets +
11927                 get_stat64(&hw_stats->tx_ucast_packets) +
11928                 get_stat64(&hw_stats->tx_mcast_packets) +
11929                 get_stat64(&hw_stats->tx_bcast_packets);
11930
11931         stats->rx_bytes = old_stats->rx_bytes +
11932                 get_stat64(&hw_stats->rx_octets);
11933         stats->tx_bytes = old_stats->tx_bytes +
11934                 get_stat64(&hw_stats->tx_octets);
11935
11936         stats->rx_errors = old_stats->rx_errors +
11937                 get_stat64(&hw_stats->rx_errors);
11938         stats->tx_errors = old_stats->tx_errors +
11939                 get_stat64(&hw_stats->tx_errors) +
11940                 get_stat64(&hw_stats->tx_mac_errors) +
11941                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11942                 get_stat64(&hw_stats->tx_discards);
11943
11944         stats->multicast = old_stats->multicast +
11945                 get_stat64(&hw_stats->rx_mcast_packets);
11946         stats->collisions = old_stats->collisions +
11947                 get_stat64(&hw_stats->tx_collisions);
11948
11949         stats->rx_length_errors = old_stats->rx_length_errors +
11950                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11951                 get_stat64(&hw_stats->rx_undersize_packets);
11952
11953         stats->rx_frame_errors = old_stats->rx_frame_errors +
11954                 get_stat64(&hw_stats->rx_align_errors);
11955         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11956                 get_stat64(&hw_stats->tx_discards);
11957         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11958                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11959
11960         stats->rx_crc_errors = old_stats->rx_crc_errors +
11961                 tg3_calc_crc_errors(tp);
11962
11963         stats->rx_missed_errors = old_stats->rx_missed_errors +
11964                 get_stat64(&hw_stats->rx_discards);
11965
11966         stats->rx_dropped = tp->rx_dropped;
11967         stats->tx_dropped = tp->tx_dropped;
11968 }
11969
11970 static int tg3_get_regs_len(struct net_device *dev)
11971 {
11972         return TG3_REG_BLK_SIZE;
11973 }
11974
11975 static void tg3_get_regs(struct net_device *dev,
11976                 struct ethtool_regs *regs, void *_p)
11977 {
11978         struct tg3 *tp = netdev_priv(dev);
11979
11980         regs->version = 0;
11981
11982         memset(_p, 0, TG3_REG_BLK_SIZE);
11983
11984         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11985                 return;
11986
11987         tg3_full_lock(tp, 0);
11988
11989         tg3_dump_legacy_regs(tp, (u32 *)_p);
11990
11991         tg3_full_unlock(tp);
11992 }
11993
11994 static int tg3_get_eeprom_len(struct net_device *dev)
11995 {
11996         struct tg3 *tp = netdev_priv(dev);
11997
11998         return tp->nvram_size;
11999 }
12000
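/* ethtool EEPROM read (ethtool -e): reads NVRAM a 32-bit word at a
 * time, handling an unaligned head and tail, with the CPMU link-aware
 * and link-idle clock modes temporarily overridden during the access.
 */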
12001 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12002 {
12003         struct tg3 *tp = netdev_priv(dev);
12004         int ret, cpmu_restore = 0;
12005         u8  *pd;
12006         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12007         __be32 val;
12008
12009         if (tg3_flag(tp, NO_NVRAM))
12010                 return -EINVAL;
12011
12012         offset = eeprom->offset;
12013         len = eeprom->len;
12014         eeprom->len = 0;
12015
12016         eeprom->magic = TG3_EEPROM_MAGIC;
12017
12018         /* Override clock, link aware and link idle modes */
12019         if (tg3_flag(tp, CPMU_PRESENT)) {
12020                 cpmu_val = tr32(TG3_CPMU_CTRL);
12021                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12022                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12023                         tw32(TG3_CPMU_CTRL, cpmu_val &
12024                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12025                                              CPMU_CTRL_LINK_IDLE_MODE));
12026                         cpmu_restore = 1;
12027                 }
12028         }
12029         tg3_override_clk(tp);
12030
12031         if (offset & 3) {
12032                 /* adjustments to start on required 4 byte boundary */
12033                 b_offset = offset & 3;
12034                 b_count = 4 - b_offset;
12035                 if (b_count > len) {
12036                         /* i.e. offset=1 len=2 */
12037                         b_count = len;
12038                 }
12039                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12040                 if (ret)
12041                         goto eeprom_done;
12042                 memcpy(data, ((char *)&val) + b_offset, b_count);
12043                 len -= b_count;
12044                 offset += b_count;
12045                 eeprom->len += b_count;
12046         }
12047
12048         /* read bytes up to the last 4 byte boundary */
12049         pd = &data[eeprom->len];
12050         for (i = 0; i < (len - (len & 3)); i += 4) {
12051                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12052                 if (ret) {
12053                         if (i)
12054                                 i -= 4;
12055                         eeprom->len += i;
12056                         goto eeprom_done;
12057                 }
12058                 memcpy(pd + i, &val, 4);
12059                 if (need_resched()) {
12060                         if (signal_pending(current)) {
12061                                 eeprom->len += i;
12062                                 ret = -EINTR;
12063                                 goto eeprom_done;
12064                         }
12065                         cond_resched();
12066                 }
12067         }
12068         eeprom->len += i;
12069
12070         if (len & 3) {
12071                 /* read last bytes not ending on 4 byte boundary */
12072                 pd = &data[eeprom->len];
12073                 b_count = len & 3;
12074                 b_offset = offset + len - b_count;
12075                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12076                 if (ret)
12077                         goto eeprom_done;
12078                 memcpy(pd, &val, b_count);
12079                 eeprom->len += b_count;
12080         }
12081         ret = 0;
12082
12083 eeprom_done:
12084         /* Restore clock, link aware and link idle modes */
12085         tg3_restore_clk(tp);
12086         if (cpmu_restore)
12087                 tw32(TG3_CPMU_CTRL, cpmu_val);
12088
12089         return ret;
12090 }
12091
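/* ethtool EEPROM write (ethtool -E): a read-modify-write that rounds
 * the request out to 4-byte boundaries by merging in the existing
 * head and tail words before programming NVRAM.
 */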
12092 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12093 {
12094         struct tg3 *tp = netdev_priv(dev);
12095         int ret;
12096         u32 offset, len, b_offset, odd_len;
12097         u8 *buf;
12098         __be32 start = 0, end;
12099
12100         if (tg3_flag(tp, NO_NVRAM) ||
12101             eeprom->magic != TG3_EEPROM_MAGIC)
12102                 return -EINVAL;
12103
12104         offset = eeprom->offset;
12105         len = eeprom->len;
12106
12107         if ((b_offset = (offset & 3))) {
12108                 /* adjustments to start on required 4 byte boundary */
12109                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12110                 if (ret)
12111                         return ret;
12112                 len += b_offset;
12113                 offset &= ~3;
12114                 if (len < 4)
12115                         len = 4;
12116         }
12117
12118         odd_len = 0;
12119         if (len & 3) {
12120                 /* adjustments to end on required 4 byte boundary */
12121                 odd_len = 1;
12122                 len = (len + 3) & ~3;
12123                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12124                 if (ret)
12125                         return ret;
12126         }
12127
12128         buf = data;
12129         if (b_offset || odd_len) {
12130                 buf = kmalloc(len, GFP_KERNEL);
12131                 if (!buf)
12132                         return -ENOMEM;
12133                 if (b_offset)
12134                         memcpy(buf, &start, 4);
12135                 if (odd_len)
12136                         memcpy(buf+len-4, &end, 4);
12137                 memcpy(buf + b_offset, data, eeprom->len);
12138         }
12139
12140         ret = tg3_nvram_write_block(tp, offset, len, buf);
12141
12142         if (buf != data)
12143                 kfree(buf);
12144
12145         return ret;
12146 }
12147
12148 static int tg3_get_link_ksettings(struct net_device *dev,
12149                                   struct ethtool_link_ksettings *cmd)
12150 {
12151         struct tg3 *tp = netdev_priv(dev);
12152         u32 supported, advertising;
12153
12154         if (tg3_flag(tp, USE_PHYLIB)) {
12155                 struct phy_device *phydev;
12156                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12157                         return -EAGAIN;
12158                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12159                 phy_ethtool_ksettings_get(phydev, cmd);
12160
12161                 return 0;
12162         }
12163
12164         supported = (SUPPORTED_Autoneg);
12165
12166         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12167                 supported |= (SUPPORTED_1000baseT_Half |
12168                               SUPPORTED_1000baseT_Full);
12169
12170         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12171                 supported |= (SUPPORTED_100baseT_Half |
12172                               SUPPORTED_100baseT_Full |
12173                               SUPPORTED_10baseT_Half |
12174                               SUPPORTED_10baseT_Full |
12175                               SUPPORTED_TP);
12176                 cmd->base.port = PORT_TP;
12177         } else {
12178                 supported |= SUPPORTED_FIBRE;
12179                 cmd->base.port = PORT_FIBRE;
12180         }
12181         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12182                                                 supported);
12183
12184         advertising = tp->link_config.advertising;
12185         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12186                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12187                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12188                                 advertising |= ADVERTISED_Pause;
12189                         } else {
12190                                 advertising |= ADVERTISED_Pause |
12191                                         ADVERTISED_Asym_Pause;
12192                         }
12193                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12194                         advertising |= ADVERTISED_Asym_Pause;
12195                 }
12196         }
12197         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12198                                                 advertising);
12199
12200         if (netif_running(dev) && tp->link_up) {
12201                 cmd->base.speed = tp->link_config.active_speed;
12202                 cmd->base.duplex = tp->link_config.active_duplex;
12203                 ethtool_convert_legacy_u32_to_link_mode(
12204                         cmd->link_modes.lp_advertising,
12205                         tp->link_config.rmt_adv);
12206
12207                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12208                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12209                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12210                         else
12211                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12212                 }
12213         } else {
12214                 cmd->base.speed = SPEED_UNKNOWN;
12215                 cmd->base.duplex = DUPLEX_UNKNOWN;
12216                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12217         }
12218         cmd->base.phy_address = tp->phy_addr;
12219         cmd->base.autoneg = tp->link_config.autoneg;
12220         return 0;
12221 }
12222
12223 static int tg3_set_link_ksettings(struct net_device *dev,
12224                                   const struct ethtool_link_ksettings *cmd)
12225 {
12226         struct tg3 *tp = netdev_priv(dev);
12227         u32 speed = cmd->base.speed;
12228         u32 advertising;
12229
12230         if (tg3_flag(tp, USE_PHYLIB)) {
12231                 struct phy_device *phydev;
12232                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12233                         return -EAGAIN;
12234                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12235                 return phy_ethtool_ksettings_set(phydev, cmd);
12236         }
12237
12238         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12239             cmd->base.autoneg != AUTONEG_DISABLE)
12240                 return -EINVAL;
12241
12242         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12243             cmd->base.duplex != DUPLEX_FULL &&
12244             cmd->base.duplex != DUPLEX_HALF)
12245                 return -EINVAL;
12246
12247         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12248                                                 cmd->link_modes.advertising);
12249
12250         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12251                 u32 mask = ADVERTISED_Autoneg |
12252                            ADVERTISED_Pause |
12253                            ADVERTISED_Asym_Pause;
12254
12255                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12256                         mask |= ADVERTISED_1000baseT_Half |
12257                                 ADVERTISED_1000baseT_Full;
12258
12259                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12260                         mask |= ADVERTISED_100baseT_Half |
12261                                 ADVERTISED_100baseT_Full |
12262                                 ADVERTISED_10baseT_Half |
12263                                 ADVERTISED_10baseT_Full |
12264                                 ADVERTISED_TP;
12265                 else
12266                         mask |= ADVERTISED_FIBRE;
12267
12268                 if (advertising & ~mask)
12269                         return -EINVAL;
12270
12271                 mask &= (ADVERTISED_1000baseT_Half |
12272                          ADVERTISED_1000baseT_Full |
12273                          ADVERTISED_100baseT_Half |
12274                          ADVERTISED_100baseT_Full |
12275                          ADVERTISED_10baseT_Half |
12276                          ADVERTISED_10baseT_Full);
12277
12278                 advertising &= mask;
12279         } else {
12280                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12281                         if (speed != SPEED_1000)
12282                                 return -EINVAL;
12283
12284                         if (cmd->base.duplex != DUPLEX_FULL)
12285                                 return -EINVAL;
12286                 } else {
12287                         if (speed != SPEED_100 &&
12288                             speed != SPEED_10)
12289                                 return -EINVAL;
12290                 }
12291         }
12292
12293         tg3_full_lock(tp, 0);
12294
12295         tp->link_config.autoneg = cmd->base.autoneg;
12296         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12297                 tp->link_config.advertising = (advertising |
12298                                               ADVERTISED_Autoneg);
12299                 tp->link_config.speed = SPEED_UNKNOWN;
12300                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12301         } else {
12302                 tp->link_config.advertising = 0;
12303                 tp->link_config.speed = speed;
12304                 tp->link_config.duplex = cmd->base.duplex;
12305         }
12306
12307         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12308
12309         tg3_warn_mgmt_link_flap(tp);
12310
12311         if (netif_running(dev))
12312                 tg3_setup_phy(tp, true);
12313
12314         tg3_full_unlock(tp);
12315
12316         return 0;
12317 }
12318
12319 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12320 {
12321         struct tg3 *tp = netdev_priv(dev);
12322
12323         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12324         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12325         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12326         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12327 }
12328
12329 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12330 {
12331         struct tg3 *tp = netdev_priv(dev);
12332
12333         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12334                 wol->supported = WAKE_MAGIC;
12335         else
12336                 wol->supported = 0;
12337         wol->wolopts = 0;
12338         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12339                 wol->wolopts = WAKE_MAGIC;
12340         memset(&wol->sopass, 0, sizeof(wol->sopass));
12341 }
12342
12343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12344 {
12345         struct tg3 *tp = netdev_priv(dev);
12346         struct device *dp = &tp->pdev->dev;
12347
12348         if (wol->wolopts & ~WAKE_MAGIC)
12349                 return -EINVAL;
12350         if ((wol->wolopts & WAKE_MAGIC) &&
12351             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12352                 return -EINVAL;
12353
12354         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12355
12356         if (device_may_wakeup(dp))
12357                 tg3_flag_set(tp, WOL_ENABLE);
12358         else
12359                 tg3_flag_clear(tp, WOL_ENABLE);
12360
12361         return 0;
12362 }
12363
12364 static u32 tg3_get_msglevel(struct net_device *dev)
12365 {
12366         struct tg3 *tp = netdev_priv(dev);
12367         return tp->msg_enable;
12368 }
12369
12370 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12371 {
12372         struct tg3 *tp = netdev_priv(dev);
12373         tp->msg_enable = value;
12374 }
12375
12376 static int tg3_nway_reset(struct net_device *dev)
12377 {
12378         struct tg3 *tp = netdev_priv(dev);
12379         int r;
12380
12381         if (!netif_running(dev))
12382                 return -EAGAIN;
12383
12384         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12385                 return -EINVAL;
12386
12387         tg3_warn_mgmt_link_flap(tp);
12388
12389         if (tg3_flag(tp, USE_PHYLIB)) {
12390                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12391                         return -EAGAIN;
12392                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12393         } else {
12394                 u32 bmcr;
12395
12396                 spin_lock_bh(&tp->lock);
12397                 r = -EINVAL;
12398                 tg3_readphy(tp, MII_BMCR, &bmcr);
12399                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12400                     ((bmcr & BMCR_ANENABLE) ||
12401                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12402                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12403                                                    BMCR_ANENABLE);
12404                         r = 0;
12405                 }
12406                 spin_unlock_bh(&tp->lock);
12407         }
12408
12409         return r;
12410 }
12411
12412 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12413 {
12414         struct tg3 *tp = netdev_priv(dev);
12415
12416         ering->rx_max_pending = tp->rx_std_ring_mask;
12417         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12418                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12419         else
12420                 ering->rx_jumbo_max_pending = 0;
12421
12422         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12423
12424         ering->rx_pending = tp->rx_pending;
12425         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12426                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12427         else
12428                 ering->rx_jumbo_pending = 0;
12429
12430         ering->tx_pending = tp->napi[0].tx_pending;
12431 }
12432
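/* ethtool -G handler: validates the requested ring sizes (TX must
 * exceed MAX_SKB_FRAGS, and three times that on TSO_BUG chips), then
 * restarts the hardware with the new sizes if the device is running.
 */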
12433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12434 {
12435         struct tg3 *tp = netdev_priv(dev);
12436         int i, irq_sync = 0, err = 0;
12437         bool reset_phy = false;
12438
12439         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12440             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12441             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12442             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12443             (tg3_flag(tp, TSO_BUG) &&
12444              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12445                 return -EINVAL;
12446
12447         if (netif_running(dev)) {
12448                 tg3_phy_stop(tp);
12449                 tg3_netif_stop(tp);
12450                 irq_sync = 1;
12451         }
12452
12453         tg3_full_lock(tp, irq_sync);
12454
12455         tp->rx_pending = ering->rx_pending;
12456
12457         if (tg3_flag(tp, MAX_RXPEND_64) &&
12458             tp->rx_pending > 63)
12459                 tp->rx_pending = 63;
12460
12461         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12462                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12463
12464         for (i = 0; i < tp->irq_max; i++)
12465                 tp->napi[i].tx_pending = ering->tx_pending;
12466
12467         if (netif_running(dev)) {
12468                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12469                 /* Reset PHY to avoid PHY lock up */
12470                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12471                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12472                     tg3_asic_rev(tp) == ASIC_REV_5720)
12473                         reset_phy = true;
12474
12475                 err = tg3_restart_hw(tp, reset_phy);
12476                 if (!err)
12477                         tg3_netif_start(tp);
12478         }
12479
12480         tg3_full_unlock(tp);
12481
12482         if (irq_sync && !err)
12483                 tg3_phy_start(tp);
12484
12485         return err;
12486 }
12487
12488 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12489 {
12490         struct tg3 *tp = netdev_priv(dev);
12491
12492         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12493
12494         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12495                 epause->rx_pause = 1;
12496         else
12497                 epause->rx_pause = 0;
12498
12499         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12500                 epause->tx_pause = 1;
12501         else
12502                 epause->tx_pause = 0;
12503 }
12504
12505 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12506 {
12507         struct tg3 *tp = netdev_priv(dev);
12508         int err = 0;
12509         bool reset_phy = false;
12510
12511         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12512                 tg3_warn_mgmt_link_flap(tp);
12513
12514         if (tg3_flag(tp, USE_PHYLIB)) {
12515                 struct phy_device *phydev;
12516
12517                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12518
12519                 if (!phy_validate_pause(phydev, epause))
12520                         return -EINVAL;
12521
12522                 tp->link_config.flowctrl = 0;
12523                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12524                 if (epause->rx_pause) {
12525                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12526
12527                         if (epause->tx_pause) {
12528                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12529                         }
12530                 } else if (epause->tx_pause) {
12531                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12532                 }
12533
12534                 if (epause->autoneg)
12535                         tg3_flag_set(tp, PAUSE_AUTONEG);
12536                 else
12537                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12538
12539                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12540                         if (phydev->autoneg) {
12541                                 /* phy_set_asym_pause() will
12542                                  * renegotiate the link to inform our
12543                                  * link partner of our flow control
12544                                  * settings, even if the flow control
12545                                  * is forced.  Let tg3_adjust_link()
12546                                  * do the final flow control setup.
12547                                  */
12548                                 return 0;
12549                         }
12550
12551                         if (!epause->autoneg)
12552                                 tg3_setup_flow_control(tp, 0, 0);
12553                 }
12554         } else {
12555                 int irq_sync = 0;
12556
12557                 if (netif_running(dev)) {
12558                         tg3_netif_stop(tp);
12559                         irq_sync = 1;
12560                 }
12561
12562                 tg3_full_lock(tp, irq_sync);
12563
12564                 if (epause->autoneg)
12565                         tg3_flag_set(tp, PAUSE_AUTONEG);
12566                 else
12567                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12568                 if (epause->rx_pause)
12569                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12570                 else
12571                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12572                 if (epause->tx_pause)
12573                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12574                 else
12575                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12576
12577                 if (netif_running(dev)) {
12578                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12579                         /* Reset PHY to avoid PHY lock up */
12580                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12581                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12582                             tg3_asic_rev(tp) == ASIC_REV_5720)
12583                                 reset_phy = true;
12584
12585                         err = tg3_restart_hw(tp, reset_phy);
12586                         if (!err)
12587                                 tg3_netif_start(tp);
12588                 }
12589
12590                 tg3_full_unlock(tp);
12591         }
12592
12593         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12594
12595         return err;
12596 }
12597
12598 static int tg3_get_sset_count(struct net_device *dev, int sset)
12599 {
12600         switch (sset) {
12601         case ETH_SS_TEST:
12602                 return TG3_NUM_TEST;
12603         case ETH_SS_STATS:
12604                 return TG3_NUM_STATS;
12605         default:
12606                 return -EOPNOTSUPP;
12607         }
12608 }
12609
12610 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12611                          u32 *rules __always_unused)
12612 {
12613         struct tg3 *tp = netdev_priv(dev);
12614
12615         if (!tg3_flag(tp, SUPPORT_MSIX))
12616                 return -EOPNOTSUPP;
12617
12618         switch (info->cmd) {
12619         case ETHTOOL_GRXRINGS:
12620                 if (netif_running(tp->dev))
12621                         info->data = tp->rxq_cnt;
12622                 else {
12623                         info->data = num_online_cpus();
12624                         if (info->data > TG3_RSS_MAX_NUM_QS)
12625                                 info->data = TG3_RSS_MAX_NUM_QS;
12626                 }
12627
12628                 return 0;
12629
12630         default:
12631                 return -EOPNOTSUPP;
12632         }
12633 }
12634
12635 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12636 {
12637         u32 size = 0;
12638         struct tg3 *tp = netdev_priv(dev);
12639
12640         if (tg3_flag(tp, SUPPORT_MSIX))
12641                 size = TG3_RSS_INDIR_TBL_SIZE;
12642
12643         return size;
12644 }
12645
12646 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12647 {
12648         struct tg3 *tp = netdev_priv(dev);
12649         int i;
12650
12651         if (hfunc)
12652                 *hfunc = ETH_RSS_HASH_TOP;
12653         if (!indir)
12654                 return 0;
12655
12656         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12657                 indir[i] = tp->rss_ind_tbl[i];
12658
12659         return 0;
12660 }
12661
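/* ethtool -X handler: only the RSS indirection table may change; any
 * request to set a hash key or a hash function other than Toeplitz is
 * rejected.  The table can be rewritten while the device is running.
 */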
12662 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12663                         const u8 hfunc)
12664 {
12665         struct tg3 *tp = netdev_priv(dev);
12666         size_t i;
12667
12668         /* We require at least one supported parameter to be changed and no
12669          * change in any of the unsupported parameters
12670          */
12671         if (key ||
12672             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12673                 return -EOPNOTSUPP;
12674
12675         if (!indir)
12676                 return 0;
12677
12678         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12679                 tp->rss_ind_tbl[i] = indir[i];
12680
12681         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12682                 return 0;
12683
12684         /* It is legal to write the indirection
12685          * table while the device is running.
12686          */
12687         tg3_full_lock(tp, 0);
12688         tg3_rss_write_indir_tbl(tp);
12689         tg3_full_unlock(tp);
12690
12691         return 0;
12692 }
12693
12694 static void tg3_get_channels(struct net_device *dev,
12695                              struct ethtool_channels *channel)
12696 {
12697         struct tg3 *tp = netdev_priv(dev);
12698         u32 deflt_qs = netif_get_num_default_rss_queues();
12699
12700         channel->max_rx = tp->rxq_max;
12701         channel->max_tx = tp->txq_max;
12702
12703         if (netif_running(dev)) {
12704                 channel->rx_count = tp->rxq_cnt;
12705                 channel->tx_count = tp->txq_cnt;
12706         } else {
12707                 if (tp->rxq_req)
12708                         channel->rx_count = tp->rxq_req;
12709                 else
12710                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12711
12712                 if (tp->txq_req)
12713                         channel->tx_count = tp->txq_req;
12714                 else
12715                         channel->tx_count = min(deflt_qs, tp->txq_max);
12716         }
12717 }
12718
12719 static int tg3_set_channels(struct net_device *dev,
12720                             struct ethtool_channels *channel)
12721 {
12722         struct tg3 *tp = netdev_priv(dev);
12723
12724         if (!tg3_flag(tp, SUPPORT_MSIX))
12725                 return -EOPNOTSUPP;
12726
12727         if (channel->rx_count > tp->rxq_max ||
12728             channel->tx_count > tp->txq_max)
12729                 return -EINVAL;
12730
12731         tp->rxq_req = channel->rx_count;
12732         tp->txq_req = channel->tx_count;
12733
12734         if (!netif_running(dev))
12735                 return 0;
12736
12737         tg3_stop(tp);
12738
12739         tg3_carrier_off(tp);
12740
12741         tg3_start(tp, true, false, false);
12742
12743         return 0;
12744 }
12745
12746 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12747 {
12748         switch (stringset) {
12749         case ETH_SS_STATS:
12750                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12751                 break;
12752         case ETH_SS_TEST:
12753                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12754                 break;
12755         default:
12756                 WARN_ON(1);     /* unknown stringset; should never happen */
12757                 break;
12758         }
12759 }
12760
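/* ethtool LED identify hook.  Returning 1 from ETHTOOL_ID_ACTIVE asks
 * the ethtool core to cycle the LED on/off once per second; INACTIVE
 * restores the saved LED control value.
 */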
12761 static int tg3_set_phys_id(struct net_device *dev,
12762                             enum ethtool_phys_id_state state)
12763 {
12764         struct tg3 *tp = netdev_priv(dev);
12765
12766         if (!netif_running(tp->dev))
12767                 return -EAGAIN;
12768
12769         switch (state) {
12770         case ETHTOOL_ID_ACTIVE:
12771                 return 1;       /* cycle on/off once per second */
12772
12773         case ETHTOOL_ID_ON:
12774                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12775                      LED_CTRL_1000MBPS_ON |
12776                      LED_CTRL_100MBPS_ON |
12777                      LED_CTRL_10MBPS_ON |
12778                      LED_CTRL_TRAFFIC_OVERRIDE |
12779                      LED_CTRL_TRAFFIC_BLINK |
12780                      LED_CTRL_TRAFFIC_LED);
12781                 break;
12782
12783         case ETHTOOL_ID_OFF:
12784                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12785                      LED_CTRL_TRAFFIC_OVERRIDE);
12786                 break;
12787
12788         case ETHTOOL_ID_INACTIVE:
12789                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12790                 break;
12791         }
12792
12793         return 0;
12794 }
12795
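/* Copy the 64-bit ethtool statistics out of the hardware stats block,
 * or report zeroes if the block has not been allocated.
 */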
12796 static void tg3_get_ethtool_stats(struct net_device *dev,
12797                                    struct ethtool_stats *estats, u64 *tmp_stats)
12798 {
12799         struct tg3 *tp = netdev_priv(dev);
12800
12801         if (tp->hw_stats)
12802                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12803         else
12804                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12805 }
12806
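/* Read the VPD data block.  For EEPROM-style NVRAM, prefer the
 * extended-VPD directory entry and fall back to the fixed VPD offset;
 * other devices read the block through PCI config space instead.
 */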
12807 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12808 {
12809         int i;
12810         __be32 *buf;
12811         u32 offset = 0, len = 0;
12812         u32 magic, val;
12813
12814         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12815                 return NULL;
12816
12817         if (magic == TG3_EEPROM_MAGIC) {
12818                 for (offset = TG3_NVM_DIR_START;
12819                      offset < TG3_NVM_DIR_END;
12820                      offset += TG3_NVM_DIRENT_SIZE) {
12821                         if (tg3_nvram_read(tp, offset, &val))
12822                                 return NULL;
12823
12824                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12825                             TG3_NVM_DIRTYPE_EXTVPD)
12826                                 break;
12827                 }
12828
12829                 if (offset != TG3_NVM_DIR_END) {
12830                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12831                         if (tg3_nvram_read(tp, offset + 4, &offset))
12832                                 return NULL;
12833
12834                         offset = tg3_nvram_logical_addr(tp, offset);
12835                 }
12836         }
12837
12838         if (!offset || !len) {
12839                 offset = TG3_NVM_VPD_OFF;
12840                 len = TG3_NVM_VPD_LEN;
12841         }
12842
12843         buf = kmalloc(len, GFP_KERNEL);
12844         if (buf == NULL)
12845                 return NULL;
12846
12847         if (magic == TG3_EEPROM_MAGIC) {
12848                 for (i = 0; i < len; i += 4) {
12849                         /* The data is in little-endian format in NVRAM.
12850                          * Use the big-endian read routines to preserve
12851                          * the byte order as it exists in NVRAM.
12852                          */
12853                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12854                                 goto error;
12855                 }
12856         } else {
12857                 u8 *ptr;
12858                 ssize_t cnt;
12859                 unsigned int pos = 0;
12860
12861                 ptr = (u8 *)&buf[0];
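                /* Read the block through config space in up to three
                 * chunks; a timeout or signal counts as an empty read
                 * and consumes a retry, any other error aborts.
                 */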
12862                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12863                         cnt = pci_read_vpd(tp->pdev, pos,
12864                                            len - pos, ptr);
12865                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12866                                 cnt = 0;
12867                         else if (cnt < 0)
12868                                 goto error;
12869                 }
12870                 if (pos != len)
12871                         goto error;
12872         }
12873
12874         *vpdlen = len;
12875
12876         return buf;
12877
12878 error:
12879         kfree(buf);
12880         return NULL;
12881 }
12882
12883 #define NVRAM_TEST_SIZE 0x100
12884 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12885 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12886 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12887 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12888 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12889 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12890 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12891 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12892
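/* ethtool NVRAM self-test: verify the image according to its format -
 * a byte-sum check for selfboot firmware images, a parity check for
 * hardware selfboot images, and CRC checks of the bootstrap and
 * manufacturing blocks plus the VPD checksum keyword for legacy
 * EEPROM images.
 */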
12893 static int tg3_test_nvram(struct tg3 *tp)
12894 {
12895         u32 csum, magic, len;
12896         __be32 *buf;
12897         int i, j, k, err = 0, size;
12898
12899         if (tg3_flag(tp, NO_NVRAM))
12900                 return 0;
12901
12902         if (tg3_nvram_read(tp, 0, &magic) != 0)
12903                 return -EIO;
12904
12905         if (magic == TG3_EEPROM_MAGIC)
12906                 size = NVRAM_TEST_SIZE;
12907         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12908                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12909                     TG3_EEPROM_SB_FORMAT_1) {
12910                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12911                         case TG3_EEPROM_SB_REVISION_0:
12912                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12913                                 break;
12914                         case TG3_EEPROM_SB_REVISION_2:
12915                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12916                                 break;
12917                         case TG3_EEPROM_SB_REVISION_3:
12918                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12919                                 break;
12920                         case TG3_EEPROM_SB_REVISION_4:
12921                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12922                                 break;
12923                         case TG3_EEPROM_SB_REVISION_5:
12924                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12925                                 break;
12926                         case TG3_EEPROM_SB_REVISION_6:
12927                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12928                                 break;
12929                         default:
12930                                 return -EIO;
12931                         }
12932                 } else
12933                         return 0;
12934         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12935                 size = NVRAM_SELFBOOT_HW_SIZE;
12936         else
12937                 return -EIO;
12938
12939         buf = kmalloc(size, GFP_KERNEL);
12940         if (buf == NULL)
12941                 return -ENOMEM;
12942
12943         err = -EIO;
12944         for (i = 0, j = 0; i < size; i += 4, j++) {
12945                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12946                 if (err)
12947                         break;
12948         }
12949         if (i < size)
12950                 goto out;
12951
12952         /* Selfboot format */
12953         magic = be32_to_cpu(buf[0]);
12954         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12955             TG3_EEPROM_MAGIC_FW) {
12956                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12957
12958                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12959                     TG3_EEPROM_SB_REVISION_2) {
12960                         /* For rev 2, the csum doesn't include the MBA. */
12961                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12962                                 csum8 += buf8[i];
12963                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12964                                 csum8 += buf8[i];
12965                 } else {
12966                         for (i = 0; i < size; i++)
12967                                 csum8 += buf8[i];
12968                 }
12969
12970                 if (csum8 == 0) {
12971                         err = 0;
12972                         goto out;
12973                 }
12974
12975                 err = -EIO;
12976                 goto out;
12977         }
12978
12979         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12980             TG3_EEPROM_MAGIC_HW) {
12981                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12982                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12983                 u8 *buf8 = (u8 *) buf;
12984
12985                 /* Separate the parity bits and the data bytes.  */
12986                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12987                         if ((i == 0) || (i == 8)) {
12988                                 int l;
12989                                 u8 msk;
12990
12991                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12992                                         parity[k++] = buf8[i] & msk;
12993                                 i++;
12994                         } else if (i == 16) {
12995                                 int l;
12996                                 u8 msk;
12997
12998                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12999                                         parity[k++] = buf8[i] & msk;
13000                                 i++;
13001
13002                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13003                                         parity[k++] = buf8[i] & msk;
13004                                 i++;
13005                         }
13006                         data[j++] = buf8[i];
13007                 }
13008
13009                 err = -EIO;
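                /* Each byte must have odd parity together with its
                 * stored parity bit: a byte with an odd number of set
                 * bits must have its parity bit clear, one with an
                 * even count must have it set.
                 */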
13010                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13011                         u8 hw8 = hweight8(data[i]);
13012
13013                         if ((hw8 & 0x1) && parity[i])
13014                                 goto out;
13015                         else if (!(hw8 & 0x1) && !parity[i])
13016                                 goto out;
13017                 }
13018                 err = 0;
13019                 goto out;
13020         }
13021
13022         err = -EIO;
13023
13024         /* Bootstrap checksum at offset 0x10 */
13025         csum = calc_crc((unsigned char *) buf, 0x10);
13026         if (csum != le32_to_cpu(buf[0x10/4]))
13027                 goto out;
13028
13029         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13030         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13031         if (csum != le32_to_cpu(buf[0xfc/4]))
13032                 goto out;
13033
13034         kfree(buf);
13035
13036         buf = tg3_vpd_readblock(tp, &len);
13037         if (!buf)
13038                 return -ENOMEM;
13039
13040         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13041         if (i > 0) {
13042                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13043                 if (j < 0)
13044                         goto out;
13045
13046                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13047                         goto out;
13048
13049                 i += PCI_VPD_LRDT_TAG_SIZE;
13050                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13051                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13052                 if (j > 0) {
13053                         u8 csum8 = 0;
13054
13055                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13056
13057                         for (i = 0; i <= j; i++)
13058                                 csum8 += ((u8 *)buf)[i];
13059
13060                         if (csum8)
13061                                 goto out;
13062                 }
13063         }
13064
13065         err = 0;
13066
13067 out:
13068         kfree(buf);
13069         return err;
13070 }
13071
13072 #define TG3_SERDES_TIMEOUT_SEC  2
13073 #define TG3_COPPER_TIMEOUT_SEC  6
13074
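/* ethtool link self-test: poll for link-up for at most 2 seconds on
 * SerDes devices or 6 seconds on copper.
 */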
13075 static int tg3_test_link(struct tg3 *tp)
13076 {
13077         int i, max;
13078
13079         if (!netif_running(tp->dev))
13080                 return -ENODEV;
13081
13082         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13083                 max = TG3_SERDES_TIMEOUT_SEC;
13084         else
13085                 max = TG3_COPPER_TIMEOUT_SEC;
13086
13087         for (i = 0; i < max; i++) {
13088                 if (tp->link_up)
13089                         return 0;
13090
13091                 if (msleep_interruptible(1000))
13092                         break;
13093         }
13094
13095         return -EIO;
13096 }
13097
13098 /* Only test the commonly used registers */
13099 static int tg3_test_registers(struct tg3 *tp)
13100 {
13101         int i, is_5705, is_5750;
13102         u32 offset, read_mask, write_mask, val, save_val, read_val;
13103         static struct {
13104                 u16 offset;
13105                 u16 flags;
13106 #define TG3_FL_5705     0x1
13107 #define TG3_FL_NOT_5705 0x2
13108 #define TG3_FL_NOT_5788 0x4
13109 #define TG3_FL_NOT_5750 0x8
13110                 u32 read_mask;
13111                 u32 write_mask;
13112         } reg_tbl[] = {
13113                 /* MAC Control Registers */
13114                 { MAC_MODE, TG3_FL_NOT_5705,
13115                         0x00000000, 0x00ef6f8c },
13116                 { MAC_MODE, TG3_FL_5705,
13117                         0x00000000, 0x01ef6b8c },
13118                 { MAC_STATUS, TG3_FL_NOT_5705,
13119                         0x03800107, 0x00000000 },
13120                 { MAC_STATUS, TG3_FL_5705,
13121                         0x03800100, 0x00000000 },
13122                 { MAC_ADDR_0_HIGH, 0x0000,
13123                         0x00000000, 0x0000ffff },
13124                 { MAC_ADDR_0_LOW, 0x0000,
13125                         0x00000000, 0xffffffff },
13126                 { MAC_RX_MTU_SIZE, 0x0000,
13127                         0x00000000, 0x0000ffff },
13128                 { MAC_TX_MODE, 0x0000,
13129                         0x00000000, 0x00000070 },
13130                 { MAC_TX_LENGTHS, 0x0000,
13131                         0x00000000, 0x00003fff },
13132                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13133                         0x00000000, 0x000007fc },
13134                 { MAC_RX_MODE, TG3_FL_5705,
13135                         0x00000000, 0x000007dc },
13136                 { MAC_HASH_REG_0, 0x0000,
13137                         0x00000000, 0xffffffff },
13138                 { MAC_HASH_REG_1, 0x0000,
13139                         0x00000000, 0xffffffff },
13140                 { MAC_HASH_REG_2, 0x0000,
13141                         0x00000000, 0xffffffff },
13142                 { MAC_HASH_REG_3, 0x0000,
13143                         0x00000000, 0xffffffff },
13144
13145                 /* Receive Data and Receive BD Initiator Control Registers. */
13146                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13147                         0x00000000, 0xffffffff },
13148                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13149                         0x00000000, 0xffffffff },
13150                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13151                         0x00000000, 0x00000003 },
13152                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13153                         0x00000000, 0xffffffff },
13154                 { RCVDBDI_STD_BD+0, 0x0000,
13155                         0x00000000, 0xffffffff },
13156                 { RCVDBDI_STD_BD+4, 0x0000,
13157                         0x00000000, 0xffffffff },
13158                 { RCVDBDI_STD_BD+8, 0x0000,
13159                         0x00000000, 0xffff0002 },
13160                 { RCVDBDI_STD_BD+0xc, 0x0000,
13161                         0x00000000, 0xffffffff },
13162
13163                 /* Receive BD Initiator Control Registers. */
13164                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13165                         0x00000000, 0xffffffff },
13166                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13167                         0x00000000, 0x000003ff },
13168                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13169                         0x00000000, 0xffffffff },
13170
13171                 /* Host Coalescing Control Registers. */
13172                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13173                         0x00000000, 0x00000004 },
13174                 { HOSTCC_MODE, TG3_FL_5705,
13175                         0x00000000, 0x000000f6 },
13176                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13177                         0x00000000, 0xffffffff },
13178                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13179                         0x00000000, 0x000003ff },
13180                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13181                         0x00000000, 0xffffffff },
13182                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13183                         0x00000000, 0x000003ff },
13184                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13185                         0x00000000, 0xffffffff },
13186                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13187                         0x00000000, 0x000000ff },
13188                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13189                         0x00000000, 0xffffffff },
13190                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13191                         0x00000000, 0x000000ff },
13192                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13193                         0x00000000, 0xffffffff },
13194                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13195                         0x00000000, 0xffffffff },
13196                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13197                         0x00000000, 0xffffffff },
13198                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13199                         0x00000000, 0x000000ff },
13200                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13201                         0x00000000, 0xffffffff },
13202                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13203                         0x00000000, 0x000000ff },
13204                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13205                         0x00000000, 0xffffffff },
13206                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13207                         0x00000000, 0xffffffff },
13208                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13209                         0x00000000, 0xffffffff },
13210                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13211                         0x00000000, 0xffffffff },
13212                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13213                         0x00000000, 0xffffffff },
13214                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13215                         0xffffffff, 0x00000000 },
13216                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13217                         0xffffffff, 0x00000000 },
13218
13219                 /* Buffer Manager Control Registers. */
13220                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13221                         0x00000000, 0x007fff80 },
13222                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13223                         0x00000000, 0x007fffff },
13224                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13225                         0x00000000, 0x0000003f },
13226                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13227                         0x00000000, 0x000001ff },
13228                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13229                         0x00000000, 0x000001ff },
13230                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13231                         0xffffffff, 0x00000000 },
13232                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13233                         0xffffffff, 0x00000000 },
13234
13235                 /* Mailbox Registers */
13236                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13237                         0x00000000, 0x000001ff },
13238                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13239                         0x00000000, 0x000001ff },
13240                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13241                         0x00000000, 0x000007ff },
13242                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13243                         0x00000000, 0x000001ff },
13244
13245                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13246         };
13247
13248         is_5705 = is_5750 = 0;
13249         if (tg3_flag(tp, 5705_PLUS)) {
13250                 is_5705 = 1;
13251                 if (tg3_flag(tp, 5750_PLUS))
13252                         is_5750 = 1;
13253         }
13254
13255         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13256                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13257                         continue;
13258
13259                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13260                         continue;
13261
13262                 if (tg3_flag(tp, IS_5788) &&
13263                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13264                         continue;
13265
13266                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13267                         continue;
13268
13269                 offset = (u32) reg_tbl[i].offset;
13270                 read_mask = reg_tbl[i].read_mask;
13271                 write_mask = reg_tbl[i].write_mask;
13272
13273                 /* Save the original register content */
13274                 save_val = tr32(offset);
13275
13276                 /* Determine the read-only value. */
13277                 read_val = save_val & read_mask;
13278
13279                 /* Write zero to the register, then make sure the read-only bits
13280                  * are not changed and the read/write bits are all zeros.
13281                  */
13282                 tw32(offset, 0);
13283
13284                 val = tr32(offset);
13285
13286                 /* Test the read-only and read/write bits. */
13287                 if (((val & read_mask) != read_val) || (val & write_mask))
13288                         goto out;
13289
13290                 /* Write ones to all the bits defined by read_mask and
13291                  * write_mask, then make sure the read-only bits are not
13292                  * changed and the read/write bits are all ones.
13293                  */
13294                 tw32(offset, read_mask | write_mask);
13295
13296                 val = tr32(offset);
13297
13298                 /* Test the read-only bits. */
13299                 if ((val & read_mask) != read_val)
13300                         goto out;
13301
13302                 /* Test the read/write bits. */
13303                 if ((val & write_mask) != write_mask)
13304                         goto out;
13305
13306                 tw32(offset, save_val);
13307         }
13308
13309         return 0;
13310
13311 out:
13312         if (netif_msg_hw(tp))
13313                 netdev_err(tp->dev,
13314                            "Register test failed at offset %x\n", offset);
13315         tw32(offset, save_val);
13316         return -EIO;
13317 }
13318
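/* Write each test pattern across the given memory window and read it
 * back word by word.
 */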
13319 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13320 {
13321         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13322         int i;
13323         u32 j;
13324
13325         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13326                 for (j = 0; j < len; j += 4) {
13327                         u32 val;
13328
13329                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13330                         tg3_read_mem(tp, offset + j, &val);
13331                         if (val != test_pattern[i])
13332                                 return -EIO;
13333                 }
13334         }
13335         return 0;
13336 }
13337
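/* ethtool memory self-test: pick the internal memory map that matches
 * the ASIC generation and pattern-test each region in it.
 */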
13338 static int tg3_test_memory(struct tg3 *tp)
13339 {
13340         static struct mem_entry {
13341                 u32 offset;
13342                 u32 len;
13343         } mem_tbl_570x[] = {
13344                 { 0x00000000, 0x00b50},
13345                 { 0x00002000, 0x1c000},
13346                 { 0xffffffff, 0x00000}
13347         }, mem_tbl_5705[] = {
13348                 { 0x00000100, 0x0000c},
13349                 { 0x00000200, 0x00008},
13350                 { 0x00004000, 0x00800},
13351                 { 0x00006000, 0x01000},
13352                 { 0x00008000, 0x02000},
13353                 { 0x00010000, 0x0e000},
13354                 { 0xffffffff, 0x00000}
13355         }, mem_tbl_5755[] = {
13356                 { 0x00000200, 0x00008},
13357                 { 0x00004000, 0x00800},
13358                 { 0x00006000, 0x00800},
13359                 { 0x00008000, 0x02000},
13360                 { 0x00010000, 0x0c000},
13361                 { 0xffffffff, 0x00000}
13362         }, mem_tbl_5906[] = {
13363                 { 0x00000200, 0x00008},
13364                 { 0x00004000, 0x00400},
13365                 { 0x00006000, 0x00400},
13366                 { 0x00008000, 0x01000},
13367                 { 0x00010000, 0x01000},
13368                 { 0xffffffff, 0x00000}
13369         }, mem_tbl_5717[] = {
13370                 { 0x00000200, 0x00008},
13371                 { 0x00010000, 0x0a000},
13372                 { 0x00020000, 0x13c00},
13373                 { 0xffffffff, 0x00000}
13374         }, mem_tbl_57765[] = {
13375                 { 0x00000200, 0x00008},
13376                 { 0x00004000, 0x00800},
13377                 { 0x00006000, 0x09800},
13378                 { 0x00010000, 0x0a000},
13379                 { 0xffffffff, 0x00000}
13380         };
13381         struct mem_entry *mem_tbl;
13382         int err = 0;
13383         int i;
13384
13385         if (tg3_flag(tp, 5717_PLUS))
13386                 mem_tbl = mem_tbl_5717;
13387         else if (tg3_flag(tp, 57765_CLASS) ||
13388                  tg3_asic_rev(tp) == ASIC_REV_5762)
13389                 mem_tbl = mem_tbl_57765;
13390         else if (tg3_flag(tp, 5755_PLUS))
13391                 mem_tbl = mem_tbl_5755;
13392         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13393                 mem_tbl = mem_tbl_5906;
13394         else if (tg3_flag(tp, 5705_PLUS))
13395                 mem_tbl = mem_tbl_5705;
13396         else
13397                 mem_tbl = mem_tbl_570x;
13398
13399         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13400                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13401                 if (err)
13402                         break;
13403         }
13404
13405         return err;
13406 }
13407
13408 #define TG3_TSO_MSS             500
13409
13410 #define TG3_TSO_IP_HDR_LEN      20
13411 #define TG3_TSO_TCP_HDR_LEN     20
13412 #define TG3_TSO_TCP_OPT_LEN     12
13413
13414 static const u8 tg3_tso_header[] = {
13415 0x08, 0x00,
13416 0x45, 0x00, 0x00, 0x00,
13417 0x00, 0x00, 0x40, 0x00,
13418 0x40, 0x06, 0x00, 0x00,
13419 0x0a, 0x00, 0x00, 0x01,
13420 0x0a, 0x00, 0x00, 0x02,
13421 0x0d, 0x00, 0xe0, 0x00,
13422 0x00, 0x00, 0x01, 0x00,
13423 0x00, 0x00, 0x02, 0x00,
13424 0x80, 0x10, 0x10, 0x00,
13425 0x14, 0x09, 0x00, 0x00,
13426 0x01, 0x01, 0x08, 0x0a,
13427 0x11, 0x11, 0x11, 0x11,
13428 0x11, 0x11, 0x11, 0x11,
13429 };
13430
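/* Transmit one frame on the current loopback path and verify it comes
 * back intact.  For TSO loopback a canned IP/TCP header
 * (tg3_tso_header) is used and the frame is segmented by the hardware;
 * the payload is filled with a counting pattern that is checked byte
 * by byte on receive.
 */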
13431 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13432 {
13433         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13434         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13435         u32 budget;
13436         struct sk_buff *skb;
13437         u8 *tx_data, *rx_data;
13438         dma_addr_t map;
13439         int num_pkts, tx_len, rx_len, i, err;
13440         struct tg3_rx_buffer_desc *desc;
13441         struct tg3_napi *tnapi, *rnapi;
13442         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13443
13444         tnapi = &tp->napi[0];
13445         rnapi = &tp->napi[0];
13446         if (tp->irq_cnt > 1) {
13447                 if (tg3_flag(tp, ENABLE_RSS))
13448                         rnapi = &tp->napi[1];
13449                 if (tg3_flag(tp, ENABLE_TSS))
13450                         tnapi = &tp->napi[1];
13451         }
13452         coal_now = tnapi->coal_now | rnapi->coal_now;
13453
13454         err = -EIO;
13455
13456         tx_len = pktsz;
13457         skb = netdev_alloc_skb(tp->dev, tx_len);
13458         if (!skb)
13459                 return -ENOMEM;
13460
13461         tx_data = skb_put(skb, tx_len);
13462         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13463         memset(tx_data + ETH_ALEN, 0x0, 8);
13464
13465         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13466
13467         if (tso_loopback) {
13468                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13469
13470                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13471                               TG3_TSO_TCP_OPT_LEN;
13472
13473                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13474                        sizeof(tg3_tso_header));
13475                 mss = TG3_TSO_MSS;
13476
13477                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13478                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13479
13480                 /* Set the total length field in the IP header */
13481                 iph->tot_len = htons((u16)(mss + hdr_len));
13482
13483                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13484                               TXD_FLAG_CPU_POST_DMA);
13485
13486                 if (tg3_flag(tp, HW_TSO_1) ||
13487                     tg3_flag(tp, HW_TSO_2) ||
13488                     tg3_flag(tp, HW_TSO_3)) {
13489                         struct tcphdr *th;
13490                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13491                         th = (struct tcphdr *)&tx_data[val];
13492                         th->check = 0;
13493                 } else
13494                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13495
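                /* Each hardware TSO generation encodes the header
                 * length differently: HW_TSO_3 splits it between the
                 * mss field and base_flags, HW_TSO_2 packs it into the
                 * upper mss bits, and older engines carry only the TCP
                 * option length.
                 */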
13496                 if (tg3_flag(tp, HW_TSO_3)) {
13497                         mss |= (hdr_len & 0xc) << 12;
13498                         if (hdr_len & 0x10)
13499                                 base_flags |= 0x00000010;
13500                         base_flags |= (hdr_len & 0x3e0) << 5;
13501                 } else if (tg3_flag(tp, HW_TSO_2))
13502                         mss |= hdr_len << 9;
13503                 else if (tg3_flag(tp, HW_TSO_1) ||
13504                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13505                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13506                 } else {
13507                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13508                 }
13509
13510                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13511         } else {
13512                 num_pkts = 1;
13513                 data_off = ETH_HLEN;
13514
13515                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13516                     tx_len > VLAN_ETH_FRAME_LEN)
13517                         base_flags |= TXD_FLAG_JMB_PKT;
13518         }
13519
13520         for (i = data_off; i < tx_len; i++)
13521                 tx_data[i] = (u8) (i & 0xff);
13522
13523         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13524         if (pci_dma_mapping_error(tp->pdev, map)) {
13525                 dev_kfree_skb(skb);
13526                 return -EIO;
13527         }
13528
13529         val = tnapi->tx_prod;
13530         tnapi->tx_buffers[val].skb = skb;
13531         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13532
13533         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13534                rnapi->coal_now);
13535
13536         udelay(10);
13537
13538         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13539
13540         budget = tg3_tx_avail(tnapi);
13541         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13542                             base_flags | TXD_FLAG_END, mss, 0)) {
13543                 tnapi->tx_buffers[val].skb = NULL;
13544                 dev_kfree_skb(skb);
13545                 return -EIO;
13546         }
13547
13548         tnapi->tx_prod++;
13549
13550         /* Sync BD data before updating mailbox */
13551         wmb();
13552
13553         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13554         tr32_mailbox(tnapi->prodmbox);
13555
13556         udelay(10);
13557
13558         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13559         for (i = 0; i < 35; i++) {
13560                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13561                        coal_now);
13562
13563                 udelay(10);
13564
13565                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13566                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13567                 if ((tx_idx == tnapi->tx_prod) &&
13568                     (rx_idx == (rx_start_idx + num_pkts)))
13569                         break;
13570         }
13571
13572         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13573         dev_kfree_skb(skb);
13574
13575         if (tx_idx != tnapi->tx_prod)
13576                 goto out;
13577
13578         if (rx_idx != rx_start_idx + num_pkts)
13579                 goto out;
13580
13581         val = data_off;
13582         while (rx_idx != rx_start_idx) {
13583                 desc = &rnapi->rx_rcb[rx_start_idx++];
13584                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13585                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13586
13587                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13588                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13589                         goto out;
13590
13591                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13592                          - ETH_FCS_LEN;
13593
13594                 if (!tso_loopback) {
13595                         if (rx_len != tx_len)
13596                                 goto out;
13597
13598                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13599                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13600                                         goto out;
13601                         } else {
13602                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13603                                         goto out;
13604                         }
13605                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13606                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13607                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13608                         goto out;
13609                 }
13610
13611                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13612                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13613                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13614                                              mapping);
13615                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13616                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13617                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13618                                              mapping);
13619                 } else
13620                         goto out;
13621
13622                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13623                                             PCI_DMA_FROMDEVICE);
13624
13625                 rx_data += TG3_RX_OFFSET(tp);
13626                 for (i = data_off; i < rx_len; i++, val++) {
13627                         if (*(rx_data + i) != (u8) (val & 0xff))
13628                                 goto out;
13629                 }
13630         }
13631
13632         err = 0;
13633
13634         /* tg3_free_rings will unmap and free the rx_data */
13635 out:
13636         return err;
13637 }
13638
13639 #define TG3_STD_LOOPBACK_FAILED         1
13640 #define TG3_JMB_LOOPBACK_FAILED         2
13641 #define TG3_TSO_LOOPBACK_FAILED         4
13642 #define TG3_LOOPBACK_FAILED \
13643         (TG3_STD_LOOPBACK_FAILED | \
13644          TG3_JMB_LOOPBACK_FAILED | \
13645          TG3_TSO_LOOPBACK_FAILED)
13646
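/* Run the MAC, PHY and (optionally) external loopback variants at
 * standard, TSO and jumbo frame sizes, accumulating per-variant
 * failure bits in data[].
 */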
13647 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13648 {
13649         int err = -EIO;
13650         u32 eee_cap;
13651         u32 jmb_pkt_sz = 9000;
13652
13653         if (tp->dma_limit)
13654                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13655
13656         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13657         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13658
13659         if (!netif_running(tp->dev)) {
13660                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13661                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13662                 if (do_extlpbk)
13663                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13664                 goto done;
13665         }
13666
13667         err = tg3_reset_hw(tp, true);
13668         if (err) {
13669                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13670                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13671                 if (do_extlpbk)
13672                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13673                 goto done;
13674         }
13675
13676         if (tg3_flag(tp, ENABLE_RSS)) {
13677                 int i;
13678
13679                 /* Reroute all rx packets to the 1st queue */
13680                 for (i = MAC_RSS_INDIR_TBL_0;
13681                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13682                         tw32(i, 0x0);
13683         }
13684
13685         /* HW erratum - MAC loopback fails in some cases on 5780.
13686          * Normal traffic and PHY loopback are not affected by
13687          * this erratum.  Also, the MAC loopback test is deprecated
13688          * for all newer ASIC revisions.
13689          */
13690         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13691             !tg3_flag(tp, CPMU_PRESENT)) {
13692                 tg3_mac_loopback(tp, true);
13693
13694                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13695                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13696
13697                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13698                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13699                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13700
13701                 tg3_mac_loopback(tp, false);
13702         }
13703
13704         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13705             !tg3_flag(tp, USE_PHYLIB)) {
13706                 int i;
13707
13708                 tg3_phy_lpbk_set(tp, 0, false);
13709
13710                 /* Wait for link */
13711                 for (i = 0; i < 100; i++) {
13712                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13713                                 break;
13714                         mdelay(1);
13715                 }
13716
13717                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13718                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13719                 if (tg3_flag(tp, TSO_CAPABLE) &&
13720                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13721                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13722                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13723                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13724                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13725
13726                 if (do_extlpbk) {
13727                         tg3_phy_lpbk_set(tp, 0, true);
13728
13729                         /* All link indications report up, but the hardware
13730                          * isn't really ready for about 20 msec.  Double it
13731                          * to be sure.
13732                          */
13733                         mdelay(40);
13734
13735                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13736                                 data[TG3_EXT_LOOPB_TEST] |=
13737                                                         TG3_STD_LOOPBACK_FAILED;
13738                         if (tg3_flag(tp, TSO_CAPABLE) &&
13739                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13740                                 data[TG3_EXT_LOOPB_TEST] |=
13741                                                         TG3_TSO_LOOPBACK_FAILED;
13742                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13743                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13744                                 data[TG3_EXT_LOOPB_TEST] |=
13745                                                         TG3_JMB_LOOPBACK_FAILED;
13746                 }
13747
13748                 /* Re-enable gphy autopowerdown. */
13749                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13750                         tg3_phy_toggle_apd(tp, true);
13751         }
13752
13753         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13754                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13755
13756 done:
13757         tp->phy_flags |= eee_cap;
13758
13759         return err;
13760 }
13761
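/* ethtool self-test entry point.  Online tests (NVRAM, link) run
 * as-is; offline tests halt the chip, run the register, memory,
 * loopback and interrupt tests, then restart the hardware.  A device
 * in low-power state is powered up first and prepared for power-down
 * again afterwards.
 */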
13762 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13763                           u64 *data)
13764 {
13765         struct tg3 *tp = netdev_priv(dev);
13766         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13767
13768         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13769                 if (tg3_power_up(tp)) {
13770                         etest->flags |= ETH_TEST_FL_FAILED;
13771                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13772                         return;
13773                 }
13774                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13775         }
13776
13777         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13778
13779         if (tg3_test_nvram(tp) != 0) {
13780                 etest->flags |= ETH_TEST_FL_FAILED;
13781                 data[TG3_NVRAM_TEST] = 1;
13782         }
13783         if (!doextlpbk && tg3_test_link(tp)) {
13784                 etest->flags |= ETH_TEST_FL_FAILED;
13785                 data[TG3_LINK_TEST] = 1;
13786         }
13787         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13788                 int err, err2 = 0, irq_sync = 0;
13789
13790                 if (netif_running(dev)) {
13791                         tg3_phy_stop(tp);
13792                         tg3_netif_stop(tp);
13793                         irq_sync = 1;
13794                 }
13795
13796                 tg3_full_lock(tp, irq_sync);
13797                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13798                 err = tg3_nvram_lock(tp);
13799                 tg3_halt_cpu(tp, RX_CPU_BASE);
13800                 if (!tg3_flag(tp, 5705_PLUS))
13801                         tg3_halt_cpu(tp, TX_CPU_BASE);
13802                 if (!err)
13803                         tg3_nvram_unlock(tp);
13804
13805                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13806                         tg3_phy_reset(tp);
13807
13808                 if (tg3_test_registers(tp) != 0) {
13809                         etest->flags |= ETH_TEST_FL_FAILED;
13810                         data[TG3_REGISTER_TEST] = 1;
13811                 }
13812
13813                 if (tg3_test_memory(tp) != 0) {
13814                         etest->flags |= ETH_TEST_FL_FAILED;
13815                         data[TG3_MEMORY_TEST] = 1;
13816                 }
13817
13818                 if (doextlpbk)
13819                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13820
13821                 if (tg3_test_loopback(tp, data, doextlpbk))
13822                         etest->flags |= ETH_TEST_FL_FAILED;
13823
13824                 tg3_full_unlock(tp);
13825
13826                 if (tg3_test_interrupt(tp) != 0) {
13827                         etest->flags |= ETH_TEST_FL_FAILED;
13828                         data[TG3_INTERRUPT_TEST] = 1;
13829                 }
13830
13831                 tg3_full_lock(tp, 0);
13832
13833                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13834                 if (netif_running(dev)) {
13835                         tg3_flag_set(tp, INIT_COMPLETE);
13836                         err2 = tg3_restart_hw(tp, true);
13837                         if (!err2)
13838                                 tg3_netif_start(tp);
13839                 }
13840
13841                 tg3_full_unlock(tp);
13842
13843                 if (irq_sync && !err2)
13844                         tg3_phy_start(tp);
13845         }
13846         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13847                 tg3_power_down_prepare(tp);
13849 }
13850
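/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config rx_filter
 * onto TG3_RX_PTP_CTL bits and enable or disable Tx timestamping.
 */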
13851 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13852 {
13853         struct tg3 *tp = netdev_priv(dev);
13854         struct hwtstamp_config stmpconf;
13855
13856         if (!tg3_flag(tp, PTP_CAPABLE))
13857                 return -EOPNOTSUPP;
13858
13859         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13860                 return -EFAULT;
13861
13862         if (stmpconf.flags)
13863                 return -EINVAL;
13864
13865         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13866             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13867                 return -ERANGE;
13868
13869         switch (stmpconf.rx_filter) {
13870         case HWTSTAMP_FILTER_NONE:
13871                 tp->rxptpctl = 0;
13872                 break;
13873         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13874                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13875                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13876                 break;
13877         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13878                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13879                                TG3_RX_PTP_CTL_SYNC_EVNT;
13880                 break;
13881         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13882                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13883                                TG3_RX_PTP_CTL_DELAY_REQ;
13884                 break;
13885         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13886                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13887                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13888                 break;
13889         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13890                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13891                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13892                 break;
13893         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13894                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13895                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13896                 break;
13897         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13898                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13899                                TG3_RX_PTP_CTL_SYNC_EVNT;
13900                 break;
13901         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13902                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13903                                TG3_RX_PTP_CTL_SYNC_EVNT;
13904                 break;
13905         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13906                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13907                                TG3_RX_PTP_CTL_SYNC_EVNT;
13908                 break;
13909         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13910                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13911                                TG3_RX_PTP_CTL_DELAY_REQ;
13912                 break;
13913         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13914                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13915                                TG3_RX_PTP_CTL_DELAY_REQ;
13916                 break;
13917         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13918                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13919                                TG3_RX_PTP_CTL_DELAY_REQ;
13920                 break;
13921         default:
13922                 return -ERANGE;
13923         }
13924
13925         if (netif_running(dev) && tp->rxptpctl)
13926                 tw32(TG3_RX_PTP_CTL,
13927                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13928
13929         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13930                 tg3_flag_set(tp, TX_TSTAMP_EN);
13931         else
13932                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13933
13934         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13935                 -EFAULT : 0;
13936 }
13937
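/* SIOCGHWTSTAMP handler: reverse-map the current TG3_RX_PTP_CTL value
 * back to an rx_filter for user space.
 */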
13938 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13939 {
13940         struct tg3 *tp = netdev_priv(dev);
13941         struct hwtstamp_config stmpconf;
13942
13943         if (!tg3_flag(tp, PTP_CAPABLE))
13944                 return -EOPNOTSUPP;
13945
13946         stmpconf.flags = 0;
13947         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13948                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13949
13950         switch (tp->rxptpctl) {
13951         case 0:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13959                 break;
13960         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13961                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13962                 break;
13963         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13964                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13965                 break;
13966         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13967                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13968                 break;
13969         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13970                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13971                 break;
13972         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13973                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13974                 break;
13975         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13976                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13977                 break;
13978         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13979                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13980                 break;
13981         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13982                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13983                 break;
13984         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13985                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13986                 break;
13987         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13988                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13989                 break;
13990         default:
13991                 WARN_ON_ONCE(1);
13992                 return -ERANGE;
13993         }
13994
13995         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13996                 -EFAULT : 0;
13997 }
13998
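/* ndo_do_ioctl handler: MII register access (delegated to phylib when
 * it manages the PHY) plus the hardware timestamping ioctls.
 */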
13999 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14000 {
14001         struct mii_ioctl_data *data = if_mii(ifr);
14002         struct tg3 *tp = netdev_priv(dev);
14003         int err;
14004
14005         if (tg3_flag(tp, USE_PHYLIB)) {
14006                 struct phy_device *phydev;
14007                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14008                         return -EAGAIN;
14009                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14010                 return phy_mii_ioctl(phydev, ifr, cmd);
14011         }
14012
14013         switch (cmd) {
14014         case SIOCGMIIPHY:
14015                 data->phy_id = tp->phy_addr;
14016
14017                 /* fall through */
14018         case SIOCGMIIREG: {
14019                 u32 mii_regval;
14020
14021                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14022                         break;                  /* We have no PHY */
14023
14024                 if (!netif_running(dev))
14025                         return -EAGAIN;
14026
14027                 spin_lock_bh(&tp->lock);
14028                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14029                                     data->reg_num & 0x1f, &mii_regval);
14030                 spin_unlock_bh(&tp->lock);
14031
14032                 data->val_out = mii_regval;
14033
14034                 return err;
14035         }
14036
14037         case SIOCSMIIREG:
14038                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14039                         break;                  /* We have no PHY */
14040
14041                 if (!netif_running(dev))
14042                         return -EAGAIN;
14043
14044                 spin_lock_bh(&tp->lock);
14045                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14046                                      data->reg_num & 0x1f, data->val_in);
14047                 spin_unlock_bh(&tp->lock);
14048
14049                 return err;
14050
14051         case SIOCSHWTSTAMP:
14052                 return tg3_hwtstamp_set(dev, ifr);
14053
14054         case SIOCGHWTSTAMP:
14055                 return tg3_hwtstamp_get(dev, ifr);
14056
14057         default:
14058                 /* do nothing */
14059                 break;
14060         }
14061         return -EOPNOTSUPP;
14062 }
14063
14064 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14065 {
14066         struct tg3 *tp = netdev_priv(dev);
14067
14068         memcpy(ec, &tp->coal, sizeof(*ec));
14069         return 0;
14070 }
14071
14072 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
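/* ethtool set_coalesce hook: validate against chip limits - on 5705+
 * parts the IRQ-tick and stats-tick maxima stay zero, so nonzero
 * requests for them are rejected - then apply the settings if the NIC
 * is up.
 */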
14073 {
14074         struct tg3 *tp = netdev_priv(dev);
14075         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14076         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14077
14078         if (!tg3_flag(tp, 5705_PLUS)) {
14079                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14080                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14081                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14082                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14083         }
14084
14085         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14086             (!ec->rx_coalesce_usecs) ||
14087             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14088             (!ec->tx_coalesce_usecs) ||
14089             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14090             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14091             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14092             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14093             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14094             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14095             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14096             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14097                 return -EINVAL;
14098
14099         /* Only copy relevant parameters, ignore all others. */
14100         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14101         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14102         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14103         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14104         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14105         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14106         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14107         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14108         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14109
14110         if (netif_running(dev)) {
14111                 tg3_full_lock(tp, 0);
14112                 __tg3_set_coalesce(tp, &tp->coal);
14113                 tg3_full_unlock(tp);
14114         }
14115         return 0;
14116 }
14117
14118 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14119 {
14120         struct tg3 *tp = netdev_priv(dev);
14121
14122         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14123                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14124                 return -EOPNOTSUPP;
14125         }
14126
14127         if (edata->advertised != tp->eee.advertised) {
14128                 netdev_warn(tp->dev,
14129                             "Direct manipulation of EEE advertisement is not supported\n");
14130                 return -EINVAL;
14131         }
14132
14133         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14134                 netdev_warn(tp->dev,
                            "Maximum supported Tx LPI timer is %#x\n",
14136                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14137                 return -EINVAL;
14138         }
14139
14140         tp->eee = *edata;
14141
14142         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14143         tg3_warn_mgmt_link_flap(tp);
14144
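        /* If the interface is up, apply the new EEE settings to the
         * hardware now and reset the PHY so the link is renegotiated
         * with them.
         */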
14145         if (netif_running(tp->dev)) {
14146                 tg3_full_lock(tp, 0);
14147                 tg3_setup_eee(tp);
14148                 tg3_phy_reset(tp);
14149                 tg3_full_unlock(tp);
14150         }
14151
14152         return 0;
14153 }
14154
14155 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14156 {
14157         struct tg3 *tp = netdev_priv(dev);
14158
14159         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14160                 netdev_warn(tp->dev,
14161                             "Board does not support EEE!\n");
14162                 return -EOPNOTSUPP;
14163         }
14164
14165         *edata = tp->eee;
14166         return 0;
14167 }
14168
14169 static const struct ethtool_ops tg3_ethtool_ops = {
14170         .get_drvinfo            = tg3_get_drvinfo,
14171         .get_regs_len           = tg3_get_regs_len,
14172         .get_regs               = tg3_get_regs,
14173         .get_wol                = tg3_get_wol,
14174         .set_wol                = tg3_set_wol,
14175         .get_msglevel           = tg3_get_msglevel,
14176         .set_msglevel           = tg3_set_msglevel,
14177         .nway_reset             = tg3_nway_reset,
14178         .get_link               = ethtool_op_get_link,
14179         .get_eeprom_len         = tg3_get_eeprom_len,
14180         .get_eeprom             = tg3_get_eeprom,
14181         .set_eeprom             = tg3_set_eeprom,
14182         .get_ringparam          = tg3_get_ringparam,
14183         .set_ringparam          = tg3_set_ringparam,
14184         .get_pauseparam         = tg3_get_pauseparam,
14185         .set_pauseparam         = tg3_set_pauseparam,
14186         .self_test              = tg3_self_test,
14187         .get_strings            = tg3_get_strings,
14188         .set_phys_id            = tg3_set_phys_id,
14189         .get_ethtool_stats      = tg3_get_ethtool_stats,
14190         .get_coalesce           = tg3_get_coalesce,
14191         .set_coalesce           = tg3_set_coalesce,
14192         .get_sset_count         = tg3_get_sset_count,
14193         .get_rxnfc              = tg3_get_rxnfc,
14194         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14195         .get_rxfh               = tg3_get_rxfh,
14196         .set_rxfh               = tg3_set_rxfh,
14197         .get_channels           = tg3_get_channels,
14198         .set_channels           = tg3_set_channels,
14199         .get_ts_info            = tg3_get_ts_info,
14200         .get_eee                = tg3_get_eee,
14201         .set_eee                = tg3_set_eee,
14202         .get_link_ksettings     = tg3_get_link_ksettings,
14203         .set_link_ksettings     = tg3_set_link_ksettings,
14204 };
14205
14206 static void tg3_get_stats64(struct net_device *dev,
14207                             struct rtnl_link_stats64 *stats)
14208 {
14209         struct tg3 *tp = netdev_priv(dev);
14210
14211         spin_lock_bh(&tp->lock);
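        /* Fall back to the last saved snapshot if the device is down or
         * the hardware stats block is unavailable.
         */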
14212         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14213                 *stats = tp->net_stats_prev;
14214                 spin_unlock_bh(&tp->lock);
14215                 return;
14216         }
14217
14218         tg3_get_nstats(tp, stats);
14219         spin_unlock_bh(&tp->lock);
14220 }
14221
14222 static void tg3_set_rx_mode(struct net_device *dev)
14223 {
14224         struct tg3 *tp = netdev_priv(dev);
14225
14226         if (!netif_running(dev))
14227                 return;
14228
14229         tg3_full_lock(tp, 0);
14230         __tg3_set_rx_mode(dev);
14231         tg3_full_unlock(tp);
14232 }
14233
14234 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14235                                int new_mtu)
14236 {
14237         dev->mtu = new_mtu;
14238
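        /* 5780-class chips cannot use TSO together with jumbo frames, so
         * TSO capability tracks the MTU on them; all other chips simply
         * switch the dedicated jumbo ring on or off.
         */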
14239         if (new_mtu > ETH_DATA_LEN) {
14240                 if (tg3_flag(tp, 5780_CLASS)) {
14241                         netdev_update_features(dev);
14242                         tg3_flag_clear(tp, TSO_CAPABLE);
14243                 } else {
14244                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14245                 }
14246         } else {
14247                 if (tg3_flag(tp, 5780_CLASS)) {
14248                         tg3_flag_set(tp, TSO_CAPABLE);
14249                         netdev_update_features(dev);
14250                 }
14251                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14252         }
14253 }
14254
14255 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14256 {
14257         struct tg3 *tp = netdev_priv(dev);
14258         int err;
14259         bool reset_phy = false;
14260
14261         if (!netif_running(dev)) {
                /* We'll just apply the new MTU later, when the
                 * device is brought up.
                 */
14265                 tg3_set_mtu(dev, tp, new_mtu);
14266                 return 0;
14267         }
14268
14269         tg3_phy_stop(tp);
14270
14271         tg3_netif_stop(tp);
14272
14273         tg3_set_mtu(dev, tp, new_mtu);
14274
14275         tg3_full_lock(tp, 1);
14276
14277         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14278
        /* Reset the PHY, otherwise the read DMA engine will be left in
         * a mode that breaks all DMA requests down to 256 bytes.
         */
14282         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14283             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14284             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14285             tg3_asic_rev(tp) == ASIC_REV_5720)
14286                 reset_phy = true;
14287
14288         err = tg3_restart_hw(tp, reset_phy);
14289
14290         if (!err)
14291                 tg3_netif_start(tp);
14292
14293         tg3_full_unlock(tp);
14294
14295         if (!err)
14296                 tg3_phy_start(tp);
14297
14298         return err;
14299 }
14300
14301 static const struct net_device_ops tg3_netdev_ops = {
14302         .ndo_open               = tg3_open,
14303         .ndo_stop               = tg3_close,
14304         .ndo_start_xmit         = tg3_start_xmit,
14305         .ndo_get_stats64        = tg3_get_stats64,
14306         .ndo_validate_addr      = eth_validate_addr,
14307         .ndo_set_rx_mode        = tg3_set_rx_mode,
14308         .ndo_set_mac_address    = tg3_set_mac_addr,
14309         .ndo_do_ioctl           = tg3_ioctl,
14310         .ndo_tx_timeout         = tg3_tx_timeout,
14311         .ndo_change_mtu         = tg3_change_mtu,
14312         .ndo_fix_features       = tg3_fix_features,
14313         .ndo_set_features       = tg3_set_features,
14314 #ifdef CONFIG_NET_POLL_CONTROLLER
14315         .ndo_poll_controller    = tg3_poll_controller,
14316 #endif
14317 };
14318
14319 static void tg3_get_eeprom_size(struct tg3 *tp)
14320 {
14321         u32 cursize, val, magic;
14322
14323         tp->nvram_size = EEPROM_CHIP_SIZE;
14324
14325         if (tg3_nvram_read(tp, 0, &magic) != 0)
14326                 return;
14327
14328         if ((magic != TG3_EEPROM_MAGIC) &&
14329             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14330             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14331                 return;
14332
14333         /*
14334          * Size the chip by reading offsets at increasing powers of two.
14335          * When we encounter our validation signature, we know the addressing
14336          * has wrapped around, and thus have our chip size.
14337          */
14338         cursize = 0x10;
14339
14340         while (cursize < tp->nvram_size) {
14341                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14342                         return;
14343
14344                 if (val == magic)
14345                         break;
14346
14347                 cursize <<= 1;
14348         }
14349
14350         tp->nvram_size = cursize;
14351 }
14352
14353 static void tg3_get_nvram_size(struct tg3 *tp)
14354 {
14355         u32 val;
14356
14357         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14358                 return;
14359
14360         /* Selfboot format */
14361         if (val != TG3_EEPROM_MAGIC) {
14362                 tg3_get_eeprom_size(tp);
14363                 return;
14364         }
14365
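        /* Standard-format images store their size, in kbytes, in the
         * 16-bit word at offset 0xf2.
         */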
14366         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14367                 if (val != 0) {
                        /* We want to operate on the 16-bit value at
                         * offset 0xf2.  tg3_nvram_read() reads from NVRAM
                         * and byteswaps the data according to the
                         * byteswapping settings used for all other
                         * register accesses, which ensures the value we
                         * want always lands in the lower 16 bits.
                         * However, NVRAM data is stored in LE format, so
                         * the value read is always opposite the CPU's
                         * endianness.  The 16-bit byteswap below then
                         * brings it to CPU endianness.
                         */
14379                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14380                         return;
14381                 }
14382         }
14383         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14384 }
14385
14386 static void tg3_get_nvram_info(struct tg3 *tp)
14387 {
14388         u32 nvcfg1;
14389
14390         nvcfg1 = tr32(NVRAM_CFG1);
14391         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14392                 tg3_flag_set(tp, FLASH);
14393         } else {
14394                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14395                 tw32(NVRAM_CFG1, nvcfg1);
14396         }
14397
14398         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14399             tg3_flag(tp, 5780_CLASS)) {
14400                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14401                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14402                         tp->nvram_jedecnum = JEDEC_ATMEL;
14403                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14404                         tg3_flag_set(tp, NVRAM_BUFFERED);
14405                         break;
14406                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14407                         tp->nvram_jedecnum = JEDEC_ATMEL;
14408                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14409                         break;
14410                 case FLASH_VENDOR_ATMEL_EEPROM:
14411                         tp->nvram_jedecnum = JEDEC_ATMEL;
14412                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14413                         tg3_flag_set(tp, NVRAM_BUFFERED);
14414                         break;
14415                 case FLASH_VENDOR_ST:
14416                         tp->nvram_jedecnum = JEDEC_ST;
14417                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14418                         tg3_flag_set(tp, NVRAM_BUFFERED);
14419                         break;
14420                 case FLASH_VENDOR_SAIFUN:
14421                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14422                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14423                         break;
14424                 case FLASH_VENDOR_SST_SMALL:
14425                 case FLASH_VENDOR_SST_LARGE:
14426                         tp->nvram_jedecnum = JEDEC_SST;
14427                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14428                         break;
14429                 }
14430         } else {
14431                 tp->nvram_jedecnum = JEDEC_ATMEL;
14432                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14433                 tg3_flag_set(tp, NVRAM_BUFFERED);
14434         }
14435 }
14436
14437 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14438 {
14439         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14440         case FLASH_5752PAGE_SIZE_256:
14441                 tp->nvram_pagesize = 256;
14442                 break;
14443         case FLASH_5752PAGE_SIZE_512:
14444                 tp->nvram_pagesize = 512;
14445                 break;
14446         case FLASH_5752PAGE_SIZE_1K:
14447                 tp->nvram_pagesize = 1024;
14448                 break;
14449         case FLASH_5752PAGE_SIZE_2K:
14450                 tp->nvram_pagesize = 2048;
14451                 break;
14452         case FLASH_5752PAGE_SIZE_4K:
14453                 tp->nvram_pagesize = 4096;
14454                 break;
14455         case FLASH_5752PAGE_SIZE_264:
14456                 tp->nvram_pagesize = 264;
14457                 break;
14458         case FLASH_5752PAGE_SIZE_528:
14459                 tp->nvram_pagesize = 528;
14460                 break;
14461         }
14462 }
14463
14464 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14465 {
14466         u32 nvcfg1;
14467
14468         nvcfg1 = tr32(NVRAM_CFG1);
14469
14470         /* NVRAM protection for TPM */
14471         if (nvcfg1 & (1 << 27))
14472                 tg3_flag_set(tp, PROTECTED_NVRAM);
14473
14474         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14475         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14476         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14477                 tp->nvram_jedecnum = JEDEC_ATMEL;
14478                 tg3_flag_set(tp, NVRAM_BUFFERED);
14479                 break;
14480         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14481                 tp->nvram_jedecnum = JEDEC_ATMEL;
14482                 tg3_flag_set(tp, NVRAM_BUFFERED);
14483                 tg3_flag_set(tp, FLASH);
14484                 break;
14485         case FLASH_5752VENDOR_ST_M45PE10:
14486         case FLASH_5752VENDOR_ST_M45PE20:
14487         case FLASH_5752VENDOR_ST_M45PE40:
14488                 tp->nvram_jedecnum = JEDEC_ST;
14489                 tg3_flag_set(tp, NVRAM_BUFFERED);
14490                 tg3_flag_set(tp, FLASH);
14491                 break;
14492         }
14493
14494         if (tg3_flag(tp, FLASH)) {
14495                 tg3_nvram_get_pagesize(tp, nvcfg1);
14496         } else {
                /* For EEPROM, set the pagesize to the maximum EEPROM size. */
14498                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14499
14500                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14501                 tw32(NVRAM_CFG1, nvcfg1);
14502         }
14503 }
14504
14505 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14506 {
14507         u32 nvcfg1, protect = 0;
14508
14509         nvcfg1 = tr32(NVRAM_CFG1);
14510
14511         /* NVRAM protection for TPM */
14512         if (nvcfg1 & (1 << 27)) {
14513                 tg3_flag_set(tp, PROTECTED_NVRAM);
14514                 protect = 1;
14515         }
14516
14517         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14518         switch (nvcfg1) {
14519         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14520         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14521         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14522         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14523                 tp->nvram_jedecnum = JEDEC_ATMEL;
14524                 tg3_flag_set(tp, NVRAM_BUFFERED);
14525                 tg3_flag_set(tp, FLASH);
14526                 tp->nvram_pagesize = 264;
14527                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14528                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14529                         tp->nvram_size = (protect ? 0x3e200 :
14530                                           TG3_NVRAM_SIZE_512KB);
14531                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14532                         tp->nvram_size = (protect ? 0x1f200 :
14533                                           TG3_NVRAM_SIZE_256KB);
14534                 else
14535                         tp->nvram_size = (protect ? 0x1f200 :
14536                                           TG3_NVRAM_SIZE_128KB);
14537                 break;
14538         case FLASH_5752VENDOR_ST_M45PE10:
14539         case FLASH_5752VENDOR_ST_M45PE20:
14540         case FLASH_5752VENDOR_ST_M45PE40:
14541                 tp->nvram_jedecnum = JEDEC_ST;
14542                 tg3_flag_set(tp, NVRAM_BUFFERED);
14543                 tg3_flag_set(tp, FLASH);
14544                 tp->nvram_pagesize = 256;
14545                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14546                         tp->nvram_size = (protect ?
14547                                           TG3_NVRAM_SIZE_64KB :
14548                                           TG3_NVRAM_SIZE_128KB);
14549                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14550                         tp->nvram_size = (protect ?
14551                                           TG3_NVRAM_SIZE_64KB :
14552                                           TG3_NVRAM_SIZE_256KB);
14553                 else
14554                         tp->nvram_size = (protect ?
14555                                           TG3_NVRAM_SIZE_128KB :
14556                                           TG3_NVRAM_SIZE_512KB);
14557                 break;
14558         }
14559 }
14560
14561 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14562 {
14563         u32 nvcfg1;
14564
14565         nvcfg1 = tr32(NVRAM_CFG1);
14566
14567         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14568         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14569         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14570         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14571         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14572                 tp->nvram_jedecnum = JEDEC_ATMEL;
14573                 tg3_flag_set(tp, NVRAM_BUFFERED);
14574                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14575
14576                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14577                 tw32(NVRAM_CFG1, nvcfg1);
14578                 break;
14579         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14580         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14581         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14582         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14583                 tp->nvram_jedecnum = JEDEC_ATMEL;
14584                 tg3_flag_set(tp, NVRAM_BUFFERED);
14585                 tg3_flag_set(tp, FLASH);
14586                 tp->nvram_pagesize = 264;
14587                 break;
14588         case FLASH_5752VENDOR_ST_M45PE10:
14589         case FLASH_5752VENDOR_ST_M45PE20:
14590         case FLASH_5752VENDOR_ST_M45PE40:
14591                 tp->nvram_jedecnum = JEDEC_ST;
14592                 tg3_flag_set(tp, NVRAM_BUFFERED);
14593                 tg3_flag_set(tp, FLASH);
14594                 tp->nvram_pagesize = 256;
14595                 break;
14596         }
14597 }
14598
14599 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14600 {
14601         u32 nvcfg1, protect = 0;
14602
14603         nvcfg1 = tr32(NVRAM_CFG1);
14604
14605         /* NVRAM protection for TPM */
14606         if (nvcfg1 & (1 << 27)) {
14607                 tg3_flag_set(tp, PROTECTED_NVRAM);
14608                 protect = 1;
14609         }
14610
14611         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14612         switch (nvcfg1) {
14613         case FLASH_5761VENDOR_ATMEL_ADB021D:
14614         case FLASH_5761VENDOR_ATMEL_ADB041D:
14615         case FLASH_5761VENDOR_ATMEL_ADB081D:
14616         case FLASH_5761VENDOR_ATMEL_ADB161D:
14617         case FLASH_5761VENDOR_ATMEL_MDB021D:
14618         case FLASH_5761VENDOR_ATMEL_MDB041D:
14619         case FLASH_5761VENDOR_ATMEL_MDB081D:
14620         case FLASH_5761VENDOR_ATMEL_MDB161D:
14621                 tp->nvram_jedecnum = JEDEC_ATMEL;
14622                 tg3_flag_set(tp, NVRAM_BUFFERED);
14623                 tg3_flag_set(tp, FLASH);
14624                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14625                 tp->nvram_pagesize = 256;
14626                 break;
14627         case FLASH_5761VENDOR_ST_A_M45PE20:
14628         case FLASH_5761VENDOR_ST_A_M45PE40:
14629         case FLASH_5761VENDOR_ST_A_M45PE80:
14630         case FLASH_5761VENDOR_ST_A_M45PE16:
14631         case FLASH_5761VENDOR_ST_M_M45PE20:
14632         case FLASH_5761VENDOR_ST_M_M45PE40:
14633         case FLASH_5761VENDOR_ST_M_M45PE80:
14634         case FLASH_5761VENDOR_ST_M_M45PE16:
14635                 tp->nvram_jedecnum = JEDEC_ST;
14636                 tg3_flag_set(tp, NVRAM_BUFFERED);
14637                 tg3_flag_set(tp, FLASH);
14638                 tp->nvram_pagesize = 256;
14639                 break;
14640         }
14641
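        /* When TPM protection is on, the usable size is bounded by the
         * lockout address rather than by the flash part type.
         */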
14642         if (protect) {
14643                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14644         } else {
14645                 switch (nvcfg1) {
14646                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14647                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14648                 case FLASH_5761VENDOR_ST_A_M45PE16:
14649                 case FLASH_5761VENDOR_ST_M_M45PE16:
14650                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14651                         break;
14652                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14653                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14654                 case FLASH_5761VENDOR_ST_A_M45PE80:
14655                 case FLASH_5761VENDOR_ST_M_M45PE80:
14656                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14657                         break;
14658                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14659                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14660                 case FLASH_5761VENDOR_ST_A_M45PE40:
14661                 case FLASH_5761VENDOR_ST_M_M45PE40:
14662                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14663                         break;
14664                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14665                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14666                 case FLASH_5761VENDOR_ST_A_M45PE20:
14667                 case FLASH_5761VENDOR_ST_M_M45PE20:
14668                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14669                         break;
14670                 }
14671         }
14672 }
14673
14674 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14675 {
14676         tp->nvram_jedecnum = JEDEC_ATMEL;
14677         tg3_flag_set(tp, NVRAM_BUFFERED);
14678         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14679 }
14680
14681 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14682 {
14683         u32 nvcfg1;
14684
14685         nvcfg1 = tr32(NVRAM_CFG1);
14686
14687         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14688         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14689         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14690                 tp->nvram_jedecnum = JEDEC_ATMEL;
14691                 tg3_flag_set(tp, NVRAM_BUFFERED);
14692                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14693
14694                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14695                 tw32(NVRAM_CFG1, nvcfg1);
14696                 return;
14697         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14698         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14699         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14700         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14701         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14702         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14703         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14704                 tp->nvram_jedecnum = JEDEC_ATMEL;
14705                 tg3_flag_set(tp, NVRAM_BUFFERED);
14706                 tg3_flag_set(tp, FLASH);
14707
14708                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14709                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14710                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14711                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14712                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14713                         break;
14714                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14715                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14716                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14717                         break;
14718                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14719                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14720                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14721                         break;
14722                 }
14723                 break;
14724         case FLASH_5752VENDOR_ST_M45PE10:
14725         case FLASH_5752VENDOR_ST_M45PE20:
14726         case FLASH_5752VENDOR_ST_M45PE40:
14727                 tp->nvram_jedecnum = JEDEC_ST;
14728                 tg3_flag_set(tp, NVRAM_BUFFERED);
14729                 tg3_flag_set(tp, FLASH);
14730
14731                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14732                 case FLASH_5752VENDOR_ST_M45PE10:
14733                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14734                         break;
14735                 case FLASH_5752VENDOR_ST_M45PE20:
14736                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14737                         break;
14738                 case FLASH_5752VENDOR_ST_M45PE40:
14739                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14740                         break;
14741                 }
14742                 break;
14743         default:
14744                 tg3_flag_set(tp, NO_NVRAM);
14745                 return;
14746         }
14747
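        /* Only the 264- and 528-byte DataFlash page sizes use the NVRAM
         * address translation scheme; all other parts are flat-addressed.
         */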
14748         tg3_nvram_get_pagesize(tp, nvcfg1);
14749         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14750                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

14754 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14755 {
14756         u32 nvcfg1;
14757
14758         nvcfg1 = tr32(NVRAM_CFG1);
14759
14760         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14761         case FLASH_5717VENDOR_ATMEL_EEPROM:
14762         case FLASH_5717VENDOR_MICRO_EEPROM:
14763                 tp->nvram_jedecnum = JEDEC_ATMEL;
14764                 tg3_flag_set(tp, NVRAM_BUFFERED);
14765                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14766
14767                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14768                 tw32(NVRAM_CFG1, nvcfg1);
14769                 return;
14770         case FLASH_5717VENDOR_ATMEL_MDB011D:
14771         case FLASH_5717VENDOR_ATMEL_ADB011B:
14772         case FLASH_5717VENDOR_ATMEL_ADB011D:
14773         case FLASH_5717VENDOR_ATMEL_MDB021D:
14774         case FLASH_5717VENDOR_ATMEL_ADB021B:
14775         case FLASH_5717VENDOR_ATMEL_ADB021D:
14776         case FLASH_5717VENDOR_ATMEL_45USPT:
14777                 tp->nvram_jedecnum = JEDEC_ATMEL;
14778                 tg3_flag_set(tp, NVRAM_BUFFERED);
14779                 tg3_flag_set(tp, FLASH);
14780
14781                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14782                 case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_get_nvram_size() */
14784                         break;
14785                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14786                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14787                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14788                         break;
14789                 default:
14790                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14791                         break;
14792                 }
14793                 break;
14794         case FLASH_5717VENDOR_ST_M_M25PE10:
14795         case FLASH_5717VENDOR_ST_A_M25PE10:
14796         case FLASH_5717VENDOR_ST_M_M45PE10:
14797         case FLASH_5717VENDOR_ST_A_M45PE10:
14798         case FLASH_5717VENDOR_ST_M_M25PE20:
14799         case FLASH_5717VENDOR_ST_A_M25PE20:
14800         case FLASH_5717VENDOR_ST_M_M45PE20:
14801         case FLASH_5717VENDOR_ST_A_M45PE20:
14802         case FLASH_5717VENDOR_ST_25USPT:
14803         case FLASH_5717VENDOR_ST_45USPT:
14804                 tp->nvram_jedecnum = JEDEC_ST;
14805                 tg3_flag_set(tp, NVRAM_BUFFERED);
14806                 tg3_flag_set(tp, FLASH);
14807
14808                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14809                 case FLASH_5717VENDOR_ST_M_M25PE20:
14810                 case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_get_nvram_size() */
14812                         break;
14813                 case FLASH_5717VENDOR_ST_A_M25PE20:
14814                 case FLASH_5717VENDOR_ST_A_M45PE20:
14815                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14816                         break;
14817                 default:
14818                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14819                         break;
14820                 }
14821                 break;
14822         default:
14823                 tg3_flag_set(tp, NO_NVRAM);
14824                 return;
14825         }
14826
14827         tg3_nvram_get_pagesize(tp, nvcfg1);
14828         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14829                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14830 }
14831
14832 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14833 {
14834         u32 nvcfg1, nvmpinstrp, nv_status;
14835
14836         nvcfg1 = tr32(NVRAM_CFG1);
14837         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14838
14839         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14840                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14841                         tg3_flag_set(tp, NO_NVRAM);
14842                         return;
14843                 }
14844
14845                 switch (nvmpinstrp) {
14846                 case FLASH_5762_MX25L_100:
14847                 case FLASH_5762_MX25L_200:
14848                 case FLASH_5762_MX25L_400:
14849                 case FLASH_5762_MX25L_800:
14850                 case FLASH_5762_MX25L_160_320:
14851                         tp->nvram_pagesize = 4096;
14852                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14853                         tg3_flag_set(tp, NVRAM_BUFFERED);
14854                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14855                         tg3_flag_set(tp, FLASH);
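                        /* The autosense status encodes log2 of the flash
                         * size in megabytes.
                         */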
14856                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14857                         tp->nvram_size =
14858                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14859                                                 AUTOSENSE_DEVID_MASK)
14860                                         << AUTOSENSE_SIZE_IN_MB);
14861                         return;
14862
14863                 case FLASH_5762_EEPROM_HD:
14864                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14865                         break;
14866                 case FLASH_5762_EEPROM_LD:
14867                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14868                         break;
14869                 case FLASH_5720VENDOR_M_ST_M45PE20:
14870                         /* This pinstrap supports multiple sizes, so force it
14871                          * to read the actual size from location 0xf0.
14872                          */
14873                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14874                         break;
14875                 }
14876         }
14877
14878         switch (nvmpinstrp) {
14879         case FLASH_5720_EEPROM_HD:
14880         case FLASH_5720_EEPROM_LD:
14881                 tp->nvram_jedecnum = JEDEC_ATMEL;
14882                 tg3_flag_set(tp, NVRAM_BUFFERED);
14883
14884                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14885                 tw32(NVRAM_CFG1, nvcfg1);
14886                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14887                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14888                 else
14889                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14890                 return;
14891         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14892         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14893         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14894         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14895         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14896         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14897         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14898         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14899         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14900         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14901         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14902         case FLASH_5720VENDOR_ATMEL_45USPT:
14903                 tp->nvram_jedecnum = JEDEC_ATMEL;
14904                 tg3_flag_set(tp, NVRAM_BUFFERED);
14905                 tg3_flag_set(tp, FLASH);
14906
14907                 switch (nvmpinstrp) {
14908                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14909                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14910                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14911                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14912                         break;
14913                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14914                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14915                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14916                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14917                         break;
14918                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14919                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14920                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14921                         break;
14922                 default:
14923                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14924                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14925                         break;
14926                 }
14927                 break;
14928         case FLASH_5720VENDOR_M_ST_M25PE10:
14929         case FLASH_5720VENDOR_M_ST_M45PE10:
14930         case FLASH_5720VENDOR_A_ST_M25PE10:
14931         case FLASH_5720VENDOR_A_ST_M45PE10:
14932         case FLASH_5720VENDOR_M_ST_M25PE20:
14933         case FLASH_5720VENDOR_M_ST_M45PE20:
14934         case FLASH_5720VENDOR_A_ST_M25PE20:
14935         case FLASH_5720VENDOR_A_ST_M45PE20:
14936         case FLASH_5720VENDOR_M_ST_M25PE40:
14937         case FLASH_5720VENDOR_M_ST_M45PE40:
14938         case FLASH_5720VENDOR_A_ST_M25PE40:
14939         case FLASH_5720VENDOR_A_ST_M45PE40:
14940         case FLASH_5720VENDOR_M_ST_M25PE80:
14941         case FLASH_5720VENDOR_M_ST_M45PE80:
14942         case FLASH_5720VENDOR_A_ST_M25PE80:
14943         case FLASH_5720VENDOR_A_ST_M45PE80:
14944         case FLASH_5720VENDOR_ST_25USPT:
14945         case FLASH_5720VENDOR_ST_45USPT:
14946                 tp->nvram_jedecnum = JEDEC_ST;
14947                 tg3_flag_set(tp, NVRAM_BUFFERED);
14948                 tg3_flag_set(tp, FLASH);
14949
14950                 switch (nvmpinstrp) {
14951                 case FLASH_5720VENDOR_M_ST_M25PE20:
14952                 case FLASH_5720VENDOR_M_ST_M45PE20:
14953                 case FLASH_5720VENDOR_A_ST_M25PE20:
14954                 case FLASH_5720VENDOR_A_ST_M45PE20:
14955                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14956                         break;
14957                 case FLASH_5720VENDOR_M_ST_M25PE40:
14958                 case FLASH_5720VENDOR_M_ST_M45PE40:
14959                 case FLASH_5720VENDOR_A_ST_M25PE40:
14960                 case FLASH_5720VENDOR_A_ST_M45PE40:
14961                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14962                         break;
14963                 case FLASH_5720VENDOR_M_ST_M25PE80:
14964                 case FLASH_5720VENDOR_M_ST_M45PE80:
14965                 case FLASH_5720VENDOR_A_ST_M25PE80:
14966                 case FLASH_5720VENDOR_A_ST_M45PE80:
14967                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14968                         break;
14969                 default:
14970                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14971                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14972                         break;
14973                 }
14974                 break;
14975         default:
14976                 tg3_flag_set(tp, NO_NVRAM);
14977                 return;
14978         }
14979
14980         tg3_nvram_get_pagesize(tp, nvcfg1);
14981         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14982                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14983
14984         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14985                 u32 val;
14986
14987                 if (tg3_nvram_read(tp, 0, &val))
14988                         return;
14989
14990                 if (val != TG3_EEPROM_MAGIC &&
14991                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14992                         tg3_flag_set(tp, NO_NVRAM);
14993         }
14994 }
14995
14996 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14997 static void tg3_nvram_init(struct tg3 *tp)
14998 {
14999         if (tg3_flag(tp, IS_SSB_CORE)) {
                /* The SSB Broadcom GigE core has neither NVRAM nor EEPROM. */
15001                 tg3_flag_clear(tp, NVRAM);
15002                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15003                 tg3_flag_set(tp, NO_NVRAM);
15004                 return;
15005         }
15006
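        /* Reset the EEPROM access state machine and program the default
         * clock period before touching the NVRAM interface.
         */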
15007         tw32_f(GRC_EEPROM_ADDR,
15008              (EEPROM_ADDR_FSM_RESET |
15009               (EEPROM_DEFAULT_CLOCK_PERIOD <<
15010                EEPROM_ADDR_CLKPERD_SHIFT)));
15011
15012         msleep(1);
15013
15014         /* Enable seeprom accesses. */
15015         tw32_f(GRC_LOCAL_CTRL,
15016              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15017         udelay(100);
15018
15019         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15020             tg3_asic_rev(tp) != ASIC_REV_5701) {
15021                 tg3_flag_set(tp, NVRAM);
15022
15023                 if (tg3_nvram_lock(tp)) {
15024                         netdev_warn(tp->dev,
15025                                     "Cannot get nvram lock, %s failed\n",
15026                                     __func__);
15027                         return;
15028                 }
15029                 tg3_enable_nvram_access(tp);
15030
15031                 tp->nvram_size = 0;
15032
15033                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15034                         tg3_get_5752_nvram_info(tp);
15035                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15036                         tg3_get_5755_nvram_info(tp);
15037                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15038                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15039                          tg3_asic_rev(tp) == ASIC_REV_5785)
15040                         tg3_get_5787_nvram_info(tp);
15041                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15042                         tg3_get_5761_nvram_info(tp);
15043                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15044                         tg3_get_5906_nvram_info(tp);
15045                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15046                          tg3_flag(tp, 57765_CLASS))
15047                         tg3_get_57780_nvram_info(tp);
15048                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15049                          tg3_asic_rev(tp) == ASIC_REV_5719)
15050                         tg3_get_5717_nvram_info(tp);
15051                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15052                          tg3_asic_rev(tp) == ASIC_REV_5762)
15053                         tg3_get_5720_nvram_info(tp);
15054                 else
15055                         tg3_get_nvram_info(tp);
15056
15057                 if (tp->nvram_size == 0)
15058                         tg3_get_nvram_size(tp);
15059
15060                 tg3_disable_nvram_access(tp);
15061                 tg3_nvram_unlock(tp);
15062
15063         } else {
15064                 tg3_flag_clear(tp, NVRAM);
15065                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15066
15067                 tg3_get_eeprom_size(tp);
15068         }
15069 }
15070
15071 struct subsys_tbl_ent {
15072         u16 subsys_vendor, subsys_devid;
15073         u32 phy_id;
15074 };
15075
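/* Map of board subsystem IDs to the PHY fitted on that board; a phy_id
 * of 0 denotes a fiber/SerDes board with no copper PHY.
 */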
15076 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15077         /* Broadcom boards. */
15078         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15080         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15082         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15084         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15085           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15086         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15087           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15088         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15089           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15090         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15091           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15092         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15093           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15094         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15095           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15096         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15097           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15098         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15099           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15100
15101         /* 3com boards. */
15102         { TG3PCI_SUBVENDOR_ID_3COM,
15103           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15104         { TG3PCI_SUBVENDOR_ID_3COM,
15105           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15106         { TG3PCI_SUBVENDOR_ID_3COM,
15107           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15108         { TG3PCI_SUBVENDOR_ID_3COM,
15109           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15110         { TG3PCI_SUBVENDOR_ID_3COM,
15111           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15112
15113         /* DELL boards. */
15114         { TG3PCI_SUBVENDOR_ID_DELL,
15115           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15116         { TG3PCI_SUBVENDOR_ID_DELL,
15117           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15118         { TG3PCI_SUBVENDOR_ID_DELL,
15119           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15120         { TG3PCI_SUBVENDOR_ID_DELL,
15121           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15122
15123         /* Compaq boards. */
15124         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15125           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15126         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15127           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15128         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15129           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15130         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15131           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15132         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15133           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15134
15135         /* IBM boards. */
15136         { TG3PCI_SUBVENDOR_ID_IBM,
15137           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15138 };
15139
15140 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15141 {
15142         int i;
15143
15144         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15145                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15146                      tp->pdev->subsystem_vendor) &&
15147                     (subsys_id_to_phy_id[i].subsys_devid ==
15148                      tp->pdev->subsystem_device))
15149                         return &subsys_id_to_phy_id[i];
15150         }
15151         return NULL;
15152 }
15153
15154 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15155 {
15156         u32 val;
15157
15158         tp->phy_id = TG3_PHY_ID_INVALID;
15159         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15160
        /* Assume an onboard, WOL-capable device by default. */
15162         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15163         tg3_flag_set(tp, WOL_CAP);
15164
15165         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15166                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15167                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15168                         tg3_flag_set(tp, IS_NIC);
15169                 }
15170                 val = tr32(VCPU_CFGSHDW);
15171                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15172                         tg3_flag_set(tp, ASPM_WORKAROUND);
15173                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15174                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15175                         tg3_flag_set(tp, WOL_ENABLE);
15176                         device_set_wakeup_enable(&tp->pdev->dev, true);
15177                 }
15178                 goto done;
15179         }
15180
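        /* The bootcode leaves its configuration in NIC SRAM; trust the
         * contents only if the signature checks out.
         */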
15181         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15182         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15183                 u32 nic_cfg, led_cfg;
15184                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15185                 u32 nic_phy_id, ver, eeprom_phy_id;
15186                 int eeprom_phy_serdes = 0;
15187
15188                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15189                 tp->nic_sram_data_cfg = nic_cfg;
15190
15191                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15192                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15193                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15194                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15195                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15196                     (ver > 0) && (ver < 0x100))
15197                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15198
15199                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15200                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15201
15202                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15203                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15204                     tg3_asic_rev(tp) == ASIC_REV_5720)
15205                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15206
15207                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15208                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15209                         eeprom_phy_serdes = 1;
15210
15211                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15212                 if (nic_phy_id != 0) {
15213                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15214                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15215
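                        /* Repack the PHY ID from its NIC SRAM layout into
                         * the format used in tp->phy_id.
                         */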
15216                         eeprom_phy_id  = (id1 >> 16) << 10;
15217                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15218                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15219                 } else
15220                         eeprom_phy_id = 0;
15221
15222                 tp->phy_id = eeprom_phy_id;
15223                 if (eeprom_phy_serdes) {
15224                         if (!tg3_flag(tp, 5705_PLUS))
15225                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15226                         else
15227                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15228                 }
15229
15230                 if (tg3_flag(tp, 5750_PLUS))
15231                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15232                                     SHASTA_EXT_LED_MODE_MASK);
15233                 else
15234                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15235
15236                 switch (led_cfg) {
15237                 default:
15238                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15239                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15240                         break;
15241
15242                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15243                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15244                         break;
15245
15246                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15247                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15248
                        /* Default to PHY_1 mode if 0 (MAC mode) is read,
                         * as happens with some older 5700/5701 bootcode.
                         */
15252                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15253                             tg3_asic_rev(tp) == ASIC_REV_5701)
15254                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15255
15256                         break;
15257
15258                 case SHASTA_EXT_LED_SHARED:
15259                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15260                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15261                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15262                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15263                                                  LED_CTRL_MODE_PHY_2);
15264
15265                         if (tg3_flag(tp, 5717_PLUS) ||
15266                             tg3_asic_rev(tp) == ASIC_REV_5762)
15267                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15268                                                 LED_CTRL_BLINK_RATE_MASK;
15269
15270                         break;
15271
15272                 case SHASTA_EXT_LED_MAC:
15273                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15274                         break;
15275
15276                 case SHASTA_EXT_LED_COMBO:
15277                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15278                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15279                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15280                                                  LED_CTRL_MODE_PHY_2);
15281                         break;
15282
15283                 }
15284
15285                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15286                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15287                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15288                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15289
15290                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15291                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15292
15293                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15294                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15295                         if ((tp->pdev->subsystem_vendor ==
15296                              PCI_VENDOR_ID_ARIMA) &&
15297                             (tp->pdev->subsystem_device == 0x205a ||
15298                              tp->pdev->subsystem_device == 0x2063))
15299                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15300                 } else {
15301                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15302                         tg3_flag_set(tp, IS_NIC);
15303                 }
15304
15305                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15306                         tg3_flag_set(tp, ENABLE_ASF);
15307                         if (tg3_flag(tp, 5750_PLUS))
15308                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15309                 }
15310
15311                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15312                     tg3_flag(tp, 5750_PLUS))
15313                         tg3_flag_set(tp, ENABLE_APE);
15314
15315                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15316                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15317                         tg3_flag_clear(tp, WOL_CAP);
15318
15319                 if (tg3_flag(tp, WOL_CAP) &&
15320                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15321                         tg3_flag_set(tp, WOL_ENABLE);
15322                         device_set_wakeup_enable(&tp->pdev->dev, true);
15323                 }
15324
15325                 if (cfg2 & (1 << 17))
15326                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15327
                /* SerDes signal pre-emphasis in register 0x590 is set by
                 * the bootcode if bit 18 is set.
                 */
15330                 if (cfg2 & (1 << 18))
15331                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15332
15333                 if ((tg3_flag(tp, 57765_PLUS) ||
15334                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15335                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15336                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15337                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15338
15339                 if (tg3_flag(tp, PCI_EXPRESS)) {
15340                         u32 cfg3;
15341
15342                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15343                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15344                             !tg3_flag(tp, 57765_PLUS) &&
15345                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15346                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15347                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15348                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15349                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15350                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15351                 }
15352
15353                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15354                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15355                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15356                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15357                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15358                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15359
15360                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15361                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15362         }
15363 done:
15364         if (tg3_flag(tp, WOL_CAP))
15365                 device_set_wakeup_enable(&tp->pdev->dev,
15366                                          tg3_flag(tp, WOL_ENABLE));
15367         else
15368                 device_set_wakeup_capable(&tp->pdev->dev, false);
15369 }
15370
15371 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15372 {
15373         int i, err;
15374         u32 val2, off = offset * 8;
15375
15376         err = tg3_nvram_lock(tp);
15377         if (err)
15378                 return err;
15379
15380         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15381         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15382                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15383         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15384         udelay(10);
15385
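        /* Poll for up to ~1 ms (100 x 10 us) for the OTP read to complete. */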
15386         for (i = 0; i < 100; i++) {
15387                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15388                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15389                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15390                         break;
15391                 }
15392                 udelay(10);
15393         }
15394
15395         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15396
15397         tg3_nvram_unlock(tp);
15398         if (val2 & APE_OTP_STATUS_CMD_DONE)
15399                 return 0;
15400
15401         return -EBUSY;
15402 }
15403
15404 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15405 {
15406         int i;
15407         u32 val;
15408
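        /* Pulse the START bit: issue the command with START set, then without it. */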
15409         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15410         tw32(OTP_CTRL, cmd);
15411
15412         /* Wait for up to 1 ms for command to execute. */
15413         for (i = 0; i < 100; i++) {
15414                 val = tr32(OTP_STATUS);
15415                 if (val & OTP_STATUS_CMD_DONE)
15416                         break;
15417                 udelay(10);
15418         }
15419
15420         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15421 }
15422
15423 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15424  * configuration is a 32-bit value that straddles the alignment boundary.
15425  * We do two 32-bit reads and then shift and merge the results.
15426  */
15427 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15428 {
15429         u32 bhalf_otp, thalf_otp;
15430
15431         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15432
15433         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15434                 return 0;
15435
15436         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15437
15438         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15439                 return 0;
15440
15441         thalf_otp = tr32(OTP_READ_DATA);
15442
15443         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15444
15445         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15446                 return 0;
15447
15448         bhalf_otp = tr32(OTP_READ_DATA);
15449
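        /* The upper half of the result comes from the low 16 bits of the
         * word at MAGIC1; the lower half comes from the high 16 bits of
         * the word at MAGIC2.
         */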
15450         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15451 }
15452
15453 static void tg3_phy_init_link_config(struct tg3 *tp)
15454 {
15455         u32 adv = ADVERTISED_Autoneg;
15456
15457         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15458                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15459                         adv |= ADVERTISED_1000baseT_Half;
15460                 adv |= ADVERTISED_1000baseT_Full;
15461         }
15462
15463         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15464                 adv |= ADVERTISED_100baseT_Half |
15465                        ADVERTISED_100baseT_Full |
15466                        ADVERTISED_10baseT_Half |
15467                        ADVERTISED_10baseT_Full |
15468                        ADVERTISED_TP;
15469         else
15470                 adv |= ADVERTISED_FIBRE;
15471
15472         tp->link_config.advertising = adv;
15473         tp->link_config.speed = SPEED_UNKNOWN;
15474         tp->link_config.duplex = DUPLEX_UNKNOWN;
15475         tp->link_config.autoneg = AUTONEG_ENABLE;
15476         tp->link_config.active_speed = SPEED_UNKNOWN;
15477         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15478
15479         tp->old_link = -1;
15480 }
15481
15482 static int tg3_phy_probe(struct tg3 *tp)
15483 {
15484         u32 hw_phy_id_1, hw_phy_id_2;
15485         u32 hw_phy_id, hw_phy_id_masked;
15486         int err;
15487
15488         /* flow control autonegotiation is default behavior */
15489         tg3_flag_set(tp, PAUSE_AUTONEG);
15490         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15491
15492         if (tg3_flag(tp, ENABLE_APE)) {
15493                 switch (tp->pci_fn) {
15494                 case 0:
15495                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15496                         break;
15497                 case 1:
15498                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15499                         break;
15500                 case 2:
15501                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15502                         break;
15503                 case 3:
15504                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15505                         break;
15506                 }
15507         }
15508
15509         if (!tg3_flag(tp, ENABLE_ASF) &&
15510             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15511             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15512                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15513                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15514
15515         if (tg3_flag(tp, USE_PHYLIB))
15516                 return tg3_phy_init(tp);
15517
15518         /* Reading the PHY ID register can conflict with ASF
15519          * firmware access to the PHY hardware.
15520          */
15521         err = 0;
15522         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15523                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15524         } else {
15525                 /* Now read the physical PHY_ID from the chip and verify
15526                  * that it is sane.  If it doesn't look good, we fall back
15527                  * to the PHY_ID found in the eeprom area or, failing
15528                  * that, to the hard-coded subsys device table.
15529                  */
15530                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15531                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15532
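                /* Fold MII_PHYSID1/2 into the driver's internal phy_id
                 * layout.  TG3_PHY_ID_MASK below strips the low revision
                 * bits so known-PHY matching ignores silicon revs.
                 */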
15533                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15534                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15535                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15536
15537                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15538         }
15539
15540         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15541                 tp->phy_id = hw_phy_id;
15542                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15543                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15544                 else
15545                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15546         } else {
15547                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15548                         /* Do nothing, phy ID already set up in
15549                          * tg3_get_eeprom_hw_cfg().
15550                          */
15551                 } else {
15552                         struct subsys_tbl_ent *p;
15553
15554                         /* No eeprom signature?  Try the hardcoded
15555                          * subsys device table.
15556                          */
15557                         p = tg3_lookup_by_subsys(tp);
15558                         if (p) {
15559                                 tp->phy_id = p->phy_id;
15560                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15561                                 /* So far we have seen the IDs 0xbc050cd0,
15562                                  * 0xbc050f80 and 0xbc050c30 on devices
15563                                  * connected to a BCM4785, and there are
15564                                  * probably more.  For now, just assume
15565                                  * the PHY is supported when it is
15566                                  * connected to an SSB core.
15567                                  */
15568                                 return -ENODEV;
15569                         }
15570
15571                         if (!tp->phy_id ||
15572                             tp->phy_id == TG3_PHY_ID_BCM8002)
15573                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15574                 }
15575         }
15576
15577         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15578             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15579              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15580              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15581              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15582              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15583               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15584              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15585               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15586                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15587
15588                 tp->eee.supported = SUPPORTED_100baseT_Full |
15589                                     SUPPORTED_1000baseT_Full;
15590                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15591                                      ADVERTISED_1000baseT_Full;
15592                 tp->eee.eee_enabled = 1;
15593                 tp->eee.tx_lpi_enabled = 1;
15594                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15595         }
15596
15597         tg3_phy_init_link_config(tp);
15598
15599         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15600             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15601             !tg3_flag(tp, ENABLE_APE) &&
15602             !tg3_flag(tp, ENABLE_ASF)) {
15603                 u32 bmsr, dummy;
15604
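                /* The BMSR link status bit is latched-low, so read the
                 * register twice; the second read reflects the current
                 * link state.
                 */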
15605                 tg3_readphy(tp, MII_BMSR, &bmsr);
15606                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15607                     (bmsr & BMSR_LSTATUS))
15608                         goto skip_phy_reset;
15609
15610                 err = tg3_phy_reset(tp);
15611                 if (err)
15612                         return err;
15613
15614                 tg3_phy_set_wirespeed(tp);
15615
15616                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15617                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15618                                             tp->link_config.flowctrl);
15619
15620                         tg3_writephy(tp, MII_BMCR,
15621                                      BMCR_ANENABLE | BMCR_ANRESTART);
15622                 }
15623         }
15624
15625 skip_phy_reset:
15626         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15627                 err = tg3_init_5401phy_dsp(tp);
15628                 if (err)
15629                         return err;
15630
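                /* The second DSP init call is intentional; presumably the
                 * first pass settles the PHY so the writes stick.
                 */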
15631                 err = tg3_init_5401phy_dsp(tp);
15632         }
15633
15634         return err;
15635 }
15636
15637 static void tg3_read_vpd(struct tg3 *tp)
15638 {
15639         u8 *vpd_data;
15640         unsigned int block_end, rosize, len;
15641         u32 vpdlen;
15642         int j, i = 0;
15643
15644         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15645         if (!vpd_data)
15646                 goto out_no_vpd;
15647
15648         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15649         if (i < 0)
15650                 goto out_not_found;
15651
15652         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15653         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15654         i += PCI_VPD_LRDT_TAG_SIZE;
15655
15656         if (block_end > vpdlen)
15657                 goto out_not_found;
15658
15659         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15660                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15661         if (j > 0) {
15662                 len = pci_vpd_info_field_size(&vpd_data[j]);
15663
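                /* "1028" is Dell's PCI vendor ID; only read a firmware
                 * version out of the VENDOR0 keyword on Dell boards.
                 */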
15664                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15665                 if (j + len > block_end || len != 4 ||
15666                     memcmp(&vpd_data[j], "1028", 4))
15667                         goto partno;
15668
15669                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15670                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15671                 if (j < 0)
15672                         goto partno;
15673
15674                 len = pci_vpd_info_field_size(&vpd_data[j]);
15675
15676                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15677                 if (j + len > block_end)
15678                         goto partno;
15679
15680                 if (len >= sizeof(tp->fw_ver))
15681                         len = sizeof(tp->fw_ver) - 1;
15682                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15683                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15684                          &vpd_data[j]);
15685         }
15686
15687 partno:
15688         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15689                                       PCI_VPD_RO_KEYWORD_PARTNO);
15690         if (i < 0)
15691                 goto out_not_found;
15692
15693         len = pci_vpd_info_field_size(&vpd_data[i]);
15694
15695         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15696         if (len > TG3_BPN_SIZE ||
15697             (len + i) > vpdlen)
15698                 goto out_not_found;
15699
15700         memcpy(tp->board_part_number, &vpd_data[i], len);
15701
15702 out_not_found:
15703         kfree(vpd_data);
15704         if (tp->board_part_number[0])
15705                 return;
15706
15707 out_no_vpd:
15708         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15709                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15710                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15711                         strcpy(tp->board_part_number, "BCM5717");
15712                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15713                         strcpy(tp->board_part_number, "BCM5718");
15714                 else
15715                         goto nomatch;
15716         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15717                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15718                         strcpy(tp->board_part_number, "BCM57780");
15719                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15720                         strcpy(tp->board_part_number, "BCM57760");
15721                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15722                         strcpy(tp->board_part_number, "BCM57790");
15723                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15724                         strcpy(tp->board_part_number, "BCM57788");
15725                 else
15726                         goto nomatch;
15727         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15728                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15729                         strcpy(tp->board_part_number, "BCM57761");
15730                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15731                         strcpy(tp->board_part_number, "BCM57765");
15732                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15733                         strcpy(tp->board_part_number, "BCM57781");
15734                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15735                         strcpy(tp->board_part_number, "BCM57785");
15736                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15737                         strcpy(tp->board_part_number, "BCM57791");
15738                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15739                         strcpy(tp->board_part_number, "BCM57795");
15740                 else
15741                         goto nomatch;
15742         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15743                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15744                         strcpy(tp->board_part_number, "BCM57762");
15745                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15746                         strcpy(tp->board_part_number, "BCM57766");
15747                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15748                         strcpy(tp->board_part_number, "BCM57782");
15749                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15750                         strcpy(tp->board_part_number, "BCM57786");
15751                 else
15752                         goto nomatch;
15753         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15754                 strcpy(tp->board_part_number, "BCM95906");
15755         } else {
15756 nomatch:
15757                 strcpy(tp->board_part_number, "none");
15758         }
15759 }
15760
15761 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15762 {
15763         u32 val;
15764
15765         if (tg3_nvram_read(tp, offset, &val) ||
15766             (val & 0xfc000000) != 0x0c000000 ||
15767             tg3_nvram_read(tp, offset + 4, &val) ||
15768             val != 0)
15769                 return 0;
15770
15771         return 1;
15772 }
15773
15774 static void tg3_read_bc_ver(struct tg3 *tp)
15775 {
15776         u32 val, offset, start, ver_offset;
15777         int i, dst_off;
15778         bool newver = false;
15779
15780         if (tg3_nvram_read(tp, 0xc, &offset) ||
15781             tg3_nvram_read(tp, 0x4, &start))
15782                 return;
15783
15784         offset = tg3_nvram_logical_addr(tp, offset);
15785
15786         if (tg3_nvram_read(tp, offset, &val))
15787                 return;
15788
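        /* Newer bootcode images start with a word matching 0x0c000000
         * under mask 0xfc000000, followed by a zero word (the same check
         * tg3_fw_img_is_valid() makes).
         */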
15789         if ((val & 0xfc000000) == 0x0c000000) {
15790                 if (tg3_nvram_read(tp, offset + 4, &val))
15791                         return;
15792
15793                 if (val == 0)
15794                         newver = true;
15795         }
15796
15797         dst_off = strlen(tp->fw_ver);
15798
15799         if (newver) {
15800                 if (TG3_VER_SIZE - dst_off < 16 ||
15801                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15802                         return;
15803
15804                 offset = offset + ver_offset - start;
15805                 for (i = 0; i < 16; i += 4) {
15806                         __be32 v;
15807                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15808                                 return;
15809
15810                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15811                 }
15812         } else {
15813                 u32 major, minor;
15814
15815                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15816                         return;
15817
15818                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15819                         TG3_NVM_BCVER_MAJSFT;
15820                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15821                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15822                          "v%d.%02d", major, minor);
15823         }
15824 }
15825
15826 static void tg3_read_hwsb_ver(struct tg3 *tp)
15827 {
15828         u32 val, major, minor;
15829
15830         /* Use native endian representation */
15831         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15832                 return;
15833
15834         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15835                 TG3_NVM_HWSB_CFG1_MAJSFT;
15836         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15837                 TG3_NVM_HWSB_CFG1_MINSFT;
15838
15839         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15840 }
15841
15842 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15843 {
15844         u32 offset, major, minor, build;
15845
15846         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15847
15848         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15849                 return;
15850
15851         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15852         case TG3_EEPROM_SB_REVISION_0:
15853                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15854                 break;
15855         case TG3_EEPROM_SB_REVISION_2:
15856                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15857                 break;
15858         case TG3_EEPROM_SB_REVISION_3:
15859                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15860                 break;
15861         case TG3_EEPROM_SB_REVISION_4:
15862                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15863                 break;
15864         case TG3_EEPROM_SB_REVISION_5:
15865                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15866                 break;
15867         case TG3_EEPROM_SB_REVISION_6:
15868                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15869                 break;
15870         default:
15871                 return;
15872         }
15873
15874         if (tg3_nvram_read(tp, offset, &val))
15875                 return;
15876
15877         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15878                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15879         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15880                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15881         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15882
15883         if (minor > 99 || build > 26)
15884                 return;
15885
15886         offset = strlen(tp->fw_ver);
15887         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15888                  " v%d.%02d", major, minor);
15889
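        /* Builds 1-26 are encoded as a single trailing letter 'a'-'z'. */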
15890         if (build > 0) {
15891                 offset = strlen(tp->fw_ver);
15892                 if (offset < TG3_VER_SIZE - 1)
15893                         tp->fw_ver[offset] = 'a' + build - 1;
15894         }
15895 }
15896
15897 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15898 {
15899         u32 val, offset, start;
15900         int i, vlen;
15901
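        /* Scan the NVRAM directory for the ASF init image entry. */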
15902         for (offset = TG3_NVM_DIR_START;
15903              offset < TG3_NVM_DIR_END;
15904              offset += TG3_NVM_DIRENT_SIZE) {
15905                 if (tg3_nvram_read(tp, offset, &val))
15906                         return;
15907
15908                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15909                         break;
15910         }
15911
15912         if (offset == TG3_NVM_DIR_END)
15913                 return;
15914
15915         if (!tg3_flag(tp, 5705_PLUS))
15916                 start = 0x08000000;
15917         else if (tg3_nvram_read(tp, offset - 4, &start))
15918                 return;
15919
15920         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15921             !tg3_fw_img_is_valid(tp, offset) ||
15922             tg3_nvram_read(tp, offset + 8, &val))
15923                 return;
15924
15925         offset += val - start;
15926
15927         vlen = strlen(tp->fw_ver);
15928
15929         tp->fw_ver[vlen++] = ',';
15930         tp->fw_ver[vlen++] = ' ';
15931
15932         for (i = 0; i < 4; i++) {
15933                 __be32 v;
15934                 if (tg3_nvram_read_be32(tp, offset, &v))
15935                         return;
15936
15937                 offset += sizeof(v);
15938
15939                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15940                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15941                         break;
15942                 }
15943
15944                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15945                 vlen += sizeof(v);
15946         }
15947 }
15948
15949 static void tg3_probe_ncsi(struct tg3 *tp)
15950 {
15951         u32 apedata;
15952
15953         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15954         if (apedata != APE_SEG_SIG_MAGIC)
15955                 return;
15956
15957         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15958         if (!(apedata & APE_FW_STATUS_READY))
15959                 return;
15960
15961         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15962                 tg3_flag_set(tp, APE_HAS_NCSI);
15963 }
15964
15965 static void tg3_read_dash_ver(struct tg3 *tp)
15966 {
15967         int vlen;
15968         u32 apedata;
15969         char *fwtype;
15970
15971         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15972
15973         if (tg3_flag(tp, APE_HAS_NCSI))
15974                 fwtype = "NCSI";
15975         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15976                 fwtype = "SMASH";
15977         else
15978                 fwtype = "DASH";
15979
15980         vlen = strlen(tp->fw_ver);
15981
15982         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15983                  fwtype,
15984                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15985                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15986                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15987                  (apedata & APE_FW_VERSION_BLDMSK));
15988 }
15989
15990 static void tg3_read_otp_ver(struct tg3 *tp)
15991 {
15992         u32 val, val2;
15993
15994         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15995                 return;
15996
15997         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15998             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15999             TG3_OTP_MAGIC0_VALID(val)) {
16000                 u64 val64 = (u64) val << 32 | val2;
16001                 u32 ver = 0;
16002                 int i, vlen;
16003
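                /* Scan up to seven version bytes, low byte first, keeping
                 * the last nonzero byte seen.
                 */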
16004                 for (i = 0; i < 7; i++) {
16005                         if ((val64 & 0xff) == 0)
16006                                 break;
16007                         ver = val64 & 0xff;
16008                         val64 >>= 8;
16009                 }
16010                 vlen = strlen(tp->fw_ver);
16011                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16012         }
16013 }
16014
16015 static void tg3_read_fw_ver(struct tg3 *tp)
16016 {
16017         u32 val;
16018         bool vpd_vers = false;
16019
16020         if (tp->fw_ver[0] != 0)
16021                 vpd_vers = true;
16022
16023         if (tg3_flag(tp, NO_NVRAM)) {
16024                 strcat(tp->fw_ver, "sb");
16025                 tg3_read_otp_ver(tp);
16026                 return;
16027         }
16028
16029         if (tg3_nvram_read(tp, 0, &val))
16030                 return;
16031
16032         if (val == TG3_EEPROM_MAGIC)
16033                 tg3_read_bc_ver(tp);
16034         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16035                 tg3_read_sb_ver(tp, val);
16036         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16037                 tg3_read_hwsb_ver(tp);
16038
16039         if (tg3_flag(tp, ENABLE_ASF)) {
16040                 if (tg3_flag(tp, ENABLE_APE)) {
16041                         tg3_probe_ncsi(tp);
16042                         if (!vpd_vers)
16043                                 tg3_read_dash_ver(tp);
16044                 } else if (!vpd_vers) {
16045                         tg3_read_mgmtfw_ver(tp);
16046                 }
16047         }
16048
16049         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16050 }
16051
16052 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16053 {
16054         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16055                 return TG3_RX_RET_MAX_SIZE_5717;
16056         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16057                 return TG3_RX_RET_MAX_SIZE_5700;
16058         else
16059                 return TG3_RX_RET_MAX_SIZE_5705;
16060 }
16061
16062 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16063         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16064         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16065         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16066         { },
16067 };
16068
16069 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16070 {
16071         struct pci_dev *peer;
16072         unsigned int func, devnr = tp->pdev->devfn & ~7;
16073
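        /* Scan the other functions in this slot (devfn with the function
         * bits cleared) for the second port of a dual-port device.
         */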
16074         for (func = 0; func < 8; func++) {
16075                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16076                 if (peer && peer != tp->pdev)
16077                         break;
16078                 pci_dev_put(peer);
16079         }
16080         /* 5704 can be configured in single-port mode; set peer to
16081          * tp->pdev in that case.
16082          */
16083         if (!peer) {
16084                 peer = tp->pdev;
16085                 return peer;
16086         }
16087
16088         /*
16089          * We don't need to keep the refcount elevated; there's no way
16090          * to remove one half of this device without removing the other.
16091          */
16092         pci_dev_put(peer);
16093
16094         return peer;
16095 }
16096
16097 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16098 {
16099         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16100         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16101                 u32 reg;
16102
16103                 /* All devices that use the alternate
16104                  * ASIC REV location have a CPMU.
16105                  */
16106                 tg3_flag_set(tp, CPMU_PRESENT);
16107
16108                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16109                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16110                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16111                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16112                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16113                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16114                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16115                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16116                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16117                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16118                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16119                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16120                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16121                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16122                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16123                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16124                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16125                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16126                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16127                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16128                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16129                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16130                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16131                 else
16132                         reg = TG3PCI_PRODID_ASICREV;
16133
16134                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16135         }
16136
16137         /* Wrong chip ID in 5752 A0. This code can be removed later
16138          * as A0 is not in production.
16139          */
16140         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16141                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16142
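        /* 5717 C0 hardware is effectively a 5720 A0; treat it as such. */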
16143         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16144                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16145
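        /* The generation flags below nest: 5717_PLUS implies 57765_PLUS,
         * which implies 5755_PLUS, which implies 5750_PLUS, which implies
         * 5705_PLUS.
         */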
16146         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16147             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16148             tg3_asic_rev(tp) == ASIC_REV_5720)
16149                 tg3_flag_set(tp, 5717_PLUS);
16150
16151         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16152             tg3_asic_rev(tp) == ASIC_REV_57766)
16153                 tg3_flag_set(tp, 57765_CLASS);
16154
16155         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16156             tg3_asic_rev(tp) == ASIC_REV_5762)
16157                 tg3_flag_set(tp, 57765_PLUS);
16158
16159         /* Intentionally exclude ASIC_REV_5906 */
16160         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16161             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16162             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16163             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16164             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16165             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16166             tg3_flag(tp, 57765_PLUS))
16167                 tg3_flag_set(tp, 5755_PLUS);
16168
16169         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16170             tg3_asic_rev(tp) == ASIC_REV_5714)
16171                 tg3_flag_set(tp, 5780_CLASS);
16172
16173         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16174             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16175             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16176             tg3_flag(tp, 5755_PLUS) ||
16177             tg3_flag(tp, 5780_CLASS))
16178                 tg3_flag_set(tp, 5750_PLUS);
16179
16180         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16181             tg3_flag(tp, 5750_PLUS))
16182                 tg3_flag_set(tp, 5705_PLUS);
16183 }
16184
16185 static bool tg3_10_100_only_device(struct tg3 *tp,
16186                                    const struct pci_device_id *ent)
16187 {
16188         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16189
16190         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16191              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16192             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16193                 return true;
16194
16195         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16196                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16197                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16198                                 return true;
16199                 } else {
16200                         return true;
16201                 }
16202         }
16203
16204         return false;
16205 }
16206
16207 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16208 {
16209         u32 misc_ctrl_reg;
16210         u32 pci_state_reg, grc_misc_cfg;
16211         u32 val;
16212         u16 pci_cmd;
16213         int err;
16214
16215         /* Force memory write invalidate off.  If we leave it on,
16216          * then on 5700_BX chips we have to enable a workaround.
16217          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16218          * to match the cacheline size.  The Broadcom driver has this
16219          * workaround but turns MWI off all the time, so it never uses
16220          * it.  This seems to suggest that the workaround is insufficient.
16221          */
16222         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16223         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16224         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16225
16226         /* Important! -- Make sure register accesses are byteswapped
16227          * correctly.  Also, for those chips that require it, make
16228          * sure that indirect register accesses are enabled before
16229          * the first operation.
16230          */
16231         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16232                               &misc_ctrl_reg);
16233         tp->misc_host_ctrl |= (misc_ctrl_reg &
16234                                MISC_HOST_CTRL_CHIPREV);
16235         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16236                                tp->misc_host_ctrl);
16237
16238         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16239
16240         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16241          * we need to disable memory and use configuration cycles
16242          * only to access all registers. The 5702/03 chips
16243          * can mistakenly decode the special cycles from the
16244          * ICH chipsets as memory write cycles, causing corruption
16245          * of register and memory space. Only certain ICH bridges
16246          * will drive special cycles with non-zero data during the
16247          * address phase which can fall within the 5703's address
16248          * range. This is not an ICH bug as the PCI spec allows
16249          * non-zero address during special cycles. However, only
16250          * these ICH bridges are known to drive non-zero addresses
16251          * during special cycles.
16252          *
16253          * Since special cycles do not cross PCI bridges, we only
16254          * enable this workaround if the 5703 is on the secondary
16255          * bus of these ICH bridges.
16256          */
16257         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16258             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16259                 static struct tg3_dev_id {
16260                         u32     vendor;
16261                         u32     device;
16262                         u32     rev;
16263                 } ich_chipsets[] = {
16264                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16265                           PCI_ANY_ID },
16266                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16267                           PCI_ANY_ID },
16268                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16269                           0xa },
16270                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16271                           PCI_ANY_ID },
16272                         { },
16273                 };
16274                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16275                 struct pci_dev *bridge = NULL;
16276
16277                 while (pci_id->vendor != 0) {
16278                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16279                                                 bridge);
16280                         if (!bridge) {
16281                                 pci_id++;
16282                                 continue;
16283                         }
16284                         if (pci_id->rev != PCI_ANY_ID) {
16285                                 if (bridge->revision > pci_id->rev)
16286                                         continue;
16287                         }
16288                         if (bridge->subordinate &&
16289                             (bridge->subordinate->number ==
16290                              tp->pdev->bus->number)) {
16291                                 tg3_flag_set(tp, ICH_WORKAROUND);
16292                                 pci_dev_put(bridge);
16293                                 break;
16294                         }
16295                 }
16296         }
16297
16298         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16299                 static struct tg3_dev_id {
16300                         u32     vendor;
16301                         u32     device;
16302                 } bridge_chipsets[] = {
16303                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16304                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16305                         { },
16306                 };
16307                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16308                 struct pci_dev *bridge = NULL;
16309
16310                 while (pci_id->vendor != 0) {
16311                         bridge = pci_get_device(pci_id->vendor,
16312                                                 pci_id->device,
16313                                                 bridge);
16314                         if (!bridge) {
16315                                 pci_id++;
16316                                 continue;
16317                         }
16318                         if (bridge->subordinate &&
16319                             (bridge->subordinate->number <=
16320                              tp->pdev->bus->number) &&
16321                             (bridge->subordinate->busn_res.end >=
16322                              tp->pdev->bus->number)) {
16323                                 tg3_flag_set(tp, 5701_DMA_BUG);
16324                                 pci_dev_put(bridge);
16325                                 break;
16326                         }
16327                 }
16328         }
16329
16330         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16331          * DMA addresses wider than 40 bits.  This bridge may have other
16332          * 57xx devices behind it in some 4-port NIC designs, for example.
16333          * Any tg3 device found behind the bridge will also need the 40-bit
16334          * DMA workaround.
16335          */
16336         if (tg3_flag(tp, 5780_CLASS)) {
16337                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16338                 tp->msi_cap = tp->pdev->msi_cap;
16339         } else {
16340                 struct pci_dev *bridge = NULL;
16341
16342                 do {
16343                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16344                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16345                                                 bridge);
16346                         if (bridge && bridge->subordinate &&
16347                             (bridge->subordinate->number <=
16348                              tp->pdev->bus->number) &&
16349                             (bridge->subordinate->busn_res.end >=
16350                              tp->pdev->bus->number)) {
16351                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16352                                 pci_dev_put(bridge);
16353                                 break;
16354                         }
16355                 } while (bridge);
16356         }
16357
16358         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16359             tg3_asic_rev(tp) == ASIC_REV_5714)
16360                 tp->pdev_peer = tg3_find_peer(tp);
16361
16362         /* Determine TSO capabilities */
16363         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16364                 ; /* Do nothing. HW bug. */
16365         else if (tg3_flag(tp, 57765_PLUS))
16366                 tg3_flag_set(tp, HW_TSO_3);
16367         else if (tg3_flag(tp, 5755_PLUS) ||
16368                  tg3_asic_rev(tp) == ASIC_REV_5906)
16369                 tg3_flag_set(tp, HW_TSO_2);
16370         else if (tg3_flag(tp, 5750_PLUS)) {
16371                 tg3_flag_set(tp, HW_TSO_1);
16372                 tg3_flag_set(tp, TSO_BUG);
16373                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16374                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16375                         tg3_flag_clear(tp, TSO_BUG);
16376         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16377                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16378                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16379                 tg3_flag_set(tp, FW_TSO);
16380                 tg3_flag_set(tp, TSO_BUG);
16381                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16382                         tp->fw_needed = FIRMWARE_TG3TSO5;
16383                 else
16384                         tp->fw_needed = FIRMWARE_TG3TSO;
16385         }
16386
16387         /* Selectively allow TSO based on operating conditions */
16388         if (tg3_flag(tp, HW_TSO_1) ||
16389             tg3_flag(tp, HW_TSO_2) ||
16390             tg3_flag(tp, HW_TSO_3) ||
16391             tg3_flag(tp, FW_TSO)) {
16392                 /* For firmware TSO, assume ASF is disabled.
16393                  * We'll disable TSO later if we discover ASF
16394                  * is enabled in tg3_get_eeprom_hw_cfg().
16395                  */
16396                 tg3_flag_set(tp, TSO_CAPABLE);
16397         } else {
16398                 tg3_flag_clear(tp, TSO_CAPABLE);
16399                 tg3_flag_clear(tp, TSO_BUG);
16400                 tp->fw_needed = NULL;
16401         }
16402
16403         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16404                 tp->fw_needed = FIRMWARE_TG3;
16405
16406         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16407                 tp->fw_needed = FIRMWARE_TG357766;
16408
16409         tp->irq_max = 1;
16410
16411         if (tg3_flag(tp, 5750_PLUS)) {
16412                 tg3_flag_set(tp, SUPPORT_MSI);
16413                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16414                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16415                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16416                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16417                      tp->pdev_peer == tp->pdev))
16418                         tg3_flag_clear(tp, SUPPORT_MSI);
16419
16420                 if (tg3_flag(tp, 5755_PLUS) ||
16421                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16422                         tg3_flag_set(tp, 1SHOT_MSI);
16423                 }
16424
16425                 if (tg3_flag(tp, 57765_PLUS)) {
16426                         tg3_flag_set(tp, SUPPORT_MSIX);
16427                         tp->irq_max = TG3_IRQ_MAX_VECS;
16428                 }
16429         }
16430
16431         tp->txq_max = 1;
16432         tp->rxq_max = 1;
16433         if (tp->irq_max > 1) {
16434                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16435                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16436
16437                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16438                     tg3_asic_rev(tp) == ASIC_REV_5720)
16439                         tp->txq_max = tp->irq_max - 1;
16440         }
16441
16442         if (tg3_flag(tp, 5755_PLUS) ||
16443             tg3_asic_rev(tp) == ASIC_REV_5906)
16444                 tg3_flag_set(tp, SHORT_DMA_BUG);
16445
16446         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16447                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16448
16449         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16450             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16451             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16452             tg3_asic_rev(tp) == ASIC_REV_5762)
16453                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16454
16455         if (tg3_flag(tp, 57765_PLUS) &&
16456             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16457                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16458
16459         if (!tg3_flag(tp, 5705_PLUS) ||
16460             tg3_flag(tp, 5780_CLASS) ||
16461             tg3_flag(tp, USE_JUMBO_BDFLAG))
16462                 tg3_flag_set(tp, JUMBO_CAPABLE);
16463
16464         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16465                               &pci_state_reg);
16466
16467         if (pci_is_pcie(tp->pdev)) {
16468                 u16 lnkctl;
16469
16470                 tg3_flag_set(tp, PCI_EXPRESS);
16471
16472                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16473                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16474                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16475                                 tg3_flag_clear(tp, HW_TSO_2);
16476                                 tg3_flag_clear(tp, TSO_CAPABLE);
16477                         }
16478                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16479                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16480                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16481                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16482                                 tg3_flag_set(tp, CLKREQ_BUG);
16483                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16484                         tg3_flag_set(tp, L1PLLPD_EN);
16485                 }
16486         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16487                 /* BCM5785 devices are effectively PCIe devices, and should
16488                  * follow PCIe codepaths, but do not have a PCIe capabilities
16489                  * section.
16490                  */
16491                 tg3_flag_set(tp, PCI_EXPRESS);
16492         } else if (!tg3_flag(tp, 5705_PLUS) ||
16493                    tg3_flag(tp, 5780_CLASS)) {
16494                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16495                 if (!tp->pcix_cap) {
16496                         dev_err(&tp->pdev->dev,
16497                                 "Cannot find PCI-X capability, aborting\n");
16498                         return -EIO;
16499                 }
16500
16501                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16502                         tg3_flag_set(tp, PCIX_MODE);
16503         }
16504
16505         /* If we have an AMD 762 or VIA K8T800 chipset, write
16506          * reordering to the mailbox registers done by the host
16507          * controller can cause major troubles.  We read back from
16508          * every mailbox register write to force the writes to be
16509          * posted to the chip in order.
16510          */
16511         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16512             !tg3_flag(tp, PCI_EXPRESS))
16513                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16514
16515         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16516                              &tp->pci_cacheline_sz);
16517         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16518                              &tp->pci_lat_timer);
16519         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16520             tp->pci_lat_timer < 64) {
16521                 tp->pci_lat_timer = 64;
16522                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16523                                       tp->pci_lat_timer);
16524         }
16525
16526         /* Important! -- It is critical that the PCI-X hw workaround
16527          * situation is decided before the first MMIO register access.
16528          */
16529         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16530                 /* 5700 BX chips need to have their TX producer index
16531                  * mailboxes written twice to workaround a bug.
16532                  */
16533                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16534
16535                 /* If we are in PCI-X mode, enable the register write workaround.
16536                  *
16537                  * The workaround is to use indirect register accesses
16538                  * for all chip writes not to mailbox registers.
16539                  */
16540                 if (tg3_flag(tp, PCIX_MODE)) {
16541                         u32 pm_reg;
16542
16543                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16544
16545                         /* The chip can have its power management PCI config
16546                          * space registers clobbered due to this bug.
16547                          * So explicitly force the chip into D0 here.
16548                          */
16549                         pci_read_config_dword(tp->pdev,
16550                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16551                                               &pm_reg);
16552                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16553                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16554                         pci_write_config_dword(tp->pdev,
16555                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16556                                                pm_reg);
16557
16558                         /* Also, force SERR#/PERR# in PCI command. */
16559                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16560                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16561                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16562                 }
16563         }
16564
16565         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16566                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16567         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16568                 tg3_flag_set(tp, PCI_32BIT);
16569
16570         /* Chip-specific fixup from Broadcom driver */
16571         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16572             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16573                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16574                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16575         }
16576
16577         /* Default fast path register access methods */
16578         tp->read32 = tg3_read32;
16579         tp->write32 = tg3_write32;
16580         tp->read32_mbox = tg3_read32;
16581         tp->write32_mbox = tg3_write32;
16582         tp->write32_tx_mbox = tg3_write32;
16583         tp->write32_rx_mbox = tg3_write32;
16584
16585         /* Various workaround register access methods */
16586         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16587                 tp->write32 = tg3_write_indirect_reg32;
16588         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16589                  (tg3_flag(tp, PCI_EXPRESS) &&
16590                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16591                 /*
16592                  * Back-to-back register writes can cause problems on these
16593                  * chips; the workaround is to read back all reg writes
16594                  * except those to mailbox regs.
16595                  *
16596                  * See tg3_write_indirect_reg32().
16597                  */
16598                 tp->write32 = tg3_write_flush_reg32;
16599         }
16600
16601         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16602                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16603                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16604                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16605         }
16606
16607         if (tg3_flag(tp, ICH_WORKAROUND)) {
16608                 tp->read32 = tg3_read_indirect_reg32;
16609                 tp->write32 = tg3_write_indirect_reg32;
16610                 tp->read32_mbox = tg3_read_indirect_mbox;
16611                 tp->write32_mbox = tg3_write_indirect_mbox;
16612                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16613                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16614
16615                 iounmap(tp->regs);
16616                 tp->regs = NULL;
16617
16618                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16619                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16620                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16621         }
16622         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16623                 tp->read32_mbox = tg3_read32_mbox_5906;
16624                 tp->write32_mbox = tg3_write32_mbox_5906;
16625                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16626                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16627         }
16628
16629         if (tp->write32 == tg3_write_indirect_reg32 ||
16630             (tg3_flag(tp, PCIX_MODE) &&
16631              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16632               tg3_asic_rev(tp) == ASIC_REV_5701)))
16633                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16634
16635         /* The memory arbiter has to be enabled in order for SRAM accesses
16636          * to succeed.  Normally on powerup the tg3 chip firmware will make
16637          * sure it is enabled, but other entities such as system netboot
16638          * code might disable it.
16639          */
16640         val = tr32(MEMARB_MODE);
16641         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16642
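        /* Identify which function (port) of a multi-port chip this is.
         * On some chips devfn alone does not reflect the port, so the
         * value is refined below from PCI-X or CPMU status.
         */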
16643         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16644         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16645             tg3_flag(tp, 5780_CLASS)) {
16646                 if (tg3_flag(tp, PCIX_MODE)) {
16647                         pci_read_config_dword(tp->pdev,
16648                                               tp->pcix_cap + PCI_X_STATUS,
16649                                               &val);
16650                         tp->pci_fn = val & 0x7;
16651                 }
16652         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16653                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16654                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16655                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16656                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16657                         val = tr32(TG3_CPMU_STATUS);
16658
16659                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16660                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16661                 else
16662                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16663                                      TG3_CPMU_STATUS_FSHFT_5719;
16664         }
16665
16666         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16667                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16668                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16669         }
16670
16671         /* Get eeprom hw config before calling tg3_set_power_state().
16672          * In particular, the TG3_FLAG_IS_NIC flag must be
16673          * determined before calling tg3_set_power_state() so that
16674          * we know whether or not to switch out of Vaux power.
16675          * When the flag is set, it means that GPIO1 is used for eeprom
16676          * write protect and also implies that it is a LOM where GPIOs
16677          * are not used to switch power.
16678          */
16679         tg3_get_eeprom_hw_cfg(tp);
16680
16681         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16682                 tg3_flag_clear(tp, TSO_CAPABLE);
16683                 tg3_flag_clear(tp, TSO_BUG);
16684                 tp->fw_needed = NULL;
16685         }
16686
16687         if (tg3_flag(tp, ENABLE_APE)) {
16688                 /* Allow reads and writes to the
16689                  * APE register and memory space.
16690                  */
16691                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16692                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16693                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16694                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16695                                        pci_state_reg);
16696
16697                 tg3_ape_lock_init(tp);
16698                 tp->ape_hb_interval =
16699                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16700         }
16701
16702         /* Set up tp->grc_local_ctrl before calling
16703          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16704          * will bring 5700's external PHY out of reset.
16705          * It is also used as eeprom write protect on LOMs.
16706          */
16707         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16708         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16709             tg3_flag(tp, EEPROM_WRITE_PROT))
16710                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16711                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16712         /* Unused GPIO3 must be driven as output on 5752 because there
16713          * are no pull-up resistors on unused GPIO pins.
16714          */
16715         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16716                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16717
16718         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16719             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16720             tg3_flag(tp, 57765_CLASS))
16721                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16722
16723         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16725                 /* Turn off the debug UART. */
16726                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16727                 if (tg3_flag(tp, IS_NIC))
16728                         /* Keep VMain power. */
16729                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16730                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16731         }
16732
16733         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16734                 tp->grc_local_ctrl |=
16735                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16736
16737         /* Switch out of Vaux if it is a NIC */
16738         tg3_pwrsrc_switch_to_vmain(tp);
16739
16740         /* Derive initial jumbo mode from MTU assigned in
16741          * ether_setup() via the alloc_etherdev() call
16742          */
16743         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16744                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16745
16746         /* Determine the Wake-on-LAN speed to use. */
16747         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16748             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16749             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16750             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16751                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16752         } else {
16753                 tg3_flag_set(tp, WOL_SPEED_100MB);
16754         }
16755
16756         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16757                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16758
16759         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16760         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16761             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16762              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16763              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16764             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16765             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16766                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16767
16768         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16769             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16770                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16771         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16772                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16773
16774         if (tg3_flag(tp, 5705_PLUS) &&
16775             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16776             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16777             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16778             !tg3_flag(tp, 57765_PLUS)) {
16779                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16780                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16781                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16782                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16783                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16784                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16785                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16786                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16787                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16788                 } else
16789                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16790         }
16791
16792         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16793             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16794                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16795                 if (tp->phy_otp == 0)
16796                         tp->phy_otp = TG3_OTP_DEFAULT;
16797         }
16798
16799         if (tg3_flag(tp, CPMU_PRESENT))
16800                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16801         else
16802                 tp->mi_mode = MAC_MI_MODE_BASE;
16803
16804         tp->coalesce_mode = 0;
16805         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16806             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16807                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16808
16809         /* Set these bits to enable statistics workaround. */
16810         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16811             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16812             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16813             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16814                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16815                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16816         }
16817
16818         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16819             tg3_asic_rev(tp) == ASIC_REV_57780)
16820                 tg3_flag_set(tp, USE_PHYLIB);
16821
16822         err = tg3_mdio_init(tp);
16823         if (err)
16824                 return err;
16825
16826         /* Initialize data/descriptor byte/word swapping. */
16827         val = tr32(GRC_MODE);
16828         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16829             tg3_asic_rev(tp) == ASIC_REV_5762)
16830                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16831                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16832                         GRC_MODE_B2HRX_ENABLE |
16833                         GRC_MODE_HTX2B_ENABLE |
16834                         GRC_MODE_HOST_STACKUP);
16835         else
16836                 val &= GRC_MODE_HOST_STACKUP;
16837
16838         tw32(GRC_MODE, val | tp->grc_mode);
16839
16840         tg3_switch_clocks(tp);
16841
16842         /* Clear this out for sanity. */
16843         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16844
16845         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16846         tw32(TG3PCI_REG_BASE_ADDR, 0);
16847
16848         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16849                               &pci_state_reg);
16850         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16851             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16852                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16853                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16854                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16855                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16856                         void __iomem *sram_base;
16857
16858                         /* Write some dummy words into the SRAM status block
16859                          * area and see if they read back correctly.  If the
16860                          * readback fails, force-enable the PCIX workaround.
16861                          */
16862                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16863
16864                         writel(0x00000000, sram_base);
16865                         writel(0x00000000, sram_base + 4);
16866                         writel(0xffffffff, sram_base + 4);
16867                         if (readl(sram_base) != 0x00000000)
16868                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16869                 }
16870         }
16871
16872         udelay(50);
16873         tg3_nvram_init(tp);
16874
16875         /* If the device has NVRAM, there is no need to load patch firmware */
16876         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16877             !tg3_flag(tp, NO_NVRAM))
16878                 tp->fw_needed = NULL;
16879
16880         grc_misc_cfg = tr32(GRC_MISC_CFG);
16881         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16882
16883         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16884             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16885              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16886                 tg3_flag_set(tp, IS_5788);
16887
16888         if (!tg3_flag(tp, IS_5788) &&
16889             tg3_asic_rev(tp) != ASIC_REV_5700)
16890                 tg3_flag_set(tp, TAGGED_STATUS);
16891         if (tg3_flag(tp, TAGGED_STATUS)) {
16892                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16893                                       HOSTCC_MODE_CLRTICK_TXBD);
16894
16895                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16896                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16897                                        tp->misc_host_ctrl);
16898         }
16899
16900         /* Preserve the APE MAC_MODE bits */
16901         if (tg3_flag(tp, ENABLE_APE))
16902                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16903         else
16904                 tp->mac_mode = 0;
16905
16906         if (tg3_10_100_only_device(tp, ent))
16907                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16908
16909         err = tg3_phy_probe(tp);
16910         if (err) {
16911                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16912                 /* ... but do not return immediately ... */
16913                 tg3_mdio_fini(tp);
16914         }
16915
16916         tg3_read_vpd(tp);
16917         tg3_read_fw_ver(tp);
16918
16919         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16920                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16921         } else {
16922                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16923                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16924                 else
16925                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16926         }
16927
16928         /* 5700 {AX,BX} chips have a broken status block link
16929          * change bit implementation, so we must use the
16930          * status register in those cases.
16931          */
16932         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16933                 tg3_flag_set(tp, USE_LINKCHG_REG);
16934         else
16935                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16936
16937         /* The led_ctrl is set during tg3_phy_probe; here we might
16938          * have to force the link status polling mechanism based
16939          * upon subsystem IDs.
16940          */
16941         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16942             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16943             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16944                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16945                 tg3_flag_set(tp, USE_LINKCHG_REG);
16946         }
16947
16948         /* For all SERDES we poll the MAC status register. */
16949         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16950                 tg3_flag_set(tp, POLL_SERDES);
16951         else
16952                 tg3_flag_clear(tp, POLL_SERDES);
16953
16954         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16955                 tg3_flag_set(tp, POLL_CPMU_LINK);
16956
16957         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16958         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16959         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16960             tg3_flag(tp, PCIX_MODE)) {
16961                 tp->rx_offset = NET_SKB_PAD;
16962 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
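                /* Every rx packet would need realigning anyway here, so
                 * force the copy path for all sizes by maxing the threshold.
                 */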
16963                 tp->rx_copy_thresh = ~(u16)0;
16964 #endif
16965         }
16966
16967         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16968         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16969         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16970
16971         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16972
16973         /* Increment the rx prod index on the rx std ring by at most
16974          * 8 for these chips to work around hw errata.
16975          */
16976         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16977             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16978             tg3_asic_rev(tp) == ASIC_REV_5755)
16979                 tp->rx_std_max_post = 8;
16980
16981         if (tg3_flag(tp, ASPM_WORKAROUND))
16982                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16983                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16984
16985         return err;
16986 }
16987
16988 static int tg3_get_device_address(struct tg3 *tp)
16989 {
16990         struct net_device *dev = tp->dev;
16991         u32 hi, lo, mac_offset;
16992         int addr_ok = 0;
16993         int err;
16994
16995         if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16996                 return 0;
16997
16998         if (tg3_flag(tp, IS_SSB_CORE)) {
16999                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17000                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17001                         return 0;
17002         }
17003
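        /* NVRAM offset of this function's MAC address: 0x7c on most
         * devices, adjusted below for second ports and higher functions.
         */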
17004         mac_offset = 0x7c;
17005         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17006             tg3_flag(tp, 5780_CLASS)) {
17007                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17008                         mac_offset = 0xcc;
17009                 if (tg3_nvram_lock(tp))
17010                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17011                 else
17012                         tg3_nvram_unlock(tp);
17013         } else if (tg3_flag(tp, 5717_PLUS)) {
17014                 if (tp->pci_fn & 1)
17015                         mac_offset = 0xcc;
17016                 if (tp->pci_fn > 1)
17017                         mac_offset += 0x18c;
17018         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17019                 mac_offset = 0x10;
17020
17021         /* First try to get it from MAC address mailbox. */
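        /* Bootcode stores 0x484b (ASCII "HK") in the upper half of the
         * high word; that is treated as the valid-address signature here.
         */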
17022         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17023         if ((hi >> 16) == 0x484b) {
17024                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17025                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17026
17027                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17028                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17029                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17030                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17031                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17032
17033                 /* Some old bootcode may report a 0 MAC address in SRAM */
17034                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17035         }
17036         if (!addr_ok) {
17037                 /* Next, try NVRAM. */
17038                 if (!tg3_flag(tp, NO_NVRAM) &&
17039                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17040                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17041                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17042                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17043                 }
17044                 /* Finally just fetch it out of the MAC control regs. */
17045                 else {
17046                         hi = tr32(MAC_ADDR_0_HIGH);
17047                         lo = tr32(MAC_ADDR_0_LOW);
17048
17049                         dev->dev_addr[5] = lo & 0xff;
17050                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17051                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17052                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17053                         dev->dev_addr[1] = hi & 0xff;
17054                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17055                 }
17056         }
17057
17058         if (!is_valid_ether_addr(&dev->dev_addr[0]))
17059                 return -EINVAL;
17060         return 0;
17061 }
17062
17063 #define BOUNDARY_SINGLE_CACHELINE       1
17064 #define BOUNDARY_MULTI_CACHELINE        2
17065
17066 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17067 {
17068         int cacheline_size;
17069         u8 byte;
17070         int goal;
17071
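        /* PCI_CACHE_LINE_SIZE is in units of 32-bit dwords, hence the
         * multiply by 4; an unprogrammed (zero) register falls back to
         * an assumed 1024-byte line.
         */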
17072         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17073         if (byte == 0)
17074                 cacheline_size = 1024;
17075         else
17076                 cacheline_size = (int) byte * 4;
17077
17078         /* On 5703 and later chips, the boundary bits have no
17079          * effect.
17080          */
17081         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17082             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17083             !tg3_flag(tp, PCI_EXPRESS))
17084                 goto out;
17085
17086 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17087         goal = BOUNDARY_MULTI_CACHELINE;
17088 #else
17089 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17090         goal = BOUNDARY_SINGLE_CACHELINE;
17091 #else
17092         goal = 0;
17093 #endif
17094 #endif
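        /* The per-arch goal mirrors how the host bridges there handle
         * bursts across cache-line boundaries (see the comment below);
         * a goal of 0 leaves the chip's boundary bits alone.
         */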
17095
17096         if (tg3_flag(tp, 57765_PLUS)) {
17097                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17098                 goto out;
17099         }
17100
17101         if (!goal)
17102                 goto out;
17103
17104         /* PCI controllers on most RISC systems tend to disconnect
17105          * when a device tries to burst across a cache-line boundary.
17106          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17107          *
17108          * Unfortunately, for PCI-E there are only limited
17109          * write-side controls for this, and thus for reads
17110          * we will still get the disconnects.  We'll also waste
17111          * these PCI cycles for both read and write for chips
17112          * other than 5700 and 5701, which do not implement the
17113          * boundary bits.
17114          */
17115         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17116                 switch (cacheline_size) {
17117                 case 16:
17118                 case 32:
17119                 case 64:
17120                 case 128:
17121                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17122                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17123                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17124                         } else {
17125                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17126                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17127                         }
17128                         break;
17129
17130                 case 256:
17131                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17132                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17133                         break;
17134
17135                 default:
17136                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17137                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17138                         break;
17139                 }
17140         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17141                 switch (cacheline_size) {
17142                 case 16:
17143                 case 32:
17144                 case 64:
17145                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17146                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17147                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17148                                 break;
17149                         }
17150                         /* fallthrough */
17151                 case 128:
17152                 default:
17153                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17154                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17155                         break;
17156                 }
17157         } else {
17158                 switch (cacheline_size) {
17159                 case 16:
17160                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17161                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17162                                         DMA_RWCTRL_WRITE_BNDRY_16);
17163                                 break;
17164                         }
17165                         /* fallthrough */
17166                 case 32:
17167                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17168                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17169                                         DMA_RWCTRL_WRITE_BNDRY_32);
17170                                 break;
17171                         }
17172                         /* fallthrough */
17173                 case 64:
17174                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17175                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17176                                         DMA_RWCTRL_WRITE_BNDRY_64);
17177                                 break;
17178                         }
17179                         /* fallthrough */
17180                 case 128:
17181                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17182                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17183                                         DMA_RWCTRL_WRITE_BNDRY_128);
17184                                 break;
17185                         }
17186                         /* fallthrough */
17187                 case 256:
17188                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17189                                 DMA_RWCTRL_WRITE_BNDRY_256);
17190                         break;
17191                 case 512:
17192                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17193                                 DMA_RWCTRL_WRITE_BNDRY_512);
17194                         break;
17195                 case 1024:
17196                 default:
17197                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17198                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17199                         break;
17200                 }
17201         }
17202
17203 out:
17204         return val;
17205 }
17206
17207 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17208                            int size, bool to_device)
17209 {
17210         struct tg3_internal_buffer_desc test_desc;
17211         u32 sram_dma_descs;
17212         int i, ret;
17213
17214         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17215
17216         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17217         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17218         tw32(RDMAC_STATUS, 0);
17219         tw32(WDMAC_STATUS, 0);
17220
17221         tw32(BUFMGR_MODE, 0);
17222         tw32(FTQ_RESET, 0);
17223
17224         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17225         test_desc.addr_lo = buf_dma & 0xffffffff;
17226         test_desc.nic_mbuf = 0x00002100;
17227         test_desc.len = size;
17228
17229         /*
17230          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17231          * the *second* time the tg3 driver was loaded after an
17232          * initial scan.
17233          *
17234          * Broadcom tells me:
17235          *   ...the DMA engine is connected to the GRC block and a DMA
17236          *   reset may affect the GRC block in some unpredictable way...
17237          *   The behavior of resets to individual blocks has not been tested.
17238          *
17239          * Broadcom noted the GRC reset will also reset all sub-components.
17240          */
17241         if (to_device) {
17242                 test_desc.cqid_sqid = (13 << 8) | 2;
17243
17244                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17245                 udelay(40);
17246         } else {
17247                 test_desc.cqid_sqid = (16 << 8) | 7;
17248
17249                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17250                 udelay(40);
17251         }
17252         test_desc.flags = 0x00000005;
17253
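        /* Copy the descriptor into NIC SRAM one word at a time through
         * the PCI config-space memory window, then park the window at 0.
         */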
17254         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17255                 u32 val;
17256
17257                 val = *(((u32 *)&test_desc) + i);
17258                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17259                                        sram_dma_descs + (i * sizeof(u32)));
17260                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17261         }
17262         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17263
17264         if (to_device)
17265                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17266         else
17267                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17268
17269         ret = -ENODEV;
17270         for (i = 0; i < 40; i++) {
17271                 u32 val;
17272
17273                 if (to_device)
17274                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17275                 else
17276                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17277                 if ((val & 0xffff) == sram_dma_descs) {
17278                         ret = 0;
17279                         break;
17280                 }
17281
17282                 udelay(100);
17283         }
17284
17285         return ret;
17286 }
17287
17288 #define TEST_BUFFER_SIZE        0x2000
17289
17290 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17291         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17292         { },
17293 };
17294
17295 static int tg3_test_dma(struct tg3 *tp)
17296 {
17297         dma_addr_t buf_dma;
17298         u32 *buf, saved_dma_rwctrl;
17299         int ret = 0;
17300
17301         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17302                                  &buf_dma, GFP_KERNEL);
17303         if (!buf) {
17304                 ret = -ENOMEM;
17305                 goto out_nofree;
17306         }
17307
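        /* Start from the default PCI read/write command codes for the
         * DMA engines; tg3_calc_dma_bndry() below fills in the DMA
         * boundary bits.
         */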
17308         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17309                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17310
17311         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17312
17313         if (tg3_flag(tp, 57765_PLUS))
17314                 goto out;
17315
17316         if (tg3_flag(tp, PCI_EXPRESS)) {
17317                 /* DMA read watermark not used on PCIE */
17318                 tp->dma_rwctrl |= 0x00180000;
17319         } else if (!tg3_flag(tp, PCIX_MODE)) {
17320                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17321                     tg3_asic_rev(tp) == ASIC_REV_5750)
17322                         tp->dma_rwctrl |= 0x003f0000;
17323                 else
17324                         tp->dma_rwctrl |= 0x003f000f;
17325         } else {
17326                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17327                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17328                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17329                         u32 read_water = 0x7;
17330
17331                         /* If the 5704 is behind the EPB bridge, we can
17332                          * do the less restrictive ONE_DMA workaround for
17333                          * better performance.
17334                          */
17335                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17336                             tg3_asic_rev(tp) == ASIC_REV_5704)
17337                                 tp->dma_rwctrl |= 0x8000;
17338                         else if (ccval == 0x6 || ccval == 0x7)
17339                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17340
17341                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17342                                 read_water = 4;
17343                         /* Set bit 23 to enable PCIX hw bug fix */
17344                         tp->dma_rwctrl |=
17345                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17346                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17347                                 (1 << 23);
17348                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17349                         /* 5780 always in PCIX mode */
17350                         tp->dma_rwctrl |= 0x00144000;
17351                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17352                         /* 5714 always in PCIX mode */
17353                         tp->dma_rwctrl |= 0x00148000;
17354                 } else {
17355                         tp->dma_rwctrl |= 0x001b000f;
17356                 }
17357         }
17358         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17359                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17360
17361         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17362             tg3_asic_rev(tp) == ASIC_REV_5704)
17363                 tp->dma_rwctrl &= 0xfffffff0;
17364
17365         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17366             tg3_asic_rev(tp) == ASIC_REV_5701) {
17367                 /* Remove this if it causes problems for some boards. */
17368                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17369
17370                 /* On 5700/5701 chips, we need to set this bit.
17371                  * Otherwise the chip will issue cacheline transactions
17372                  * to streamable DMA memory with not all the byte
17373                  * enables turned on.  This is an error on several
17374                  * RISC PCI controllers, in particular sparc64.
17375                  *
17376                  * On 5703/5704 chips, this bit has been reassigned
17377                  * a different meaning.  In particular, it is used
17378                  * on those chips to enable a PCI-X workaround.
17379                  */
17380                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17381         }
17382
17383         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17384
17385
17386         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17387             tg3_asic_rev(tp) != ASIC_REV_5701)
17388                 goto out;
17389
17390         /* It is best to perform the DMA test with the maximum write burst
17391          * size to expose the 5700/5701 write DMA bug.
17392          */
17393         saved_dma_rwctrl = tp->dma_rwctrl;
17394         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17395         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17396
17397         while (1) {
17398                 u32 *p = buf, i;
17399
17400                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17401                         p[i] = i;
17402
17403                 /* Send the buffer to the chip. */
17404                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17405                 if (ret) {
17406                         dev_err(&tp->pdev->dev,
17407                                 "%s: Buffer write failed. err = %d\n",
17408                                 __func__, ret);
17409                         break;
17410                 }
17411
17412                 /* Now read it back. */
17413                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17414                 if (ret) {
17415                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17416                                 "err = %d\n", __func__, ret);
17417                         break;
17418                 }
17419
17420                 /* Verify it. */
17421                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17422                         if (p[i] == i)
17423                                 continue;
17424
17425                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17426                             DMA_RWCTRL_WRITE_BNDRY_16) {
17427                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17428                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17429                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17430                                 break;
17431                         } else {
17432                                 dev_err(&tp->pdev->dev,
17433                                         "%s: Buffer corrupted on read back! "
17434                                         "(%d != %d)\n", __func__, p[i], i);
17435                                 ret = -ENODEV;
17436                                 goto out;
17437                         }
17438                 }
17439
17440                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17441                         /* Success. */
17442                         ret = 0;
17443                         break;
17444                 }
17445         }
17446         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17447             DMA_RWCTRL_WRITE_BNDRY_16) {
17448                 /* The DMA test passed without adjusting the DMA boundary;
17449                  * now look for chipsets that are known to expose the
17450                  * DMA bug without failing the test.
17451                  */
17452                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17453                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17454                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17455                 } else {
17456                         /* Safe to use the calculated DMA boundary. */
17457                         tp->dma_rwctrl = saved_dma_rwctrl;
17458                 }
17459
17460                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17461         }
17462
17463 out:
17464         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17465 out_nofree:
17466         return ret;
17467 }
17468
17469 static void tg3_init_bufmgr_config(struct tg3 *tp)
17470 {
17471         if (tg3_flag(tp, 57765_PLUS)) {
17472                 tp->bufmgr_config.mbuf_read_dma_low_water =
17473                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17474                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17475                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17476                 tp->bufmgr_config.mbuf_high_water =
17477                         DEFAULT_MB_HIGH_WATER_57765;
17478
17479                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17480                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17481                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17482                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17483                 tp->bufmgr_config.mbuf_high_water_jumbo =
17484                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17485         } else if (tg3_flag(tp, 5705_PLUS)) {
17486                 tp->bufmgr_config.mbuf_read_dma_low_water =
17487                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17488                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17489                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17490                 tp->bufmgr_config.mbuf_high_water =
17491                         DEFAULT_MB_HIGH_WATER_5705;
17492                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17493                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17494                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17495                         tp->bufmgr_config.mbuf_high_water =
17496                                 DEFAULT_MB_HIGH_WATER_5906;
17497                 }
17498
17499                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17500                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17501                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17502                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17503                 tp->bufmgr_config.mbuf_high_water_jumbo =
17504                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17505         } else {
17506                 tp->bufmgr_config.mbuf_read_dma_low_water =
17507                         DEFAULT_MB_RDMA_LOW_WATER;
17508                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17509                         DEFAULT_MB_MACRX_LOW_WATER;
17510                 tp->bufmgr_config.mbuf_high_water =
17511                         DEFAULT_MB_HIGH_WATER;
17512
17513                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17514                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17515                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17516                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17517                 tp->bufmgr_config.mbuf_high_water_jumbo =
17518                         DEFAULT_MB_HIGH_WATER_JUMBO;
17519         }
17520
17521         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17522         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17523 }
17524
17525 static char *tg3_phy_string(struct tg3 *tp)
17526 {
17527         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17528         case TG3_PHY_ID_BCM5400:        return "5400";
17529         case TG3_PHY_ID_BCM5401:        return "5401";
17530         case TG3_PHY_ID_BCM5411:        return "5411";
17531         case TG3_PHY_ID_BCM5701:        return "5701";
17532         case TG3_PHY_ID_BCM5703:        return "5703";
17533         case TG3_PHY_ID_BCM5704:        return "5704";
17534         case TG3_PHY_ID_BCM5705:        return "5705";
17535         case TG3_PHY_ID_BCM5750:        return "5750";
17536         case TG3_PHY_ID_BCM5752:        return "5752";
17537         case TG3_PHY_ID_BCM5714:        return "5714";
17538         case TG3_PHY_ID_BCM5780:        return "5780";
17539         case TG3_PHY_ID_BCM5755:        return "5755";
17540         case TG3_PHY_ID_BCM5787:        return "5787";
17541         case TG3_PHY_ID_BCM5784:        return "5784";
17542         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17543         case TG3_PHY_ID_BCM5906:        return "5906";
17544         case TG3_PHY_ID_BCM5761:        return "5761";
17545         case TG3_PHY_ID_BCM5718C:       return "5718C";
17546         case TG3_PHY_ID_BCM5718S:       return "5718S";
17547         case TG3_PHY_ID_BCM57765:       return "57765";
17548         case TG3_PHY_ID_BCM5719C:       return "5719C";
17549         case TG3_PHY_ID_BCM5720C:       return "5720C";
17550         case TG3_PHY_ID_BCM5762:        return "5762C";
17551         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17552         case 0:                 return "serdes";
17553         default:                return "unknown";
17554         }
17555 }
17556
17557 static char *tg3_bus_string(struct tg3 *tp, char *str)
17558 {
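        /* Builds strings such as "PCI Express", "PCIX:133MHz:64-bit" or
         * "PCI:33MHz:32-bit" for the probe banner.
         */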
17559         if (tg3_flag(tp, PCI_EXPRESS)) {
17560                 strcpy(str, "PCI Express");
17561                 return str;
17562         } else if (tg3_flag(tp, PCIX_MODE)) {
17563                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17564
17565                 strcpy(str, "PCIX:");
17566
17567                 if ((clock_ctrl == 7) ||
17568                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17569                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17570                         strcat(str, "133MHz");
17571                 else if (clock_ctrl == 0)
17572                         strcat(str, "33MHz");
17573                 else if (clock_ctrl == 2)
17574                         strcat(str, "50MHz");
17575                 else if (clock_ctrl == 4)
17576                         strcat(str, "66MHz");
17577                 else if (clock_ctrl == 6)
17578                         strcat(str, "100MHz");
17579         } else {
17580                 strcpy(str, "PCI:");
17581                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17582                         strcat(str, "66MHz");
17583                 else
17584                         strcat(str, "33MHz");
17585         }
17586         if (tg3_flag(tp, PCI_32BIT))
17587                 strcat(str, ":32-bit");
17588         else
17589                 strcat(str, ":64-bit");
17590         return str;
17591 }
17592
17593 static void tg3_init_coal(struct tg3 *tp)
17594 {
17595         struct ethtool_coalesce *ec = &tp->coal;
17596
17597         memset(ec, 0, sizeof(*ec));
17598         ec->cmd = ETHTOOL_GCOALESCE;
17599         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17600         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17601         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17602         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17603         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17604         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17605         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17606         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17607         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17608
17609         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17610                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17611                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17612                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17613                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17614                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17615         }
17616
17617         if (tg3_flag(tp, 5705_PLUS)) {
17618                 ec->rx_coalesce_usecs_irq = 0;
17619                 ec->tx_coalesce_usecs_irq = 0;
17620                 ec->stats_block_coalesce_usecs = 0;
17621         }
17622 }
17623
17624 static int tg3_init_one(struct pci_dev *pdev,
17625                                   const struct pci_device_id *ent)
17626 {
17627         struct net_device *dev;
17628         struct tg3 *tp;
17629         int i, err;
17630         u32 sndmbx, rcvmbx, intmbx;
17631         char str[40];
17632         u64 dma_mask, persist_dma_mask;
17633         netdev_features_t features = 0;
17634
17635         printk_once(KERN_INFO "%s\n", version);
17636
17637         err = pci_enable_device(pdev);
17638         if (err) {
17639                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17640                 return err;
17641         }
17642
17643         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17644         if (err) {
17645                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17646                 goto err_out_disable_pdev;
17647         }
17648
17649         pci_set_master(pdev);
17650
17651         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17652         if (!dev) {
17653                 err = -ENOMEM;
17654                 goto err_out_free_res;
17655         }
17656
17657         SET_NETDEV_DEV(dev, &pdev->dev);
17658
17659         tp = netdev_priv(dev);
17660         tp->pdev = pdev;
17661         tp->dev = dev;
17662         tp->rx_mode = TG3_DEF_RX_MODE;
17663         tp->tx_mode = TG3_DEF_TX_MODE;
17664         tp->irq_sync = 1;
17665         tp->pcierr_recovery = false;
17666
17667         if (tg3_debug > 0)
17668                 tp->msg_enable = tg3_debug;
17669         else
17670                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17671
17672         if (pdev_is_ssb_gige_core(pdev)) {
17673                 tg3_flag_set(tp, IS_SSB_CORE);
17674                 if (ssb_gige_must_flush_posted_writes(pdev))
17675                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17676                 if (ssb_gige_one_dma_at_once(pdev))
17677                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17678                 if (ssb_gige_have_roboswitch(pdev)) {
17679                         tg3_flag_set(tp, USE_PHYLIB);
17680                         tg3_flag_set(tp, ROBOSWITCH);
17681                 }
17682                 if (ssb_gige_is_rgmii(pdev))
17683                         tg3_flag_set(tp, RGMII_MODE);
17684         }
17685
17686         /* The word/byte swap controls here affect register access byte
17687          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17688          * setting below.
17689          */
17690         tp->misc_host_ctrl =
17691                 MISC_HOST_CTRL_MASK_PCI_INT |
17692                 MISC_HOST_CTRL_WORD_SWAP |
17693                 MISC_HOST_CTRL_INDIR_ACCESS |
17694                 MISC_HOST_CTRL_PCISTATE_RW;
17695
17696         /* The NONFRM (non-frame) byte/word swap controls take effect
17697          * on descriptor entries and anything else that isn't packet data.
17698          *
17699          * The StrongARM chips on the board (one for tx, one for rx)
17700          * are running in big-endian mode.
17701          */
17702         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17703                         GRC_MODE_WSWAP_NONFRM_DATA);
17704 #ifdef __BIG_ENDIAN
17705         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17706 #endif
17707         spin_lock_init(&tp->lock);
17708         spin_lock_init(&tp->indirect_lock);
17709         INIT_WORK(&tp->reset_task, tg3_reset_task);
17710
17711         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17712         if (!tp->regs) {
17713                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17714                 err = -ENOMEM;
17715                 goto err_out_free_dev;
17716         }
17717
17718         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17719             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17720             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17730             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17731             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17732             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17733                 tg3_flag_set(tp, ENABLE_APE);
17734                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17735                 if (!tp->aperegs) {
17736                         dev_err(&pdev->dev,
17737                                 "Cannot map APE registers, aborting\n");
17738                         err = -ENOMEM;
17739                         goto err_out_iounmap;
17740                 }
17741         }
17742
17743         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17744         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17745
17746         dev->ethtool_ops = &tg3_ethtool_ops;
17747         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17748         dev->netdev_ops = &tg3_netdev_ops;
17749         dev->irq = pdev->irq;
17750
17751         err = tg3_get_invariants(tp, ent);
17752         if (err) {
17753                 dev_err(&pdev->dev,
17754                         "Problem fetching invariants of chip, aborting\n");
17755                 goto err_out_apeunmap;
17756         }
17757
17758         /* The EPB bridge inside 5714, 5715, and 5780 and any
17759          * device behind the EPB cannot support DMA addresses > 40-bit.
17760          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17761          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17762          * do DMA address check in tg3_start_xmit().
17763          */
17764         if (tg3_flag(tp, IS_5788))
17765                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17766         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17767                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17768 #ifdef CONFIG_HIGHMEM
17769                 dma_mask = DMA_BIT_MASK(64);
17770 #endif
17771         } else
17772                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17773
17774         /* Configure DMA attributes. */
17775         if (dma_mask > DMA_BIT_MASK(32)) {
17776                 err = pci_set_dma_mask(pdev, dma_mask);
17777                 if (!err) {
17778                         features |= NETIF_F_HIGHDMA;
17779                         err = pci_set_consistent_dma_mask(pdev,
17780                                                           persist_dma_mask);
17781                         if (err < 0) {
17782                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17783                                         "DMA for consistent allocations\n");
17784                                 goto err_out_apeunmap;
17785                         }
17786                 }
17787         }
17788         if (err || dma_mask == DMA_BIT_MASK(32)) {
17789                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17790                 if (err) {
17791                         dev_err(&pdev->dev,
17792                                 "No usable DMA configuration, aborting\n");
17793                         goto err_out_apeunmap;
17794                 }
17795         }
17796
17797         tg3_init_bufmgr_config(tp);
17798
17799         /* 5700 B0 chips do not support checksumming correctly due
17800          * to hardware bugs.
17801          */
17802         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17803                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17804
17805                 if (tg3_flag(tp, 5755_PLUS))
17806                         features |= NETIF_F_IPV6_CSUM;
17807         }
17808
17809         /* TSO is on by default on chips that support hardware TSO.
17810          * Firmware TSO on older chips gives lower performance, so it
17811          * is off by default, but can be enabled using ethtool.
17812          */
17813         if ((tg3_flag(tp, HW_TSO_1) ||
17814              tg3_flag(tp, HW_TSO_2) ||
17815              tg3_flag(tp, HW_TSO_3)) &&
17816             (features & NETIF_F_IP_CSUM))
17817                 features |= NETIF_F_TSO;
17818         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17819                 if (features & NETIF_F_IPV6_CSUM)
17820                         features |= NETIF_F_TSO6;
17821                 if (tg3_flag(tp, HW_TSO_3) ||
17822                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17823                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17824                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17825                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17826                     tg3_asic_rev(tp) == ASIC_REV_57780)
17827                         features |= NETIF_F_TSO_ECN;
17828         }
17829
17830         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17831                          NETIF_F_HW_VLAN_CTAG_RX;
17832         dev->vlan_features |= features;
17833
17834         /*
17835          * Add loopback capability only for a subset of devices that support
17836          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17837          * loopback for the remaining devices.
17838          */
17839         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17840             !tg3_flag(tp, CPMU_PRESENT))
17841                 /* Add the loopback capability */
17842                 features |= NETIF_F_LOOPBACK;
17843
17844         dev->hw_features |= features;
17845         dev->priv_flags |= IFF_UNICAST_FLT;
17846
17847         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17848         dev->min_mtu = TG3_MIN_MTU;
17849         dev->max_mtu = TG3_MAX_MTU(tp);
17850
17851         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17852             !tg3_flag(tp, TSO_CAPABLE) &&
17853             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17854                 tg3_flag_set(tp, MAX_RXPEND_64);
17855                 tp->rx_pending = 63;
17856         }
17857
17858         err = tg3_get_device_address(tp);
17859         if (err) {
17860                 dev_err(&pdev->dev,
17861                         "Could not obtain valid ethernet address, aborting\n");
17862                 goto err_out_apeunmap;
17863         }
17864
17865         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17866         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17867         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17868         for (i = 0; i < tp->irq_max; i++) {
17869                 struct tg3_napi *tnapi = &tp->napi[i];
17870
17871                 tnapi->tp = tp;
17872                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17873
17874                 tnapi->int_mbox = intmbx;
17875                 if (i <= 4)
17876                         intmbx += 0x8;
17877                 else
17878                         intmbx += 0x4;
17879
17880                 tnapi->consmbox = rcvmbx;
17881                 tnapi->prodmbox = sndmbx;
17882
17883                 if (i)
17884                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17885                 else
17886                         tnapi->coal_now = HOSTCC_MODE_NOW;
17887
17888                 if (!tg3_flag(tp, SUPPORT_MSIX))
17889                         break;
17890
17891                 /*
17892                  * If we support MSIX, we'll be using RSS.  If we're using
17893                  * RSS, the first vector only handles link interrupts and the
17894                  * remaining vectors handle rx and tx interrupts.  Reuse the
17895                  * mailbox values for the next iteration.  The values set up
17896                  * above are still useful for the single vectored mode.
17897                  */
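                /*
                 * Sketch of the resulting layout under RSS (illustrative,
                 * assuming a 4-ring configuration):
                 *   vector 0    -> link/status interrupts only
                 *   vectors 1-4 -> rx and tx ring interrupts
                 */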
17898                 if (!i)
17899                         continue;
17900
17901                 rcvmbx += 0x8;
17902
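                /*
                 * The zig-zag below walks the send producer mailboxes in
                 * hardware order.  Relative to MAILBOX_SNDHOST_PROD_IDX_0,
                 * and assuming TG3_64BIT_REG_LOW is 0x4, successive vectors
                 * use offsets +0x4, +0x0, +0xc, +0x8, +0x14, +0x10, ... -
                 * i.e. the low word, then the high word, of each successive
                 * 64-bit mailbox register.
                 */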
17903                 if (sndmbx & 0x4)
17904                         sndmbx -= 0x4;
17905                 else
17906                         sndmbx += 0xc;
17907         }
17908
17909         /*
17910          * Reset the chip in case the UNDI or EFI driver did not shut it
17911          * down.  The DMA self test will enable WDMAC and we'll see
17912          * (spurious) pending DMA on the PCI bus at that point.
17913          */
17914         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17915             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17916                 tg3_full_lock(tp, 0);
17917                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17918                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17919                 tg3_full_unlock(tp);
17920         }
17921
17922         err = tg3_test_dma(tp);
17923         if (err) {
17924                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17925                 goto err_out_apeunmap;
17926         }
17927
17928         tg3_init_coal(tp);
17929
17930         pci_set_drvdata(pdev, dev);
17931
17932         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17933             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17934             tg3_asic_rev(tp) == ASIC_REV_5762)
17935                 tg3_flag_set(tp, PTP_CAPABLE);
17936
17937         tg3_timer_init(tp);
17938
17939         tg3_carrier_off(tp);
17940
17941         err = register_netdev(dev);
17942         if (err) {
17943                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17944                 goto err_out_apeunmap;
17945         }
17946
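        /* A failed PHC registration is non-fatal: clear the handle and
         * run without a PTP clock instead of aborting the probe.
         */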
17947         if (tg3_flag(tp, PTP_CAPABLE)) {
17948                 tg3_ptp_init(tp);
17949                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17950                                                    &tp->pdev->dev);
17951                 if (IS_ERR(tp->ptp_clock))
17952                         tp->ptp_clock = NULL;
17953         }
17954
17955         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17956                     tp->board_part_number,
17957                     tg3_chip_rev_id(tp),
17958                     tg3_bus_string(tp, str),
17959                     dev->dev_addr);
17960
17961         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17962                 char *ethtype;
17963
17964                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17965                         ethtype = "10/100Base-TX";
17966                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17967                         ethtype = "1000Base-SX";
17968                 else
17969                         ethtype = "10/100/1000Base-T";
17970
17971                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17972                             "(WireSpeed[%d], EEE[%d])\n",
17973                             tg3_phy_string(tp), ethtype,
17974                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17975                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17976         }
17977
17978         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17979                     (dev->features & NETIF_F_RXCSUM) != 0,
17980                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17981                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17982                     tg3_flag(tp, ENABLE_ASF) != 0,
17983                     tg3_flag(tp, TSO_CAPABLE) != 0);
17984         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17985                     tp->dma_rwctrl,
17986                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17987                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17988
17989         pci_save_state(pdev);
17990
17991         return 0;
17992
17993 err_out_apeunmap:
17994         if (tp->aperegs) {
17995                 iounmap(tp->aperegs);
17996                 tp->aperegs = NULL;
17997         }
17998
17999 err_out_iounmap:
18000         if (tp->regs) {
18001                 iounmap(tp->regs);
18002                 tp->regs = NULL;
18003         }
18004
18005 err_out_free_dev:
18006         free_netdev(dev);
18007
18008 err_out_free_res:
18009         pci_release_regions(pdev);
18010
18011 err_out_disable_pdev:
18012         if (pci_is_enabled(pdev))
18013                 pci_disable_device(pdev);
18014         return err;
18015 }
18016
18017 static void tg3_remove_one(struct pci_dev *pdev)
18018 {
18019         struct net_device *dev = pci_get_drvdata(pdev);
18020
18021         if (dev) {
18022                 struct tg3 *tp = netdev_priv(dev);
18023
18024                 tg3_ptp_fini(tp);
18025
18026                 release_firmware(tp->fw);
18027
18028                 tg3_reset_task_cancel(tp);
18029
18030                 if (tg3_flag(tp, USE_PHYLIB)) {
18031                         tg3_phy_fini(tp);
18032                         tg3_mdio_fini(tp);
18033                 }
18034
18035                 unregister_netdev(dev);
18036                 if (tp->aperegs) {
18037                         iounmap(tp->aperegs);
18038                         tp->aperegs = NULL;
18039                 }
18040                 if (tp->regs) {
18041                         iounmap(tp->regs);
18042                         tp->regs = NULL;
18043                 }
18044                 free_netdev(dev);
18045                 pci_release_regions(pdev);
18046                 pci_disable_device(pdev);
18047         }
18048 }
18049
18050 #ifdef CONFIG_PM_SLEEP
18051 static int tg3_suspend(struct device *device)
18052 {
18053         struct pci_dev *pdev = to_pci_dev(device);
18054         struct net_device *dev = pci_get_drvdata(pdev);
18055         struct tg3 *tp = netdev_priv(dev);
18056         int err = 0;
18057
18058         rtnl_lock();
18059
18060         if (!netif_running(dev))
18061                 goto unlock;
18062
18063         tg3_reset_task_cancel(tp);
18064         tg3_phy_stop(tp);
18065         tg3_netif_stop(tp);
18066
18067         tg3_timer_stop(tp);
18068
18069         tg3_full_lock(tp, 1);
18070         tg3_disable_ints(tp);
18071         tg3_full_unlock(tp);
18072
18073         netif_device_detach(dev);
18074
18075         tg3_full_lock(tp, 0);
18076         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18077         tg3_flag_clear(tp, INIT_COMPLETE);
18078         tg3_full_unlock(tp);
18079
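        /* If the chip cannot be readied for low power, back out: bring
         * the hardware, timer and queues back up and stay running.
         */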
18080         err = tg3_power_down_prepare(tp);
18081         if (err) {
18082                 int err2;
18083
18084                 tg3_full_lock(tp, 0);
18085
18086                 tg3_flag_set(tp, INIT_COMPLETE);
18087                 err2 = tg3_restart_hw(tp, true);
18088                 if (err2)
18089                         goto out;
18090
18091                 tg3_timer_start(tp);
18092
18093                 netif_device_attach(dev);
18094                 tg3_netif_start(tp);
18095
18096 out:
18097                 tg3_full_unlock(tp);
18098
18099                 if (!err2)
18100                         tg3_phy_start(tp);
18101         }
18102
18103 unlock:
18104         rtnl_unlock();
18105         return err;
18106 }
18107
18108 static int tg3_resume(struct device *device)
18109 {
18110         struct pci_dev *pdev = to_pci_dev(device);
18111         struct net_device *dev = pci_get_drvdata(pdev);
18112         struct tg3 *tp = netdev_priv(dev);
18113         int err = 0;
18114
18115         rtnl_lock();
18116
18117         if (!netif_running(dev))
18118                 goto unlock;
18119
18120         netif_device_attach(dev);
18121
18122         tg3_full_lock(tp, 0);
18123
18124         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18125
18126         tg3_flag_set(tp, INIT_COMPLETE);
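        /* Skip the PHY reset when the link was deliberately kept up
         * across power-down (TG3_PHYFLG_KEEP_LINK_ON_PWRDN), e.g. for
         * Wake-on-LAN.
         */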
18127         err = tg3_restart_hw(tp,
18128                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18129         if (err)
18130                 goto out;
18131
18132         tg3_timer_start(tp);
18133
18134         tg3_netif_start(tp);
18135
18136 out:
18137         tg3_full_unlock(tp);
18138
18139         if (!err)
18140                 tg3_phy_start(tp);
18141
18142 unlock:
18143         rtnl_unlock();
18144         return err;
18145 }
18146 #endif /* CONFIG_PM_SLEEP */
18147
18148 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18149
18150 static void tg3_shutdown(struct pci_dev *pdev)
18151 {
18152         struct net_device *dev = pci_get_drvdata(pdev);
18153         struct tg3 *tp = netdev_priv(dev);
18154
18155         rtnl_lock();
18156         netif_device_detach(dev);
18157
18158         if (netif_running(dev))
18159                 dev_close(dev);
18160
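        /* Power the chip down (arming Wake-on-LAN if configured) only
         * for a real power-off, not for a restart.
         */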
18161         if (system_state == SYSTEM_POWER_OFF)
18162                 tg3_power_down(tp);
18163
18164         rtnl_unlock();
18165 }
18166
18167 /**
18168  * tg3_io_error_detected - called when PCI error is detected
18169  * @pdev: Pointer to PCI device
18170  * @state: The current PCI connection state
18171  *
18172  * This function is called after a PCI bus error affecting
18173  * this device has been detected.
18174  */
18175 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18176                                               pci_channel_state_t state)
18177 {
18178         struct net_device *netdev = pci_get_drvdata(pdev);
18179         struct tg3 *tp = netdev_priv(netdev);
18180         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18181
18182         netdev_info(netdev, "PCI I/O error detected\n");
18183
18184         rtnl_lock();
18185
18186         /* We may not have a netdev yet, or it may not be up */
18187         if (!netdev || !netif_running(netdev))
18188                 goto done;
18189
18190         /* Only a frozen channel needs recovery; permanent errors are not recoverable */
18191         if (state == pci_channel_io_frozen)
18192                 tp->pcierr_recovery = true;
18193
18194         tg3_phy_stop(tp);
18195
18196         tg3_netif_stop(tp);
18197
18198         tg3_timer_stop(tp);
18199
18200         /* Want to make sure that the reset task doesn't run */
18201         tg3_reset_task_cancel(tp);
18202
18203         netif_device_detach(netdev);
18204
18205         /* Clean up software state, even if MMIO is blocked */
18206         tg3_full_lock(tp, 0);
18207         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18208         tg3_full_unlock(tp);
18209
18210 done:
18211         if (state == pci_channel_io_perm_failure) {
18212                 if (netdev) {
18213                         tg3_napi_enable(tp);
18214                         dev_close(netdev);
18215                 }
18216                 err = PCI_ERS_RESULT_DISCONNECT;
18217         } else {
18218                 pci_disable_device(pdev);
18219         }
18220
18221         rtnl_unlock();
18222
18223         return err;
18224 }
18225
18226 /**
18227  * tg3_io_slot_reset - called after the PCI bus has been reset.
18228  * @pdev: Pointer to PCI device
18229  *
18230  * Restart the card from scratch, as if from a cold boot.
18231  * At this point, the card has experienced a hard reset,
18232  * followed by fixups by the BIOS, and has its config space
18233  * set up identically to what it was at cold boot.
18234  */
18235 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18236 {
18237         struct net_device *netdev = pci_get_drvdata(pdev);
18238         struct tg3 *tp = netdev_priv(netdev);
18239         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18240         int err;
18241
18242         rtnl_lock();
18243
18244         if (pci_enable_device(pdev)) {
18245                 dev_err(&pdev->dev,
18246                         "Cannot re-enable PCI device after reset.\n");
18247                 goto done;
18248         }
18249
18250         pci_set_master(pdev);
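        /*
         * Replay the config space saved at probe time, then snapshot it
         * again so any later restore starts from this post-reset state.
         */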
18251         pci_restore_state(pdev);
18252         pci_save_state(pdev);
18253
18254         if (!netdev || !netif_running(netdev)) {
18255                 rc = PCI_ERS_RESULT_RECOVERED;
18256                 goto done;
18257         }
18258
18259         err = tg3_power_up(tp);
18260         if (err)
18261                 goto done;
18262
18263         rc = PCI_ERS_RESULT_RECOVERED;
18264
18265 done:
18266         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18267                 tg3_napi_enable(tp);
18268                 dev_close(netdev);
18269         }
18270         rtnl_unlock();
18271
18272         return rc;
18273 }
18274
18275 /**
18276  * tg3_io_resume - called when traffic can start flowing again.
18277  * @pdev: Pointer to PCI device
18278  *
18279  * This callback is called when the error recovery driver tells
18280  * us that it's OK to resume normal operation.
18281  */
18282 static void tg3_io_resume(struct pci_dev *pdev)
18283 {
18284         struct net_device *netdev = pci_get_drvdata(pdev);
18285         struct tg3 *tp = netdev_priv(netdev);
18286         int err;
18287
18288         rtnl_lock();
18289
18290         if (!netdev || !netif_running(netdev))
18291                 goto done;
18292
18293         tg3_full_lock(tp, 0);
18294         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18295         tg3_flag_set(tp, INIT_COMPLETE);
18296         err = tg3_restart_hw(tp, true);
18297         if (err) {
18298                 tg3_full_unlock(tp);
18299                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18300                 goto done;
18301         }
18302
18303         netif_device_attach(netdev);
18304
18305         tg3_timer_start(tp);
18306
18307         tg3_netif_start(tp);
18308
18309         tg3_full_unlock(tp);
18310
18311         tg3_phy_start(tp);
18312
18313 done:
18314         tp->pcierr_recovery = false;
18315         rtnl_unlock();
18316 }
18317
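/*
 * PCI error recovery flow: the core calls ->error_detected() to quiesce
 * the device, ->slot_reset() after the link/slot reset, and ->resume()
 * once traffic may flow again.
 */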
18318 static const struct pci_error_handlers tg3_err_handler = {
18319         .error_detected = tg3_io_error_detected,
18320         .slot_reset     = tg3_io_slot_reset,
18321         .resume         = tg3_io_resume
18322 };
18323
18324 static struct pci_driver tg3_driver = {
18325         .name           = DRV_MODULE_NAME,
18326         .id_table       = tg3_pci_tbl,
18327         .probe          = tg3_init_one,
18328         .remove         = tg3_remove_one,
18329         .err_handler    = &tg3_err_handler,
18330         .driver.pm      = &tg3_pm_ops,
18331         .shutdown       = tg3_shutdown,
18332 };
18333
18334 module_pci_driver(tg3_driver);