/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
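/* For illustration: tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. a test_bit()
 * on the flag bitmap.  Routing the token-pasted name through the inline
 * helpers means a misspelled flag name fails to compile instead of
 * silently testing the wrong bit.
 */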

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
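/* NEXT_TX is exactly the '& (foo - 1)' form mentioned above; it relies
 * on TG3_TX_RING_SIZE (512) being a power of two.
 */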

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
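/* Example: with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the queue is woken once 511 / 4 = 127 descriptors are free again.
 */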
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

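/* Basic register accessors.  tg3_write32()/tg3_read32() go straight to
 * the memory-mapped register window; the _ape_ variants address the
 * separate APE (Application Processing Engine) register space.
 */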
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

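/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is moved
 * through TG3PCI_REG_DATA.  indirect_lock keeps the two config cycles
 * atomic with respect to other indirect accesses.
 */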
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

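/* Write a register and read it straight back so the posted PCI write is
 * flushed to the chip before the caller continues.
 */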
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, e.g. when the GPIOs are toggled to switch
 * power.  TG3PCI_CLOCK_CTRL is another, e.g. when the clock frequencies
 * are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

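/* TX mailbox writes.  Chips with the TXD_MBOX_HWBUG need the value
 * written twice, and on write-reordering hosts (or when posted writes
 * must be flushed) the mailbox is read back before the hardware acts on
 * the new producer index.
 */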
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
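/* Note that the tw32()/tr32() macro family implicitly uses a local
 * variable named "tp" (struct tg3 *) that must be in scope at the
 * call site.
 */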

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

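/* Acquire one of the hardware semaphores that arbitrate access between
 * the driver and the APE firmware.  Returns 0 on success, or -EBUSY if
 * the grant bit has not appeared after roughly one millisecond.
 */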
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

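/* Read from the APE scratchpad area.  The transfer is chunked to the
 * size of the shared message buffer: for each chunk the driver posts a
 * scratchpad-read event to the APE, waits for it to complete, and then
 * copies the result out of the message area one word at a time.
 */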
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

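/* Mask the PCI interrupt line and write 0x1 to every interrupt mailbox;
 * a non-zero mailbox value tells the chip to hold off interrupts for
 * that vector.
 */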
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable()
 *  Similar to tg3_enable_ints(), but it accurately determines whether
 *  there is new work pending and can return without flushing the PIO
 *  write that re-enables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

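/* Switch the core clock to/from the alternate clock source.  Chips
 * with a CPMU and the 5780 class manage their clocks in hardware, so
 * they are skipped here.
 */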
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

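/* Read a PHY register through the MAC's MII management interface.
 * Autopolling is paused so this manual frame cannot collide with the
 * hardware's own polling, and MAC_MI_COM is then polled until the BUSY
 * bit clears (up to PHY_BUSY_LOOPS iterations of 10 usec).
 */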
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

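/* Clause 45 style access through the clause 22 MMD registers: select
 * the device address in MII_TG3_MMD_CTRL, latch the register address
 * via MII_TG3_MMD_ADDRESS, then switch to no-increment data mode and
 * move the data through that same address register.
 */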
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

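/* Program the 5785 MAC for the attached PHY type: pick the matching LED
 * modes and, for RGMII PHYs, set up in-band status signaling and the
 * RX/TX clock timeout bits.
 */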
1404 static void tg3_mdio_config_5785(struct tg3 *tp)
1405 {
1406         u32 val;
1407         struct phy_device *phydev;
1408
1409         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1410         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1411         case PHY_ID_BCM50610:
1412         case PHY_ID_BCM50610M:
1413                 val = MAC_PHYCFG2_50610_LED_MODES;
1414                 break;
1415         case PHY_ID_BCMAC131:
1416                 val = MAC_PHYCFG2_AC131_LED_MODES;
1417                 break;
1418         case PHY_ID_RTL8211C:
1419                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1420                 break;
1421         case PHY_ID_RTL8201E:
1422                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1423                 break;
1424         default:
1425                 return;
1426         }
1427
1428         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1429                 tw32(MAC_PHYCFG2, val);
1430
1431                 val = tr32(MAC_PHYCFG1);
1432                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1433                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1434                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1435                 tw32(MAC_PHYCFG1, val);
1436
1437                 return;
1438         }
1439
1440         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1441                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1442                        MAC_PHYCFG2_FMODE_MASK_MASK |
1443                        MAC_PHYCFG2_GMODE_MASK_MASK |
1444                        MAC_PHYCFG2_ACT_MASK_MASK   |
1445                        MAC_PHYCFG2_QUAL_MASK_MASK |
1446                        MAC_PHYCFG2_INBAND_ENABLE;
1447
1448         tw32(MAC_PHYCFG2, val);
1449
1450         val = tr32(MAC_PHYCFG1);
1451         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1452                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1453         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1454                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1455                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1456                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1457                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1458         }
1459         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1460                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1461         tw32(MAC_PHYCFG1, val);
1462
1463         val = tr32(MAC_EXT_RGMII_MODE);
1464         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1465                  MAC_RGMII_MODE_RX_QUALITY |
1466                  MAC_RGMII_MODE_RX_ACTIVITY |
1467                  MAC_RGMII_MODE_RX_ENG_DET |
1468                  MAC_RGMII_MODE_TX_ENABLE |
1469                  MAC_RGMII_MODE_TX_LOWPWR |
1470                  MAC_RGMII_MODE_TX_RESET);
1471         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1472                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1473                         val |= MAC_RGMII_MODE_RX_INT_B |
1474                                MAC_RGMII_MODE_RX_QUALITY |
1475                                MAC_RGMII_MODE_RX_ACTIVITY |
1476                                MAC_RGMII_MODE_RX_ENG_DET;
1477                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1478                         val |= MAC_RGMII_MODE_TX_ENABLE |
1479                                MAC_RGMII_MODE_TX_LOWPWR |
1480                                MAC_RGMII_MODE_TX_RESET;
1481         }
1482         tw32(MAC_EXT_RGMII_MODE, val);
1483 }
1484
1485 static void tg3_mdio_start(struct tg3 *tp)
1486 {
1487         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1488         tw32_f(MAC_MI_MODE, tp->mi_mode);
1489         udelay(80);
1490
1491         if (tg3_flag(tp, MDIOBUS_INITED) &&
1492             tg3_asic_rev(tp) == ASIC_REV_5785)
1493                 tg3_mdio_config_5785(tp);
1494 }
1495
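/* Work out the PHY address and, when phylib is in use, allocate and
 * register the mdio bus.  On 5717-plus parts the address follows the
 * PCI function number (serdes PHYs sit 7 addresses higher), SSB
 * roboswitch cores ask the ssb layer, and everything else uses the
 * fixed TG3_PHY_MII_ADDR.
 */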
1496 static int tg3_mdio_init(struct tg3 *tp)
1497 {
1498         int i;
1499         u32 reg;
1500         struct phy_device *phydev;
1501
1502         if (tg3_flag(tp, 5717_PLUS)) {
1503                 u32 is_serdes;
1504
1505                 tp->phy_addr = tp->pci_fn + 1;
1506
1507                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1508                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1509                 else
1510                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1511                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1512                 if (is_serdes)
1513                         tp->phy_addr += 7;
1514         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1515                 int addr;
1516
1517                 addr = ssb_gige_get_phyaddr(tp->pdev);
1518                 if (addr < 0)
1519                         return addr;
1520                 tp->phy_addr = addr;
1521         } else
1522                 tp->phy_addr = TG3_PHY_MII_ADDR;
1523
1524         tg3_mdio_start(tp);
1525
1526         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1527                 return 0;
1528
1529         tp->mdio_bus = mdiobus_alloc();
1530         if (tp->mdio_bus == NULL)
1531                 return -ENOMEM;
1532
1533         tp->mdio_bus->name     = "tg3 mdio bus";
1534         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1535                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1536         tp->mdio_bus->priv     = tp;
1537         tp->mdio_bus->parent   = &tp->pdev->dev;
1538         tp->mdio_bus->read     = &tg3_mdio_read;
1539         tp->mdio_bus->write    = &tg3_mdio_write;
1540         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1541
1542         /* The bus registration will look for all the PHYs on the mdio bus.
1543          * Unfortunately, it does not ensure the PHY is powered up before
1544          * accessing the PHY ID registers.  A chip reset is the
1545          * quickest way to bring the device back to an operational state.
1546          */
1547         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1548                 tg3_bmcr_reset(tp);
1549
1550         i = mdiobus_register(tp->mdio_bus);
1551         if (i) {
1552                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1553                 mdiobus_free(tp->mdio_bus);
1554                 return i;
1555         }
1556
1557         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1558
1559         if (!phydev || !phydev->drv) {
1560                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1561                 mdiobus_unregister(tp->mdio_bus);
1562                 mdiobus_free(tp->mdio_bus);
1563                 return -ENODEV;
1564         }
1565
1566         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1567         case PHY_ID_BCM57780:
1568                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1569                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1570                 break;
1571         case PHY_ID_BCM50610:
1572         case PHY_ID_BCM50610M:
1573                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1574                                      PHY_BRCM_RX_REFCLK_UNUSED |
1575                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1576                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1578                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1579                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1580                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1581                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1582                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1583                 /* fallthru */
1584         case PHY_ID_RTL8211C:
1585                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1586                 break;
1587         case PHY_ID_RTL8201E:
1588         case PHY_ID_BCMAC131:
1589                 phydev->interface = PHY_INTERFACE_MODE_MII;
1590                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1592                 break;
1593         }
1594
1595         tg3_flag_set(tp, MDIOBUS_INITED);
1596
1597         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598                 tg3_mdio_config_5785(tp);
1599
1600         return 0;
1601 }
1602
1603 static void tg3_mdio_fini(struct tg3 *tp)
1604 {
1605         if (tg3_flag(tp, MDIOBUS_INITED)) {
1606                 tg3_flag_clear(tp, MDIOBUS_INITED);
1607                 mdiobus_unregister(tp->mdio_bus);
1608                 mdiobus_free(tp->mdio_bus);
1609         }
1610 }
1611
1612 /* tp->lock is held. */
1613 static inline void tg3_generate_fw_event(struct tg3 *tp)
1614 {
1615         u32 val;
1616
1617         val = tr32(GRC_RX_CPU_EVENT);
1618         val |= GRC_RX_CPU_DRIVER_EVENT;
1619         tw32_f(GRC_RX_CPU_EVENT, val);
1620
1621         tp->last_event_jiffies = jiffies;
1622 }
1623
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1625
1626 /* tp->lock is held. */
1627 static void tg3_wait_for_event_ack(struct tg3 *tp)
1628 {
1629         int i;
1630         unsigned int delay_cnt;
1631         long time_remain;
1632
1633         /* If enough time has passed, no wait is necessary. */
1634         time_remain = (long)(tp->last_event_jiffies + 1 +
1635                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1636                       (long)jiffies;
1637         if (time_remain < 0)
1638                 return;
1639
1640         /* Check if we can shorten the wait time. */
1641         delay_cnt = jiffies_to_usecs(time_remain);
1642         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
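        /* The loop below polls in 8 usec steps (udelay(8)), so turn
         * the remaining time into a poll count, rounding up so we
         * always poll at least once.
         */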
1644         delay_cnt = (delay_cnt >> 3) + 1;
1645
1646         for (i = 0; i < delay_cnt; i++) {
1647                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1648                         break;
1649                 if (pci_channel_offline(tp->pdev))
1650                         break;
1651
1652                 udelay(8);
1653         }
1654 }
1655
1656 /* tp->lock is held. */
1657 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1658 {
1659         u32 reg, val;
1660
1661         val = 0;
1662         if (!tg3_readphy(tp, MII_BMCR, &reg))
1663                 val = reg << 16;
1664         if (!tg3_readphy(tp, MII_BMSR, &reg))
1665                 val |= (reg & 0xffff);
1666         *data++ = val;
1667
1668         val = 0;
1669         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1670                 val = reg << 16;
1671         if (!tg3_readphy(tp, MII_LPA, &reg))
1672                 val |= (reg & 0xffff);
1673         *data++ = val;
1674
1675         val = 0;
1676         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1677                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1678                         val = reg << 16;
1679                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1680                         val |= (reg & 0xffff);
1681         }
1682         *data++ = val;
1683
1684         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1685                 val = reg << 16;
1686         else
1687                 val = 0;
1688         *data++ = val;
1689 }
1690
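/* Hand the current PHY state to the management firmware (ASF) via
 * the NIC SRAM command mailbox so the out-of-band interface can
 * follow link changes.  Only 5780-class devices with ASF enabled do
 * anything here.
 */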
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1693 {
1694         u32 data[4];
1695
1696         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1697                 return;
1698
1699         tg3_phy_gather_ump_data(tp, data);
1700
1701         tg3_wait_for_event_ack(tp);
1702
1703         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1709
1710         tg3_generate_fw_event(tp);
1711 }
1712
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1715 {
1716         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717                 /* Wait for RX cpu to ACK the previous event. */
1718                 tg3_wait_for_event_ack(tp);
1719
1720                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1721
1722                 tg3_generate_fw_event(tp);
1723
1724                 /* Wait for RX cpu to ACK this event. */
1725                 tg3_wait_for_event_ack(tp);
1726         }
1727 }
1728
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1731 {
1732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1734
1735         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1736                 switch (kind) {
1737                 case RESET_KIND_INIT:
1738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739                                       DRV_STATE_START);
1740                         break;
1741
1742                 case RESET_KIND_SHUTDOWN:
1743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744                                       DRV_STATE_UNLOAD);
1745                         break;
1746
1747                 case RESET_KIND_SUSPEND:
1748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749                                       DRV_STATE_SUSPEND);
1750                         break;
1751
1752                 default:
1753                         break;
1754                 }
1755         }
1756 }
1757
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1760 {
1761         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1762                 switch (kind) {
1763                 case RESET_KIND_INIT:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_START_DONE);
1766                         break;
1767
1768                 case RESET_KIND_SHUTDOWN:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_UNLOAD_DONE);
1771                         break;
1772
1773                 default:
1774                         break;
1775                 }
1776         }
1777 }
1778
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 {
1782         if (tg3_flag(tp, ENABLE_ASF)) {
1783                 switch (kind) {
1784                 case RESET_KIND_INIT:
1785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786                                       DRV_STATE_START);
1787                         break;
1788
1789                 case RESET_KIND_SHUTDOWN:
1790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791                                       DRV_STATE_UNLOAD);
1792                         break;
1793
1794                 case RESET_KIND_SUSPEND:
1795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796                                       DRV_STATE_SUSPEND);
1797                         break;
1798
1799                 default:
1800                         break;
1801                 }
1802         }
1803 }
1804
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807         int i;
1808         u32 val;
1809
1810         if (tg3_flag(tp, NO_FWARE_REPORTED))
1811                 return 0;
1812
1813         if (tg3_flag(tp, IS_SSB_CORE)) {
1814                 /* We don't use firmware. */
1815                 return 0;
1816         }
1817
1818         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819                 /* Wait up to 20ms for init done. */
1820                 for (i = 0; i < 200; i++) {
1821                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822                                 return 0;
1823                         if (pci_channel_offline(tp->pdev))
1824                                 return -ENODEV;
1825
1826                         udelay(100);
1827                 }
1828                 return -ENODEV;
1829         }
1830
1831         /* Wait for firmware initialization to complete. */
1832         for (i = 0; i < 100000; i++) {
1833                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835                         break;
1836                 if (pci_channel_offline(tp->pdev)) {
1837                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1839                                 netdev_info(tp->dev, "No firmware running\n");
1840                         }
1841
1842                         break;
1843                 }
1844
1845                 udelay(10);
1846         }
1847
1848         /* Chip might not be fitted with firmware.  Some Sun onboard
1849          * parts are configured like that.  So don't signal the timeout
1850          * of the above loop as an error, but do report the lack of
1851          * running firmware once.
1852          */
1853         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1855
1856                 netdev_info(tp->dev, "No firmware running\n");
1857         }
1858
1859         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860                 /* The 57765 A0 needs a little more
1861                  * time to do some important work.
1862                  */
1863                 mdelay(10);
1864         }
1865
1866         return 0;
1867 }
1868
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871         if (!netif_carrier_ok(tp->dev)) {
1872                 netif_info(tp, link, tp->dev, "Link is down\n");
1873                 tg3_ump_link_report(tp);
1874         } else if (netif_msg_link(tp)) {
1875                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876                             (tp->link_config.active_speed == SPEED_1000 ?
1877                              1000 :
1878                              (tp->link_config.active_speed == SPEED_100 ?
1879                               100 : 10)),
1880                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1881                              "full" : "half"));
1882
1883                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885                             "on" : "off",
1886                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887                             "on" : "off");
1888
1889                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890                         netdev_info(tp->dev, "EEE is %s\n",
1891                                     tp->setlpicnt ? "enabled" : "disabled");
1892
1893                 tg3_ump_link_report(tp);
1894         }
1895
1896         tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898
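/* Translate 1000Base-T pause advertisement bits into
 * FLOW_CTRL_{RX,TX}: symmetric PAUSE alone means both directions,
 * PAUSE plus ASYM means receive only, ASYM alone means transmit
 * only.
 */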
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901         u32 flowctrl = 0;
1902
1903         if (adv & ADVERTISE_PAUSE_CAP) {
1904                 flowctrl |= FLOW_CTRL_RX;
1905                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1906                         flowctrl |= FLOW_CTRL_TX;
1907         } else if (adv & ADVERTISE_PAUSE_ASYM)
1908                 flowctrl |= FLOW_CTRL_TX;
1909
1910         return flowctrl;
1911 }
1912
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915         u16 miireg;
1916
1917         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918                 miireg = ADVERTISE_1000XPAUSE;
1919         else if (flow_ctrl & FLOW_CTRL_TX)
1920                 miireg = ADVERTISE_1000XPSE_ASYM;
1921         else if (flow_ctrl & FLOW_CTRL_RX)
1922                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923         else
1924                 miireg = 0;
1925
1926         return miireg;
1927 }
1928
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931         u32 flowctrl = 0;
1932
1933         if (adv & ADVERTISE_1000XPAUSE) {
1934                 flowctrl |= FLOW_CTRL_RX;
1935                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936                         flowctrl |= FLOW_CTRL_TX;
1937         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1938                 flowctrl |= FLOW_CTRL_TX;
1939
1940         return flowctrl;
1941 }
1942
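/* Resolve local and remote 1000Base-X pause advertisements using the
 * usual 802.3 rules:
 *
 *   both advertise PAUSE                  -> TX and RX pause
 *   local PAUSE+ASYM, remote ASYM only    -> RX pause only
 *   local ASYM only, remote PAUSE+ASYM    -> TX pause only
 */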
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945         u8 cap = 0;
1946
1947         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950                 if (lcladv & ADVERTISE_1000XPAUSE)
1951                         cap = FLOW_CTRL_RX;
1952                 if (rmtadv & ADVERTISE_1000XPAUSE)
1953                         cap = FLOW_CTRL_TX;
1954         }
1955
1956         return cap;
1957 }
1958
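/* Apply the negotiated (or forced) pause configuration to the MAC.
 * With autoneg and PAUSE_AUTONEG active the result is resolved from
 * the advertisements; otherwise tp->link_config.flowctrl is used
 * as-is.  RX_MODE/TX_MODE are only rewritten when a bit changed.
 */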
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961         u8 autoneg;
1962         u8 flowctrl = 0;
1963         u32 old_rx_mode = tp->rx_mode;
1964         u32 old_tx_mode = tp->tx_mode;
1965
1966         if (tg3_flag(tp, USE_PHYLIB))
1967                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968         else
1969                 autoneg = tp->link_config.autoneg;
1970
1971         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974                 else
1975                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976         } else
1977                 flowctrl = tp->link_config.flowctrl;
1978
1979         tp->link_config.active_flowctrl = flowctrl;
1980
1981         if (flowctrl & FLOW_CTRL_RX)
1982                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983         else
1984                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985
1986         if (old_rx_mode != tp->rx_mode)
1987                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1988
1989         if (flowctrl & FLOW_CTRL_TX)
1990                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991         else
1992                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993
1994         if (old_tx_mode != tp->tx_mode)
1995                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997
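/* phylib link-change callback, registered through phy_connect() in
 * tg3_phy_init().  Reconciles MAC_MODE, flow control and the TX
 * IPG/slot-time settings with the state phylib reports, then emits a
 * link message if anything user-visible changed.
 */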
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000         u8 oldflowctrl, linkmesg = 0;
2001         u32 mac_mode, lcl_adv, rmt_adv;
2002         struct tg3 *tp = netdev_priv(dev);
2003         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004
2005         spin_lock_bh(&tp->lock);
2006
2007         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008                                     MAC_MODE_HALF_DUPLEX);
2009
2010         oldflowctrl = tp->link_config.active_flowctrl;
2011
2012         if (phydev->link) {
2013                 lcl_adv = 0;
2014                 rmt_adv = 0;
2015
2016                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2018                 else if (phydev->speed == SPEED_1000 ||
2019                          tg3_asic_rev(tp) != ASIC_REV_5785)
2020                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021                 else
2022                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2023
2024                 if (phydev->duplex == DUPLEX_HALF)
2025                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2026                 else {
2027                         lcl_adv = mii_advertise_flowctrl(
2028                                   tp->link_config.flowctrl);
2029
2030                         if (phydev->pause)
2031                                 rmt_adv = LPA_PAUSE_CAP;
2032                         if (phydev->asym_pause)
2033                                 rmt_adv |= LPA_PAUSE_ASYM;
2034                 }
2035
2036                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037         } else
2038                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039
2040         if (mac_mode != tp->mac_mode) {
2041                 tp->mac_mode = mac_mode;
2042                 tw32_f(MAC_MODE, tp->mac_mode);
2043                 udelay(40);
2044         }
2045
2046         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047                 if (phydev->speed == SPEED_10)
2048                         tw32(MAC_MI_STAT,
2049                              MAC_MI_STAT_10MBPS_MODE |
2050                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051                 else
2052                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053         }
2054
2055         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056                 tw32(MAC_TX_LENGTHS,
2057                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058                       (6 << TX_LENGTHS_IPG_SHIFT) |
2059                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060         else
2061                 tw32(MAC_TX_LENGTHS,
2062                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063                       (6 << TX_LENGTHS_IPG_SHIFT) |
2064                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065
2066         if (phydev->link != tp->old_link ||
2067             phydev->speed != tp->link_config.active_speed ||
2068             phydev->duplex != tp->link_config.active_duplex ||
2069             oldflowctrl != tp->link_config.active_flowctrl)
2070                 linkmesg = 1;
2071
2072         tp->old_link = phydev->link;
2073         tp->link_config.active_speed = phydev->speed;
2074         tp->link_config.active_duplex = phydev->duplex;
2075
2076         spin_unlock_bh(&tp->lock);
2077
2078         if (linkmesg)
2079                 tg3_link_report(tp);
2080 }
2081
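/* Attach the net device to its PHY through phylib.  The PHY's
 * supported feature mask is trimmed to what the MAC can actually do,
 * e.g. 10/100-only devices lose the gigabit modes.
 */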
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084         struct phy_device *phydev;
2085
2086         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087                 return 0;
2088
2089         /* Bring the PHY back to a known state. */
2090         tg3_bmcr_reset(tp);
2091
2092         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093
2094         /* Attach the MAC to the PHY. */
2095         phydev = phy_connect(tp->dev, phydev_name(phydev),
2096                              tg3_adjust_link, phydev->interface);
2097         if (IS_ERR(phydev)) {
2098                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099                 return PTR_ERR(phydev);
2100         }
2101
2102         /* Mask with MAC supported features. */
2103         switch (phydev->interface) {
2104         case PHY_INTERFACE_MODE_GMII:
2105         case PHY_INTERFACE_MODE_RGMII:
2106                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107                         phydev->supported &= (PHY_GBIT_FEATURES |
2108                                               SUPPORTED_Pause |
2109                                               SUPPORTED_Asym_Pause);
2110                         break;
2111                 }
2112                 /* fallthru */
2113         case PHY_INTERFACE_MODE_MII:
2114                 phydev->supported &= (PHY_BASIC_FEATURES |
2115                                       SUPPORTED_Pause |
2116                                       SUPPORTED_Asym_Pause);
2117                 break;
2118         default:
2119                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120                 return -EINVAL;
2121         }
2122
2123         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124
2125         phydev->advertising = phydev->supported;
2126
2127         phy_attached_info(phydev);
2128
2129         return 0;
2130 }
2131
2132 static void tg3_phy_start(struct tg3 *tp)
2133 {
2134         struct phy_device *phydev;
2135
2136         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2137                 return;
2138
2139         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2140
2141         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2142                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2143                 phydev->speed = tp->link_config.speed;
2144                 phydev->duplex = tp->link_config.duplex;
2145                 phydev->autoneg = tp->link_config.autoneg;
2146                 phydev->advertising = tp->link_config.advertising;
2147         }
2148
2149         phy_start(phydev);
2150
2151         phy_start_aneg(phydev);
2152 }
2153
2154 static void tg3_phy_stop(struct tg3 *tp)
2155 {
2156         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2157                 return;
2158
2159         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2160 }
2161
2162 static void tg3_phy_fini(struct tg3 *tp)
2163 {
2164         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2165                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2166                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2167         }
2168 }
2169
2170 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2171 {
2172         int err;
2173         u32 val;
2174
2175         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2176                 return 0;
2177
2178         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2179                 /* Cannot do read-modify-write on 5401 */
2180                 err = tg3_phy_auxctl_write(tp,
2181                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2182                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2183                                            0x4c20);
2184                 goto done;
2185         }
2186
2187         err = tg3_phy_auxctl_read(tp,
2188                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2189         if (err)
2190                 return err;
2191
2192         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2193         err = tg3_phy_auxctl_write(tp,
2194                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2195
2196 done:
2197         return err;
2198 }
2199
2200 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2201 {
2202         u32 phytest;
2203
2204         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2205                 u32 phy;
2206
2207                 tg3_writephy(tp, MII_TG3_FET_TEST,
2208                              phytest | MII_TG3_FET_SHADOW_EN);
2209                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2210                         if (enable)
2211                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212                         else
2213                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2215                 }
2216                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2217         }
2218 }
2219
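/* Enable or disable the PHY's auto power-down (APD) feature.  FET
 * style PHYs use the shadow-register path in
 * tg3_phy_fet_toggle_apd(); everything else programs the MISC_SHDW
 * SCR5 and APD selectors.
 */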
2220 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2221 {
2222         u32 reg;
2223
2224         if (!tg3_flag(tp, 5705_PLUS) ||
2225             (tg3_flag(tp, 5717_PLUS) &&
2226              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2227                 return;
2228
2229         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2230                 tg3_phy_fet_toggle_apd(tp, enable);
2231                 return;
2232         }
2233
2234         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2235               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2236               MII_TG3_MISC_SHDW_SCR5_SDTL |
2237               MII_TG3_MISC_SHDW_SCR5_C125OE;
2238         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2239                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2240
2241         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2242
2243
2244         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2245         if (enable)
2246                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2247
2248         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2249 }
2250
2251 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2252 {
2253         u32 phy;
2254
2255         if (!tg3_flag(tp, 5705_PLUS) ||
2256             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2257                 return;
2258
2259         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2260                 u32 ephy;
2261
2262                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2263                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2264
2265                         tg3_writephy(tp, MII_TG3_FET_TEST,
2266                                      ephy | MII_TG3_FET_SHADOW_EN);
2267                         if (!tg3_readphy(tp, reg, &phy)) {
2268                                 if (enable)
2269                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270                                 else
2271                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272                                 tg3_writephy(tp, reg, phy);
2273                         }
2274                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2275                 }
2276         } else {
2277                 int ret;
2278
2279                 ret = tg3_phy_auxctl_read(tp,
2280                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2281                 if (!ret) {
2282                         if (enable)
2283                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284                         else
2285                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286                         tg3_phy_auxctl_write(tp,
2287                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2288                 }
2289         }
2290 }
2291
2292 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2293 {
2294         int ret;
2295         u32 val;
2296
2297         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2298                 return;
2299
2300         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2301         if (!ret)
2302                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2303                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2304 }
2305
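/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value cached in tp->phy_otp.  Each field is extracted with its
 * TG3_OTP_*_MASK/_SHIFT pair and written to the matching DSP tap.
 */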
2306 static void tg3_phy_apply_otp(struct tg3 *tp)
2307 {
2308         u32 otp, phy;
2309
2310         if (!tp->phy_otp)
2311                 return;
2312
2313         otp = tp->phy_otp;
2314
2315         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2316                 return;
2317
2318         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2319         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2320         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2321
2322         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2323               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2324         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2325
2326         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2327         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2328         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2329
2330         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2331         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2332
2333         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2335
2336         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2337               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2338         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2339
2340         tg3_phy_toggle_auxctl_smdsp(tp, false);
2341 }
2342
2343 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2344 {
2345         u32 val;
2346         struct ethtool_eee *dest = &tp->eee;
2347
2348         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349                 return;
2350
2351         if (eee)
2352                 dest = eee;
2353
2354         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2355                 return;
2356
2357         /* Pull eee_active */
2358         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2359             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2360                 dest->eee_active = 1;
2361         } else
2362                 dest->eee_active = 0;
2363
2364         /* Pull lp advertised settings */
2365         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2366                 return;
2367         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2368
2369         /* Pull advertised and eee_enabled settings */
2370         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2371                 return;
2372         dest->eee_enabled = !!val;
2373         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374
2375         /* Pull tx_lpi_enabled */
2376         val = tr32(TG3_CPMU_EEE_MODE);
2377         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2378
2379         /* Pull lpi timer value */
2380         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2381 }
2382
2383 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2384 {
2385         u32 val;
2386
2387         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2388                 return;
2389
2390         tp->setlpicnt = 0;
2391
2392         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2393             current_link_up &&
2394             tp->link_config.active_duplex == DUPLEX_FULL &&
2395             (tp->link_config.active_speed == SPEED_100 ||
2396              tp->link_config.active_speed == SPEED_1000)) {
2397                 u32 eeectl;
2398
2399                 if (tp->link_config.active_speed == SPEED_1000)
2400                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2401                 else
2402                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2403
2404                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2405
2406                 tg3_eee_pull_config(tp, NULL);
2407                 if (tp->eee.eee_active)
2408                         tp->setlpicnt = 2;
2409         }
2410
2411         if (!tp->setlpicnt) {
2412                 if (current_link_up &&
2413                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2414                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2415                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2416                 }
2417
2418                 val = tr32(TG3_CPMU_EEE_MODE);
2419                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2420         }
2421 }
2422
2423 static void tg3_phy_eee_enable(struct tg3 *tp)
2424 {
2425         u32 val;
2426
2427         if (tp->link_config.active_speed == SPEED_1000 &&
2428             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2429              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2430              tg3_flag(tp, 57765_CLASS)) &&
2431             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432                 val = MII_TG3_DSP_TAP26_ALNOKO |
2433                       MII_TG3_DSP_TAP26_RMRXSTO;
2434                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2435                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2436         }
2437
2438         val = tr32(TG3_CPMU_EEE_MODE);
2439         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2440 }
2441
2442 static int tg3_wait_macro_done(struct tg3 *tp)
2443 {
2444         int limit = 100;
2445
2446         while (limit--) {
2447                 u32 tmp32;
2448
2449                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2450                         if ((tmp32 & 0x1000) == 0)
2451                                 break;
2452                 }
2453         }
2454         if (limit < 0)
2455                 return -EBUSY;
2456
2457         return 0;
2458 }
2459
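/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A macro-done timeout sets *resetp so the caller
 * resets the PHY and retries; a pattern mismatch just fails with
 * -EBUSY.
 */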
2460 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2461 {
2462         static const u32 test_pat[4][6] = {
2463         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2464         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2465         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2466         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2467         };
2468         int chan;
2469
2470         for (chan = 0; chan < 4; chan++) {
2471                 int i;
2472
2473                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2474                              (chan * 0x2000) | 0x0200);
2475                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2476
2477                 for (i = 0; i < 6; i++)
2478                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2479                                      test_pat[chan][i]);
2480
2481                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482                 if (tg3_wait_macro_done(tp)) {
2483                         *resetp = 1;
2484                         return -EBUSY;
2485                 }
2486
2487                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2488                              (chan * 0x2000) | 0x0200);
2489                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2490                 if (tg3_wait_macro_done(tp)) {
2491                         *resetp = 1;
2492                         return -EBUSY;
2493                 }
2494
2495                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2496                 if (tg3_wait_macro_done(tp)) {
2497                         *resetp = 1;
2498                         return -EBUSY;
2499                 }
2500
2501                 for (i = 0; i < 6; i += 2) {
2502                         u32 low, high;
2503
2504                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2505                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2506                             tg3_wait_macro_done(tp)) {
2507                                 *resetp = 1;
2508                                 return -EBUSY;
2509                         }
2510                         low &= 0x7fff;
2511                         high &= 0x000f;
2512                         if (low != test_pat[chan][i] ||
2513                             high != test_pat[chan][i+1]) {
2514                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2515                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2516                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2517
2518                                 return -EBUSY;
2519                         }
2520                 }
2521         }
2522
2523         return 0;
2524 }
2525
2526 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2527 {
2528         int chan;
2529
2530         for (chan = 0; chan < 4; chan++) {
2531                 int i;
2532
2533                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2534                              (chan * 0x2000) | 0x0200);
2535                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2536                 for (i = 0; i < 6; i++)
2537                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2538                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2539                 if (tg3_wait_macro_done(tp))
2540                         return -EBUSY;
2541         }
2542
2543         return 0;
2544 }
2545
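/* DSP fixup for 5703/5704/5705 PHYs: force 1000 Mbps full duplex in
 * master mode, verify the DSP test patterns (resetting the PHY and
 * retrying up to 10 times), then restore the original MII_CTRL1000
 * and transmitter/interrupt state.
 */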
2546 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2547 {
2548         u32 reg32, phy9_orig;
2549         int retries, do_phy_reset, err;
2550
2551         retries = 10;
2552         do_phy_reset = 1;
2553         do {
2554                 if (do_phy_reset) {
2555                         err = tg3_bmcr_reset(tp);
2556                         if (err)
2557                                 return err;
2558                         do_phy_reset = 0;
2559                 }
2560
2561                 /* Disable transmitter and interrupt.  */
2562                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2563                         continue;
2564
2565                 reg32 |= 0x3000;
2566                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2567
2568                 /* Set full-duplex, 1000 mbps.  */
2569                 tg3_writephy(tp, MII_BMCR,
2570                              BMCR_FULLDPLX | BMCR_SPEED1000);
2571
2572                 /* Set to master mode.  */
2573                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2574                         continue;
2575
2576                 tg3_writephy(tp, MII_CTRL1000,
2577                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2578
2579                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2580                 if (err)
2581                         return err;
2582
2583                 /* Block the PHY control access.  */
2584                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2585
2586                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2587                 if (!err)
2588                         break;
2589         } while (--retries);
2590
2591         err = tg3_phy_reset_chanpat(tp);
2592         if (err)
2593                 return err;
2594
2595         tg3_phydsp_write(tp, 0x8005, 0x0000);
2596
2597         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2598         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2599
2600         tg3_phy_toggle_auxctl_smdsp(tp, false);
2601
2602         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2603
2604         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2605         if (err)
2606                 return err;
2607
2608         reg32 &= ~0x3000;
2609         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2610
2611         return 0;
2612 }
2613
2614 static void tg3_carrier_off(struct tg3 *tp)
2615 {
2616         netif_carrier_off(tp->dev);
2617         tp->link_up = false;
2618 }
2619
2620 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2621 {
2622         if (tg3_flag(tp, ENABLE_ASF))
2623                 netdev_warn(tp->dev,
2624                             "Management side-band traffic will be interrupted during phy settings change\n");
2625 }
2626
2627 /* Reset the tigon3 PHY and reapply the chip-specific PHY
2628  * workarounds.  The reset is unconditional.
2629  */
2630 static int tg3_phy_reset(struct tg3 *tp)
2631 {
2632         u32 val, cpmuctrl;
2633         int err;
2634
2635         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2636                 val = tr32(GRC_MISC_CFG);
2637                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2638                 udelay(40);
2639         }
2640         err  = tg3_readphy(tp, MII_BMSR, &val);
2641         err |= tg3_readphy(tp, MII_BMSR, &val);
2642         if (err != 0)
2643                 return -EBUSY;
2644
2645         if (netif_running(tp->dev) && tp->link_up) {
2646                 netif_carrier_off(tp->dev);
2647                 tg3_link_report(tp);
2648         }
2649
2650         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2651             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2652             tg3_asic_rev(tp) == ASIC_REV_5705) {
2653                 err = tg3_phy_reset_5703_4_5(tp);
2654                 if (err)
2655                         return err;
2656                 goto out;
2657         }
2658
2659         cpmuctrl = 0;
2660         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2661             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2662                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2663                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2664                         tw32(TG3_CPMU_CTRL,
2665                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2666         }
2667
2668         err = tg3_bmcr_reset(tp);
2669         if (err)
2670                 return err;
2671
2672         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2673                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2674                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2675
2676                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2677         }
2678
2679         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2680             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2681                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2682                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2683                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2684                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2685                         udelay(40);
2686                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687                 }
2688         }
2689
2690         if (tg3_flag(tp, 5717_PLUS) &&
2691             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2692                 return 0;
2693
2694         tg3_phy_apply_otp(tp);
2695
2696         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2697                 tg3_phy_toggle_apd(tp, true);
2698         else
2699                 tg3_phy_toggle_apd(tp, false);
2700
2701 out:
2702         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2703             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2704                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2705                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2706                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2707         }
2708
2709         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2710                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2712         }
2713
2714         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2715                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2716                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2717                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2718                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2719                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2720                 }
2721         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2722                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2723                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2724                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2725                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2726                                 tg3_writephy(tp, MII_TG3_TEST1,
2727                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2728                         } else
2729                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2730
2731                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2732                 }
2733         }
2734
2735         /* Set the extended packet length bit (bit 14) on all
2736          * chips that support jumbo frames. */
2737         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2738                 /* Cannot do read-modify-write on 5401 */
2739                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2740         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2741                 /* Set bit 14 with read-modify-write to preserve other bits */
2742                 err = tg3_phy_auxctl_read(tp,
2743                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2744                 if (!err)
2745                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2746                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2747         }
2748
2749         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2750          * jumbo frame transmission.
2751          */
2752         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2753                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2754                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2755                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2756         }
2757
2758         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2759                 /* adjust output voltage */
2760                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2761         }
2762
2763         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2764                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2765
2766         tg3_phy_toggle_automdix(tp, true);
2767         tg3_phy_set_wirespeed(tp);
2768         return 0;
2769 }
2770
2771 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2772 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2773 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2774                                           TG3_GPIO_MSG_NEED_VAUX)
2775 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2776         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2777          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2778          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2779          (TG3_GPIO_MSG_DRVR_PRES << 12))
2780
2781 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2782         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2783          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2784          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2785          (TG3_GPIO_MSG_NEED_VAUX << 12))
2786
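/* Update this PCI function's driver-present/need-Vaux bits in the
 * GPIO status word (kept in the APE on 5717/5719, in
 * TG3_CPMU_DRV_STATUS elsewhere) and return the combined status of
 * all four functions.
 */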
2787 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2788 {
2789         u32 status, shift;
2790
2791         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792             tg3_asic_rev(tp) == ASIC_REV_5719)
2793                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2794         else
2795                 status = tr32(TG3_CPMU_DRV_STATUS);
2796
2797         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2798         status &= ~(TG3_GPIO_MSG_MASK << shift);
2799         status |= (newstat << shift);
2800
2801         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802             tg3_asic_rev(tp) == ASIC_REV_5719)
2803                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2804         else
2805                 tw32(TG3_CPMU_DRV_STATUS, status);
2806
2807         return status >> TG3_APE_GPIO_MSG_SHIFT;
2808 }
2809
2810 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2811 {
2812         if (!tg3_flag(tp, IS_NIC))
2813                 return 0;
2814
2815         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2817             tg3_asic_rev(tp) == ASIC_REV_5720) {
2818                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2819                         return -EIO;
2820
2821                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2822
2823                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2824                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2825
2826                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2827         } else {
2828                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2829                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2830         }
2831
2832         return 0;
2833 }
2834
2835 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2836 {
2837         u32 grc_local_ctrl;
2838
2839         if (!tg3_flag(tp, IS_NIC) ||
2840             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2841             tg3_asic_rev(tp) == ASIC_REV_5701)
2842                 return;
2843
2844         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2845
2846         tw32_wait_f(GRC_LOCAL_CTRL,
2847                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2848                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2849
2850         tw32_wait_f(GRC_LOCAL_CTRL,
2851                     grc_local_ctrl,
2852                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2853
2854         tw32_wait_f(GRC_LOCAL_CTRL,
2855                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2856                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 }
2858
2859 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2860 {
2861         if (!tg3_flag(tp, IS_NIC))
2862                 return;
2863
2864         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2865             tg3_asic_rev(tp) == ASIC_REV_5701) {
2866                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2867                             (GRC_LCLCTRL_GPIO_OE0 |
2868                              GRC_LCLCTRL_GPIO_OE1 |
2869                              GRC_LCLCTRL_GPIO_OE2 |
2870                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2871                              GRC_LCLCTRL_GPIO_OUTPUT1),
2872                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2873         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2874                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2875                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2876                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2877                                      GRC_LCLCTRL_GPIO_OE1 |
2878                                      GRC_LCLCTRL_GPIO_OE2 |
2879                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2880                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2881                                      tp->grc_local_ctrl;
2882                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2883                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2884
2885                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2886                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2888
2889                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2890                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2892         } else {
2893                 u32 no_gpio2;
2894                 u32 grc_local_ctrl = 0;
2895
2896                 /* Workaround to avoid drawing too much current. */
2897                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2898                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2899                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2900                                     grc_local_ctrl,
2901                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2902                 }
2903
2904                 /* On 5753 and variants, GPIO2 cannot be used. */
2905                 no_gpio2 = tp->nic_sram_data_cfg &
2906                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2907
2908                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2909                                   GRC_LCLCTRL_GPIO_OE1 |
2910                                   GRC_LCLCTRL_GPIO_OE2 |
2911                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2912                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2913                 if (no_gpio2) {
2914                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2915                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2916                 }
2917                 tw32_wait_f(GRC_LOCAL_CTRL,
2918                             tp->grc_local_ctrl | grc_local_ctrl,
2919                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2920
2921                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2922
2923                 tw32_wait_f(GRC_LOCAL_CTRL,
2924                             tp->grc_local_ctrl | grc_local_ctrl,
2925                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2926
2927                 if (!no_gpio2) {
2928                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2929                         tw32_wait_f(GRC_LOCAL_CTRL,
2930                                     tp->grc_local_ctrl | grc_local_ctrl,
2931                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2932                 }
2933         }
2934 }
2935
2936 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2937 {
2938         u32 msg = 0;
2939
2940         /* Serialize power state transitions */
2941         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2942                 return;
2943
2944         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2945                 msg = TG3_GPIO_MSG_NEED_VAUX;
2946
2947         msg = tg3_set_function_status(tp, msg);
2948
2949         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2950                 goto done;
2951
2952         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2953                 tg3_pwrsrc_switch_to_vaux(tp);
2954         else
2955                 tg3_pwrsrc_die_with_vmain(tp);
2956
2957 done:
2958         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2959 }
2960
2961 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2962 {
2963         bool need_vaux = false;
2964
2965         /* The GPIOs do something completely different on 57765. */
2966         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2967                 return;
2968
2969         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2970             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2971             tg3_asic_rev(tp) == ASIC_REV_5720) {
2972                 tg3_frob_aux_power_5717(tp, include_wol ?
2973                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974                 return;
2975         }
2976
2977         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2978                 struct net_device *dev_peer;
2979
2980                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2981
2982                 /* remove_one() may have been run on the peer. */
2983                 if (dev_peer) {
2984                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2985
2986                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2987                                 return;
2988
2989                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2990                             tg3_flag(tp_peer, ENABLE_ASF))
2991                                 need_vaux = true;
2992                 }
2993         }
2994
2995         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2996             tg3_flag(tp, ENABLE_ASF))
2997                 need_vaux = true;
2998
2999         if (need_vaux)
3000                 tg3_pwrsrc_switch_to_vaux(tp);
3001         else
3002                 tg3_pwrsrc_die_with_vmain(tp);
3003 }
3004
3005 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3006 {
3007         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3008                 return 1;
3009         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3010                 if (speed != SPEED_10)
3011                         return 1;
3012         } else if (speed == SPEED_10)
3013                 return 1;
3014
3015         return 0;
3016 }
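
/* Summary of the rule above: tg3_5700_link_polarity() returns 1 when
 * MAC_MODE_LINK_POLARITY should be set -- always in PHY_2 LED mode,
 * on a BCM5411 PHY at any speed other than 10 Mbps, and otherwise
 * only at 10 Mbps.
 */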
3017
3018 static bool tg3_phy_power_bug(struct tg3 *tp)
3019 {
3020         switch (tg3_asic_rev(tp)) {
3021         case ASIC_REV_5700:
3022         case ASIC_REV_5704:
3023                 return true;
3024         case ASIC_REV_5780:
3025                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3026                         return true;
3027                 return false;
3028         case ASIC_REV_5717:
3029                 if (!tp->pci_fn)
3030                         return true;
3031                 return false;
3032         case ASIC_REV_5719:
3033         case ASIC_REV_5720:
3034                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3035                     !tp->pci_fn)
3036                         return true;
3037                 return false;
3038         }
3039
3040         return false;
3041 }
3042
3043 static bool tg3_phy_led_bug(struct tg3 *tp)
3044 {
3045         switch (tg3_asic_rev(tp)) {
3046         case ASIC_REV_5719:
3047         case ASIC_REV_5720:
3048                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3049                     !tp->pci_fn)
3050                         return true;
3051                 return false;
3052         }
3053
3054         return false;
3055 }
3056
3057 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 {
3059         u32 val;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3062                 return;
3063
3064         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3065                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3066                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3067                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3068
3069                         sg_dig_ctrl |=
3070                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3071                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3072                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3073                 }
3074                 return;
3075         }
3076
3077         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3078                 tg3_bmcr_reset(tp);
3079                 val = tr32(GRC_MISC_CFG);
3080                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3081                 udelay(40);
3082                 return;
3083         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3084                 u32 phytest;
3085                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3086                         u32 phy;
3087
3088                         tg3_writephy(tp, MII_ADVERTISE, 0);
3089                         tg3_writephy(tp, MII_BMCR,
3090                                      BMCR_ANENABLE | BMCR_ANRESTART);
3091
3092                         tg3_writephy(tp, MII_TG3_FET_TEST,
3093                                      phytest | MII_TG3_FET_SHADOW_EN);
3094                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3095                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3096                                 tg3_writephy(tp,
3097                                              MII_TG3_FET_SHDW_AUXMODE4,
3098                                              phy);
3099                         }
3100                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3101                 }
3102                 return;
3103         } else if (do_low_power) {
3104                 if (!tg3_phy_led_bug(tp))
3105                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3106                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3107
3108                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3109                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3110                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3111                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3112         }
3113
3114         /* On some chips the PHY must not be powered down, because
3115          * doing so triggers hardware bugs.
3116          */
3117         if (tg3_phy_power_bug(tp))
3118                 return;
3119
3120         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3121             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3122                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3123                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3124                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3125                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3126         }
3127
3128         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3129 }
3130
3131 /* tp->lock is held. */
3132 static int tg3_nvram_lock(struct tg3 *tp)
3133 {
3134         if (tg3_flag(tp, NVRAM)) {
3135                 int i;
3136
3137                 if (tp->nvram_lock_cnt == 0) {
3138                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3139                         for (i = 0; i < 8000; i++) {
3140                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3141                                         break;
3142                                 udelay(20);
3143                         }
3144                         if (i == 8000) {
3145                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146                                 return -ENODEV;
3147                         }
3148                 }
3149                 tp->nvram_lock_cnt++;
3150         }
3151         return 0;
3152 }
3153
3154 /* tp->lock is held. */
3155 static void tg3_nvram_unlock(struct tg3 *tp)
3156 {
3157         if (tg3_flag(tp, NVRAM)) {
3158                 if (tp->nvram_lock_cnt > 0)
3159                         tp->nvram_lock_cnt--;
3160                 if (tp->nvram_lock_cnt == 0)
3161                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162         }
3163 }
3164
3165 /* tp->lock is held. */
3166 static void tg3_enable_nvram_access(struct tg3 *tp)
3167 {
3168         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3169                 u32 nvaccess = tr32(NVRAM_ACCESS);
3170
3171                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172         }
3173 }
3174
3175 /* tp->lock is held. */
3176 static void tg3_disable_nvram_access(struct tg3 *tp)
3177 {
3178         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179                 u32 nvaccess = tr32(NVRAM_ACCESS);
3180
3181                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182         }
3183 }
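
/* Illustrative sketch only (not part of the driver): the canonical
 * calling sequence the four helpers above are designed for, as used by
 * tg3_nvram_read() further below.  The caller holds tp->lock; the word
 * read via NVRAM_ADDR/NVRAM_RDDATA is simplified here and skips the
 * command handshake a real access needs.
 */
static int tg3_nvram_access_pattern(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret = tg3_nvram_lock(tp);   /* arbitrate with firmware */

        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);    /* open the register window */
        tw32(NVRAM_ADDR, offset);
        *val = tr32(NVRAM_RDDATA);      /* simplified; see tg3_nvram_read() */
        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);           /* drops SWARB when count hits 0 */

        return 0;
}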
3184
3185 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3186                                         u32 offset, u32 *val)
3187 {
3188         u32 tmp;
3189         int i;
3190
3191         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3192                 return -EINVAL;
3193
3194         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3195                                         EEPROM_ADDR_DEVID_MASK |
3196                                         EEPROM_ADDR_READ);
3197         tw32(GRC_EEPROM_ADDR,
3198              tmp |
3199              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3200              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3201               EEPROM_ADDR_ADDR_MASK) |
3202              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3203
3204         for (i = 0; i < 1000; i++) {
3205                 tmp = tr32(GRC_EEPROM_ADDR);
3206
3207                 if (tmp & EEPROM_ADDR_COMPLETE)
3208                         break;
3209                 msleep(1);
3210         }
3211         if (!(tmp & EEPROM_ADDR_COMPLETE))
3212                 return -EBUSY;
3213
3214         tmp = tr32(GRC_EEPROM_DATA);
3215
3216         /*
3217          * The data will always be opposite the native endian
3218          * format.  Perform a blind byteswap to compensate.
3219          */
3220         *val = swab32(tmp);
3221
3222         return 0;
3223 }
3224
3225 #define NVRAM_CMD_TIMEOUT 5000
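
/* Worst case, tg3_nvram_exec_cmd() below polls NVRAM_CMD_TIMEOUT times,
 * sleeping 10-40 usec per iteration, i.e. roughly 50-200 ms (plus
 * scheduler overhead) before giving up with -EBUSY.
 */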
3226
3227 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 {
3229         int i;
3230
3231         tw32(NVRAM_CMD, nvram_cmd);
3232         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3233                 usleep_range(10, 40);
3234                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3235                         udelay(10);
3236                         break;
3237                 }
3238         }
3239
3240         if (i == NVRAM_CMD_TIMEOUT)
3241                 return -EBUSY;
3242
3243         return 0;
3244 }
3245
3246 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3247 {
3248         if (tg3_flag(tp, NVRAM) &&
3249             tg3_flag(tp, NVRAM_BUFFERED) &&
3250             tg3_flag(tp, FLASH) &&
3251             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252             (tp->nvram_jedecnum == JEDEC_ATMEL))
3253
3254                 addr = ((addr / tp->nvram_pagesize) <<
3255                         ATMEL_AT45DB0X1B_PAGE_POS) +
3256                        (addr % tp->nvram_pagesize);
3257
3258         return addr;
3259 }
3260
3261 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3262 {
3263         if (tg3_flag(tp, NVRAM) &&
3264             tg3_flag(tp, NVRAM_BUFFERED) &&
3265             tg3_flag(tp, FLASH) &&
3266             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267             (tp->nvram_jedecnum == JEDEC_ATMEL))
3268
3269                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3270                         tp->nvram_pagesize) +
3271                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3272
3273         return addr;
3274 }
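
/* Worked example (page geometry assumed for illustration): Atmel
 * AT45DB0X1B parts use 264-byte pages with a 9-bit in-page address
 * field (ATMEL_AT45DB0X1B_PAGE_POS).  Logical address 1000 is page 3,
 * offset 208, so tg3_nvram_phys_addr() yields (3 << 9) + 208 = 1744;
 * tg3_nvram_logical_addr() inverts this: (1744 >> 9) * 264 +
 * (1744 & 511) = 1000.
 */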
3275
3276 /* NOTE: Data read in from NVRAM is byteswapped according to
3277  * the byteswapping settings for all other register accesses.
3278  * tg3 devices are BE devices, so on a BE machine, the data
3279  * returned will be exactly as it is seen in NVRAM.  On a LE
3280  * machine, the 32-bit value will be byteswapped.
3281  */
3282 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 {
3284         int ret;
3285
3286         if (!tg3_flag(tp, NVRAM))
3287                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3288
3289         offset = tg3_nvram_phys_addr(tp, offset);
3290
3291         if (offset > NVRAM_ADDR_MSK)
3292                 return -EINVAL;
3293
3294         ret = tg3_nvram_lock(tp);
3295         if (ret)
3296                 return ret;
3297
3298         tg3_enable_nvram_access(tp);
3299
3300         tw32(NVRAM_ADDR, offset);
3301         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3302                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3303
3304         if (ret == 0)
3305                 *val = tr32(NVRAM_RDDATA);
3306
3307         tg3_disable_nvram_access(tp);
3308
3309         tg3_nvram_unlock(tp);
3310
3311         return ret;
3312 }
3313
3314 /* Ensures NVRAM data is in bytestream format. */
3315 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3316 {
3317         u32 v;
3318         int res = tg3_nvram_read(tp, offset, &v);
3319         if (!res)
3320                 *val = cpu_to_be32(v);
3321         return res;
3322 }
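
/* Illustrative sketch only (not part of the driver): because
 * tg3_nvram_read_be32() returns data in bytestream order, copying the
 * returned words straight into memory yields the exact byte image of
 * NVRAM on both big- and little-endian hosts.  The helper name and its
 * dword-aligned offset/len contract are assumptions for this sketch.
 */
static int tg3_nvram_read_bytes(struct tg3 *tp, u32 offset, u8 *buf, u32 len)
{
        u32 i;

        for (i = 0; i < len; i += 4) {
                __be32 v;
                int err = tg3_nvram_read_be32(tp, offset + i, &v);

                if (err)
                        return err;
                memcpy(buf + i, &v, 4); /* no swapping: already bytestream */
        }

        return 0;
}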
3323
3324 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3325                                     u32 offset, u32 len, u8 *buf)
3326 {
3327         int i, j, rc = 0;
3328         u32 val;
3329
3330         for (i = 0; i < len; i += 4) {
3331                 u32 addr;
3332                 __be32 data;
3333
3334                 addr = offset + i;
3335
3336                 memcpy(&data, buf + i, 4);
3337
3338                 /*
3339                  * The SEEPROM interface expects the data to always be opposite
3340                  * the native endian format.  We accomplish this by reversing
3341                  * all the operations that would have been performed on the
3342                  * data from a call to tg3_nvram_read_be32().
3343                  */
3344                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3345
3346                 val = tr32(GRC_EEPROM_ADDR);
3347                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3348
3349                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3350                         EEPROM_ADDR_READ);
3351                 tw32(GRC_EEPROM_ADDR, val |
3352                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3353                         (addr & EEPROM_ADDR_ADDR_MASK) |
3354                         EEPROM_ADDR_START |
3355                         EEPROM_ADDR_WRITE);
3356
3357                 for (j = 0; j < 1000; j++) {
3358                         val = tr32(GRC_EEPROM_ADDR);
3359
3360                         if (val & EEPROM_ADDR_COMPLETE)
3361                                 break;
3362                         msleep(1);
3363                 }
3364                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3365                         rc = -EBUSY;
3366                         break;
3367                 }
3368         }
3369
3370         return rc;
3371 }
3372
3373 /* offset and length are dword aligned */
3374 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375                 u8 *buf)
3376 {
3377         int ret = 0;
3378         u32 pagesize = tp->nvram_pagesize;
3379         u32 pagemask = pagesize - 1;
3380         u32 nvram_cmd;
3381         u8 *tmp;
3382
3383         tmp = kmalloc(pagesize, GFP_KERNEL);
3384         if (tmp == NULL)
3385                 return -ENOMEM;
3386
3387         while (len) {
3388                 int j;
3389                 u32 phy_addr, page_off, size;
3390
3391                 phy_addr = offset & ~pagemask;
3392
3393                 for (j = 0; j < pagesize; j += 4) {
3394                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3395                                                   (__be32 *) (tmp + j));
3396                         if (ret)
3397                                 break;
3398                 }
3399                 if (ret)
3400                         break;
3401
3402                 page_off = offset & pagemask;
3403                 size = pagesize;
3404                 if (len < size)
3405                         size = len;
3406
3407                 len -= size;
3408
3409                 memcpy(tmp + page_off, buf, size);
3410
3411                 offset = offset + (pagesize - page_off);
3412
3413                 tg3_enable_nvram_access(tp);
3414
3415                 /*
3416                  * Before we can erase the flash page, we need
3417                  * to issue a special "write enable" command.
3418                  */
3419                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420
3421                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422                         break;
3423
3424                 /* Erase the target page */
3425                 tw32(NVRAM_ADDR, phy_addr);
3426
3427                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3428                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3429
3430                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431                         break;
3432
3433                 /* Issue another write enable to start the write. */
3434                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435
3436                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437                         break;
3438
3439                 for (j = 0; j < pagesize; j += 4) {
3440                         __be32 data;
3441
3442                         data = *((__be32 *) (tmp + j));
3443
3444                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3445
3446                         tw32(NVRAM_ADDR, phy_addr + j);
3447
3448                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449                                 NVRAM_CMD_WR;
3450
3451                         if (j == 0)
3452                                 nvram_cmd |= NVRAM_CMD_FIRST;
3453                         else if (j == (pagesize - 4))
3454                                 nvram_cmd |= NVRAM_CMD_LAST;
3455
3456                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3457                         if (ret)
3458                                 break;
3459                 }
3460                 if (ret)
3461                         break;
3462         }
3463
3464         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465         tg3_nvram_exec_cmd(tp, nvram_cmd);
3466
3467         kfree(tmp);
3468
3469         return ret;
3470 }
3471
3472 /* offset and length are dword aligned */
3473 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3474                 u8 *buf)
3475 {
3476         int i, ret = 0;
3477
3478         for (i = 0; i < len; i += 4, offset += 4) {
3479                 u32 page_off, phy_addr, nvram_cmd;
3480                 __be32 data;
3481
3482                 memcpy(&data, buf + i, 4);
3483                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3484
3485                 page_off = offset % tp->nvram_pagesize;
3486
3487                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3488
3489                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3490
3491                 if (page_off == 0 || i == 0)
3492                         nvram_cmd |= NVRAM_CMD_FIRST;
3493                 if (page_off == (tp->nvram_pagesize - 4))
3494                         nvram_cmd |= NVRAM_CMD_LAST;
3495
3496                 if (i == (len - 4))
3497                         nvram_cmd |= NVRAM_CMD_LAST;
3498
3499                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3500                     !tg3_flag(tp, FLASH) ||
3501                     !tg3_flag(tp, 57765_PLUS))
3502                         tw32(NVRAM_ADDR, phy_addr);
3503
3504                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3505                     !tg3_flag(tp, 5755_PLUS) &&
3506                     (tp->nvram_jedecnum == JEDEC_ST) &&
3507                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3508                         u32 cmd;
3509
3510                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3511                         ret = tg3_nvram_exec_cmd(tp, cmd);
3512                         if (ret)
3513                                 break;
3514                 }
3515                 if (!tg3_flag(tp, FLASH)) {
3516                         /* We always do complete word writes to the EEPROM. */
3517                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3518                 }
3519
3520                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3521                 if (ret)
3522                         break;
3523         }
3524         return ret;
3525 }
3526
3527 /* offset and length are dword aligned */
3528 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 {
3530         int ret;
3531
3532         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3533                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3534                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535                 udelay(40);
3536         }
3537
3538         if (!tg3_flag(tp, NVRAM)) {
3539                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540         } else {
3541                 u32 grc_mode;
3542
3543                 ret = tg3_nvram_lock(tp);
3544                 if (ret)
3545                         return ret;
3546
3547                 tg3_enable_nvram_access(tp);
3548                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3549                         tw32(NVRAM_WRITE1, 0x406);
3550
3551                 grc_mode = tr32(GRC_MODE);
3552                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3553
3554                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3555                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3556                                 buf);
3557                 } else {
3558                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559                                 buf);
3560                 }
3561
3562                 grc_mode = tr32(GRC_MODE);
3563                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3564
3565                 tg3_disable_nvram_access(tp);
3566                 tg3_nvram_unlock(tp);
3567         }
3568
3569         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3570                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3571                 udelay(40);
3572         }
3573
3574         return ret;
3575 }
3576
3577 #define RX_CPU_SCRATCH_BASE     0x30000
3578 #define RX_CPU_SCRATCH_SIZE     0x04000
3579 #define TX_CPU_SCRATCH_BASE     0x34000
3580 #define TX_CPU_SCRATCH_SIZE     0x04000
3581
3582 /* tp->lock is held. */
3583 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3584 {
3585         int i;
3586         const int iters = 10000;
3587
3588         for (i = 0; i < iters; i++) {
3589                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3590                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3591                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3592                         break;
3593                 if (pci_channel_offline(tp->pdev))
3594                         return -EBUSY;
3595         }
3596
3597         return (i == iters) ? -EBUSY : 0;
3598 }
3599
3600 /* tp->lock is held. */
3601 static int tg3_rxcpu_pause(struct tg3 *tp)
3602 {
3603         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3604
3605         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3606         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3607         udelay(10);
3608
3609         return rc;
3610 }
3611
3612 /* tp->lock is held. */
3613 static int tg3_txcpu_pause(struct tg3 *tp)
3614 {
3615         return tg3_pause_cpu(tp, TX_CPU_BASE);
3616 }
3617
3618 /* tp->lock is held. */
3619 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3620 {
3621         tw32(cpu_base + CPU_STATE, 0xffffffff);
3622         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3623 }
3624
3625 /* tp->lock is held. */
3626 static void tg3_rxcpu_resume(struct tg3 *tp)
3627 {
3628         tg3_resume_cpu(tp, RX_CPU_BASE);
3629 }
3630
3631 /* tp->lock is held. */
3632 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 {
3634         int rc;
3635
3636         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3637
3638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3639                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3640
3641                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3642                 return 0;
3643         }
3644         if (cpu_base == RX_CPU_BASE) {
3645                 rc = tg3_rxcpu_pause(tp);
3646         } else {
3647                 /*
3648                  * The 5750 derivative used in the BCM4785 has only an
3649                  * Rx CPU.
3650                  */
3651                 if (tg3_flag(tp, IS_SSB_CORE))
3652                         return 0;
3653
3654                 rc = tg3_txcpu_pause(tp);
3655         }
3656
3657         if (rc) {
3658                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3659                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660                 return -ENODEV;
3661         }
3662
3663         /* Clear firmware's nvram arbitration. */
3664         if (tg3_flag(tp, NVRAM))
3665                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666         return 0;
3667 }
3668
3669 static int tg3_fw_data_len(struct tg3 *tp,
3670                            const struct tg3_firmware_hdr *fw_hdr)
3671 {
3672         int fw_len;
3673
3674         /* Non-fragmented firmware has one firmware header followed by a
3675          * contiguous chunk of data to be written. The length field in that
3676          * header is not the length of the data to be written but the
3677          * complete length of the bss. The data length is derived from
3678          * tp->fw->size minus the headers.
3679          *
3680          * Fragmented firmware has a main header followed by multiple
3681          * fragments. Each fragment is identical to non-fragmented firmware:
3682          * a firmware header followed by a contiguous chunk of data. In
3683          * the main header, the length field is unused and set to 0xffffffff.
3684          * In each fragment header the length is the entire size of that
3685          * fragment, i.e. fragment data plus header length. The data length
3686          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3687          */
3688         if (tp->fw_len == 0xffffffff)
3689                 fw_len = be32_to_cpu(fw_hdr->len);
3690         else
3691                 fw_len = tp->fw->size;
3692
3693         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3694 }
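
/* Illustrative sketch only (not part of the driver): walking the
 * fragment headers of a fragmented image (tp->fw_len == 0xffffffff)
 * exactly as tg3_load_firmware_cpu() below does, here just to count
 * the fragments.
 */
static int tg3_fw_frag_count(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr =
                (const struct tg3_firmware_hdr *)tp->fw->data;
        int total_len = tp->fw->size - TG3_FW_HDR_LEN; /* skip main header */
        int nfrags = 0;

        fw_hdr++;       /* advance past the main header */
        while (total_len > 0) {
                total_len -= be32_to_cpu(fw_hdr->len);
                fw_hdr = (const struct tg3_firmware_hdr *)
                         ((const void *)fw_hdr + be32_to_cpu(fw_hdr->len));
                nfrags++;
        }

        return nfrags;
}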
3695
3696 /* tp->lock is held. */
3697 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3698                                  u32 cpu_scratch_base, int cpu_scratch_size,
3699                                  const struct tg3_firmware_hdr *fw_hdr)
3700 {
3701         int err, i;
3702         void (*write_op)(struct tg3 *, u32, u32);
3703         int total_len = tp->fw->size;
3704
3705         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3706                 netdev_err(tp->dev,
3707                            "%s: trying to load TX CPU firmware on a 5705-class chip\n",
3708                            __func__);
3709                 return -EINVAL;
3710         }
3711
3712         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3713                 write_op = tg3_write_mem;
3714         else
3715                 write_op = tg3_write_indirect_reg32;
3716
3717         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3718                 /* The bootcode may still be loading at this point.
3719                  * Acquire the NVRAM lock before halting the CPU.
3720                  */
3721                 int lock_err = tg3_nvram_lock(tp);
3722                 err = tg3_halt_cpu(tp, cpu_base);
3723                 if (!lock_err)
3724                         tg3_nvram_unlock(tp);
3725                 if (err)
3726                         goto out;
3727
3728                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3729                         write_op(tp, cpu_scratch_base + i, 0);
3730                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731                 tw32(cpu_base + CPU_MODE,
3732                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3733         } else {
3734                 /* For fragmented firmware, subtract the additional main
3735                  * header and advance to the first fragment.
3736                  */
3737                 total_len -= TG3_FW_HDR_LEN;
3738                 fw_hdr++;
3739         }
3740
3741         do {
3742                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3743                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3744                         write_op(tp, cpu_scratch_base +
3745                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3746                                      (i * sizeof(u32)),
3747                                  be32_to_cpu(fw_data[i]));
3748
3749                 total_len -= be32_to_cpu(fw_hdr->len);
3750
3751                 /* Advance to next fragment */
3752                 fw_hdr = (struct tg3_firmware_hdr *)
3753                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3754         } while (total_len > 0);
3755
3756         err = 0;
3757
3758 out:
3759         return err;
3760 }
3761
3762 /* tp->lock is held. */
3763 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3764 {
3765         int i;
3766         const int iters = 5;
3767
3768         tw32(cpu_base + CPU_STATE, 0xffffffff);
3769         tw32_f(cpu_base + CPU_PC, pc);
3770
3771         for (i = 0; i < iters; i++) {
3772                 if (tr32(cpu_base + CPU_PC) == pc)
3773                         break;
3774                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3775                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3776                 tw32_f(cpu_base + CPU_PC, pc);
3777                 udelay(1000);
3778         }
3779
3780         return (i == iters) ? -EBUSY : 0;
3781 }
3782
3783 /* tp->lock is held. */
3784 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3785 {
3786         const struct tg3_firmware_hdr *fw_hdr;
3787         int err;
3788
3789         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790
3791         /* The firmware blob starts with version numbers, followed by the
3792            start address and the complete length, where
3793            length = end_address_of_bss - start_address_of_text.
3794            The remainder is the image to be loaded contiguously
3795            from the start address. */
3796
3797         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3798                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3799                                     fw_hdr);
3800         if (err)
3801                 return err;
3802
3803         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3804                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3805                                     fw_hdr);
3806         if (err)
3807                 return err;
3808
3809         /* Now startup only the RX cpu. */
3810         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3811                                        be32_to_cpu(fw_hdr->base_addr));
3812         if (err) {
3813                 netdev_err(tp->dev, "%s: failed to set RX CPU PC, is %08x, "
3814                            "should be %08x\n", __func__,
3815                            tr32(RX_CPU_BASE + CPU_PC),
3816                            be32_to_cpu(fw_hdr->base_addr));
3817                 return -ENODEV;
3818         }
3819
3820         tg3_rxcpu_resume(tp);
3821
3822         return 0;
3823 }
3824
3825 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3826 {
3827         const int iters = 1000;
3828         int i;
3829         u32 val;
3830
3831         /* Wait for boot code to complete initialization and enter service
3832          * loop. It is then safe to download service patches.
3833          */
3834         for (i = 0; i < iters; i++) {
3835                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3836                         break;
3837
3838                 udelay(10);
3839         }
3840
3841         if (i == iters) {
3842                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843                 return -EBUSY;
3844         }
3845
3846         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3847         if (val & 0xff) {
3848                 netdev_warn(tp->dev,
3849                             "Other patches exist. Not downloading EEE patch\n");
3850                 return -EEXIST;
3851         }
3852
3853         return 0;
3854 }
3855
3856 /* tp->lock is held. */
3857 static void tg3_load_57766_firmware(struct tg3 *tp)
3858 {
3859         struct tg3_firmware_hdr *fw_hdr;
3860
3861         if (!tg3_flag(tp, NO_NVRAM))
3862                 return;
3863
3864         if (tg3_validate_rxcpu_state(tp))
3865                 return;
3866
3867         if (!tp->fw)
3868                 return;
3869
3870         /* This firmware blob has a different format from older firmware
3871          * releases, as described below. The main difference is that the
3872          * data is fragmented and written to non-contiguous locations.
3873          *
3874          * The blob begins with a firmware header identical to other
3875          * firmware, consisting of version, base address and length. The
3876          * length here is unused and set to 0xffffffff.
3877          *
3878          * This is followed by a series of firmware fragments, each
3879          * individually identical to older firmware: a firmware header
3880          * followed by the data for that fragment. The version field of
3881          * each fragment header is unused.
3882          */
3883
3884         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3885         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3886                 return;
3887
3888         if (tg3_rxcpu_pause(tp))
3889                 return;
3890
3891         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3892         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3893
3894         tg3_rxcpu_resume(tp);
3895 }
3896
3897 /* tp->lock is held. */
3898 static int tg3_load_tso_firmware(struct tg3 *tp)
3899 {
3900         const struct tg3_firmware_hdr *fw_hdr;
3901         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3902         int err;
3903
3904         if (!tg3_flag(tp, FW_TSO))
3905                 return 0;
3906
3907         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3908
3909         /* The firmware blob starts with version numbers, followed by the
3910            start address and the complete length, where
3911            length = end_address_of_bss - start_address_of_text.
3912            The remainder is the image to be loaded contiguously
3913            from the start address. */
3914
3915         cpu_scratch_size = tp->fw_len;
3916
3917         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3918                 cpu_base = RX_CPU_BASE;
3919                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3920         } else {
3921                 cpu_base = TX_CPU_BASE;
3922                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3923                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3924         }
3925
3926         err = tg3_load_firmware_cpu(tp, cpu_base,
3927                                     cpu_scratch_base, cpu_scratch_size,
3928                                     fw_hdr);
3929         if (err)
3930                 return err;
3931
3932         /* Now startup the cpu. */
3933         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3934                                        be32_to_cpu(fw_hdr->base_addr));
3935         if (err) {
3936                 netdev_err(tp->dev,
3937                            "%s: failed to set CPU PC, is %08x, should be %08x\n",
3938                            __func__, tr32(cpu_base + CPU_PC),
3939                            be32_to_cpu(fw_hdr->base_addr));
3940                 return -ENODEV;
3941         }
3942
3943         tg3_resume_cpu(tp, cpu_base);
3944         return 0;
3945 }
3946
3947 /* tp->lock is held. */
3948 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3949 {
3950         u32 addr_high, addr_low;
3951
3952         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954                     (mac_addr[4] <<  8) | mac_addr[5]);
3955
3956         if (index < 4) {
3957                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959         } else {
3960                 index -= 4;
3961                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963         }
3964 }
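
/* Worked example: for MAC address 00:10:18:aa:bb:cc the pair written
 * above is addr_high = 0x00000010 (first two octets in the low 16 bits)
 * and addr_low = 0x18aabbcc (remaining four octets).
 */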
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969         u32 addr_high;
3970         int i;
3971
3972         for (i = 0; i < 4; i++) {
3973                 if (i == 1 && skip_mac_1)
3974                         continue;
3975                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976         }
3977
3978         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979             tg3_asic_rev(tp) == ASIC_REV_5704) {
3980                 for (i = 4; i < 16; i++)
3981                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982         }
3983
3984         addr_high = (tp->dev->dev_addr[0] +
3985                      tp->dev->dev_addr[1] +
3986                      tp->dev->dev_addr[2] +
3987                      tp->dev->dev_addr[3] +
3988                      tp->dev->dev_addr[4] +
3989                      tp->dev->dev_addr[5]) &
3990                 TX_BACKOFF_SEED_MASK;
3991         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
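
/* Worked example: the TX backoff seed is simply the byte sum of the
 * MAC address masked with TX_BACKOFF_SEED_MASK (assumed here to be the
 * low 10 bits); for 00:10:18:aa:bb:cc the sum is 0x259, which the mask
 * leaves unchanged.
 */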
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996         /*
3997          * Make sure register accesses (indirect or otherwise) will function
3998          * correctly.
3999          */
4000         pci_write_config_dword(tp->pdev,
4001                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006         int err;
4007
4008         tg3_enable_register_access(tp);
4009
4010         err = pci_set_power_state(tp->pdev, PCI_D0);
4011         if (!err) {
4012                 /* Switch out of Vaux if it is a NIC */
4013                 tg3_pwrsrc_switch_to_vmain(tp);
4014         } else {
4015                 netdev_err(tp->dev, "Transition to D0 failed\n");
4016         }
4017
4018         return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025         u32 misc_host_ctrl;
4026         bool device_should_wake, do_low_power;
4027
4028         tg3_enable_register_access(tp);
4029
4030         /* Restore the CLKREQ setting. */
4031         if (tg3_flag(tp, CLKREQ_BUG))
4032                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036         tw32(TG3PCI_MISC_HOST_CTRL,
4037              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040                              tg3_flag(tp, WOL_ENABLE);
4041
4042         if (tg3_flag(tp, USE_PHYLIB)) {
4043                 do_low_power = false;
4044                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046                         struct phy_device *phydev;
4047                         u32 phyid, advertising;
4048
4049                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053                         tp->link_config.speed = phydev->speed;
4054                         tp->link_config.duplex = phydev->duplex;
4055                         tp->link_config.autoneg = phydev->autoneg;
4056                         tp->link_config.advertising = phydev->advertising;
4057
4058                         advertising = ADVERTISED_TP |
4059                                       ADVERTISED_Pause |
4060                                       ADVERTISED_Autoneg |
4061                                       ADVERTISED_10baseT_Half;
4062
4063                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4064                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4065                                         advertising |=
4066                                                 ADVERTISED_100baseT_Half |
4067                                                 ADVERTISED_100baseT_Full |
4068                                                 ADVERTISED_10baseT_Full;
4069                                 else
4070                                         advertising |= ADVERTISED_10baseT_Full;
4071                         }
4072
4073                         phydev->advertising = advertising;
4074
4075                         phy_start_aneg(phydev);
4076
4077                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4078                         if (phyid != PHY_ID_BCMAC131) {
4079                                 phyid &= PHY_BCM_OUI_MASK;
4080                                 if (phyid == PHY_BCM_OUI_1 ||
4081                                     phyid == PHY_BCM_OUI_2 ||
4082                                     phyid == PHY_BCM_OUI_3)
4083                                         do_low_power = true;
4084                         }
4085                 }
4086         } else {
4087                 do_low_power = true;
4088
4089                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4090                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4091
4092                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4093                         tg3_setup_phy(tp, false);
4094         }
4095
4096         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4097                 u32 val;
4098
4099                 val = tr32(GRC_VCPU_EXT_CTRL);
4100                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4101         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4102                 int i;
4103                 u32 val;
4104
4105                 for (i = 0; i < 200; i++) {
4106                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4107                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4108                                 break;
4109                         msleep(1);
4110                 }
4111         }
4112         if (tg3_flag(tp, WOL_CAP))
4113                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4114                                                      WOL_DRV_STATE_SHUTDOWN |
4115                                                      WOL_DRV_WOL |
4116                                                      WOL_SET_MAGIC_PKT);
4117
4118         if (device_should_wake) {
4119                 u32 mac_mode;
4120
4121                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4122                         if (do_low_power &&
4123                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4124                                 tg3_phy_auxctl_write(tp,
4125                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4126                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4127                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4128                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4129                                 udelay(40);
4130                         }
4131
4132                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4133                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134                         else if (tp->phy_flags &
4135                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4136                                 if (tp->link_config.active_speed == SPEED_1000)
4137                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4138                                 else
4139                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4140                         } else
4141                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4142
4143                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4144                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4145                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4146                                              SPEED_100 : SPEED_10;
4147                                 if (tg3_5700_link_polarity(tp, speed))
4148                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4149                                 else
4150                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4151                         }
4152                 } else {
4153                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4154                 }
4155
4156                 if (!tg3_flag(tp, 5750_PLUS))
4157                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4158
4159                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4160                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4161                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4162                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4163
4164                 if (tg3_flag(tp, ENABLE_APE))
4165                         mac_mode |= MAC_MODE_APE_TX_EN |
4166                                     MAC_MODE_APE_RX_EN |
4167                                     MAC_MODE_TDE_ENABLE;
4168
4169                 tw32_f(MAC_MODE, mac_mode);
4170                 udelay(100);
4171
4172                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4173                 udelay(10);
4174         }
4175
4176         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4177             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4178              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4179                 u32 base_val;
4180
4181                 base_val = tp->pci_clock_ctrl;
4182                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4183                              CLOCK_CTRL_TXCLK_DISABLE);
4184
4185                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4186                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4187         } else if (tg3_flag(tp, 5780_CLASS) ||
4188                    tg3_flag(tp, CPMU_PRESENT) ||
4189                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4190                 /* do nothing */
4191         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4192                 u32 newbits1, newbits2;
4193
4194                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4195                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4196                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4197                                     CLOCK_CTRL_TXCLK_DISABLE |
4198                                     CLOCK_CTRL_ALTCLK);
4199                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4200                 } else if (tg3_flag(tp, 5705_PLUS)) {
4201                         newbits1 = CLOCK_CTRL_625_CORE;
4202                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4203                 } else {
4204                         newbits1 = CLOCK_CTRL_ALTCLK;
4205                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206                 }
4207
4208                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4209                             40);
4210
4211                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4212                             40);
4213
4214                 if (!tg3_flag(tp, 5705_PLUS)) {
4215                         u32 newbits3;
4216
4217                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4218                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4219                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4220                                             CLOCK_CTRL_TXCLK_DISABLE |
4221                                             CLOCK_CTRL_44MHZ_CORE);
4222                         } else {
4223                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4224                         }
4225
4226                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4227                                     tp->pci_clock_ctrl | newbits3, 40);
4228                 }
4229         }
4230
4231         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4232                 tg3_power_down_phy(tp, do_low_power);
4233
4234         tg3_frob_aux_power(tp, true);
4235
4236         /* Workaround for unstable PLL clock */
4237         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4238             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4239              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4240                 u32 val = tr32(0x7d00);
4241
4242                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4243                 tw32(0x7d00, val);
4244                 if (!tg3_flag(tp, ENABLE_ASF)) {
4245                         int err;
4246
4247                         err = tg3_nvram_lock(tp);
4248                         tg3_halt_cpu(tp, RX_CPU_BASE);
4249                         if (!err)
4250                                 tg3_nvram_unlock(tp);
4251                 }
4252         }
4253
4254         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4255
4256         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4257
4258         return 0;
4259 }
4260
4261 static void tg3_power_down(struct tg3 *tp)
4262 {
4263         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4264         pci_set_power_state(tp->pdev, PCI_D3hot);
4265 }
4266
4267 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4268 {
4269         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4270         case MII_TG3_AUX_STAT_10HALF:
4271                 *speed = SPEED_10;
4272                 *duplex = DUPLEX_HALF;
4273                 break;
4274
4275         case MII_TG3_AUX_STAT_10FULL:
4276                 *speed = SPEED_10;
4277                 *duplex = DUPLEX_FULL;
4278                 break;
4279
4280         case MII_TG3_AUX_STAT_100HALF:
4281                 *speed = SPEED_100;
4282                 *duplex = DUPLEX_HALF;
4283                 break;
4284
4285         case MII_TG3_AUX_STAT_100FULL:
4286                 *speed = SPEED_100;
4287                 *duplex = DUPLEX_FULL;
4288                 break;
4289
4290         case MII_TG3_AUX_STAT_1000HALF:
4291                 *speed = SPEED_1000;
4292                 *duplex = DUPLEX_HALF;
4293                 break;
4294
4295         case MII_TG3_AUX_STAT_1000FULL:
4296                 *speed = SPEED_1000;
4297                 *duplex = DUPLEX_FULL;
4298                 break;
4299
4300         default:
4301                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4302                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4303                                  SPEED_10;
4304                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4305                                   DUPLEX_HALF;
4306                         break;
4307                 }
4308                 *speed = SPEED_UNKNOWN;
4309                 *duplex = DUPLEX_UNKNOWN;
4310                 break;
4311         }
4312 }
4313
4314 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4315 {
4316         int err = 0;
4317         u32 val, new_adv;
4318
4319         new_adv = ADVERTISE_CSMA;
4320         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4321         new_adv |= mii_advertise_flowctrl(flowctrl);
4322
4323         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4324         if (err)
4325                 goto done;
4326
4327         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4328                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4329
4330                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4331                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4332                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4333
4334                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4335                 if (err)
4336                         goto done;
4337         }
4338
4339         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4340                 goto done;
4341
4342         tw32(TG3_CPMU_EEE_MODE,
4343              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4344
4345         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4346         if (!err) {
4347                 u32 err2;
4348
4349                 val = 0;
4350                 /* Advertise 100-BaseTX EEE ability */
4351                 if (advertise & ADVERTISED_100baseT_Full)
4352                         val |= MDIO_AN_EEE_ADV_100TX;
4353                 /* Advertise 1000-BaseT EEE ability */
4354                 if (advertise & ADVERTISED_1000baseT_Full)
4355                         val |= MDIO_AN_EEE_ADV_1000T;
4356
4357                 if (!tp->eee.eee_enabled) {
4358                         val = 0;
4359                         tp->eee.advertised = 0;
4360                 } else {
4361                         tp->eee.advertised = advertise &
4362                                              (ADVERTISED_100baseT_Full |
4363                                               ADVERTISED_1000baseT_Full);
4364                 }
4365
4366                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4367                 if (err)
4368                         val = 0;
4369
4370                 switch (tg3_asic_rev(tp)) {
4371                 case ASIC_REV_5717:
4372                 case ASIC_REV_57765:
4373                 case ASIC_REV_57766:
4374                 case ASIC_REV_5719:
4375                         /* If we advertised any EEE abilities above... */
4376                         if (val)
4377                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4378                                       MII_TG3_DSP_TAP26_RMRXSTO |
4379                                       MII_TG3_DSP_TAP26_OPCSINPT;
4380                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4381                         /* Fall through */
4382                 case ASIC_REV_5720:
4383                 case ASIC_REV_5762:
4384                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4385                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4386                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4387                 }
4388
4389                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4390                 if (!err)
4391                         err = err2;
4392         }
4393
4394 done:
4395         return err;
4396 }
4397
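/* Begin link bring-up on a copper PHY: either restart autonegotiation
 * with the configured (or low-power/WOL) advertisement, or force
 * speed/duplex through BMCR when autoneg is disabled.
 */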
4398 static void tg3_phy_copper_begin(struct tg3 *tp)
4399 {
4400         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4401             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4402                 u32 adv, fc;
4403
4404                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4405                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4406                         adv = ADVERTISED_10baseT_Half |
4407                               ADVERTISED_10baseT_Full;
4408                         if (tg3_flag(tp, WOL_SPEED_100MB))
4409                                 adv |= ADVERTISED_100baseT_Half |
4410                                        ADVERTISED_100baseT_Full;
4411                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4412                                 if (!(tp->phy_flags &
4413                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4414                                         adv |= ADVERTISED_1000baseT_Half;
4415                                 adv |= ADVERTISED_1000baseT_Full;
4416                         }
4417
4418                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4419                 } else {
4420                         adv = tp->link_config.advertising;
4421                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4422                                 adv &= ~(ADVERTISED_1000baseT_Half |
4423                                          ADVERTISED_1000baseT_Full);
4424
4425                         fc = tp->link_config.flowctrl;
4426                 }
4427
4428                 tg3_phy_autoneg_cfg(tp, adv, fc);
4429
4430                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4431                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4432                         /* Normally during power down we want to autonegotiate
4433                          * the lowest possible speed for WOL. However, to avoid
4434                          * link flap, we leave it untouched.
4435                          */
4436                         return;
4437                 }
4438
4439                 tg3_writephy(tp, MII_BMCR,
4440                              BMCR_ANENABLE | BMCR_ANRESTART);
4441         } else {
4442                 int i;
4443                 u32 bmcr, orig_bmcr;
4444
4445                 tp->link_config.active_speed = tp->link_config.speed;
4446                 tp->link_config.active_duplex = tp->link_config.duplex;
4447
4448                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4449                         /* With autoneg disabled, the 5715 (ASIC_REV_5714
4450                          * family) only links up when the advertisement
4451                          * register has the configured speed enabled.
4452                          */
4453                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4454                 }
4455
4456                 bmcr = 0;
4457                 switch (tp->link_config.speed) {
4458                 default:
4459                 case SPEED_10:
4460                         break;
4461
4462                 case SPEED_100:
4463                         bmcr |= BMCR_SPEED100;
4464                         break;
4465
4466                 case SPEED_1000:
4467                         bmcr |= BMCR_SPEED1000;
4468                         break;
4469                 }
4470
4471                 if (tp->link_config.duplex == DUPLEX_FULL)
4472                         bmcr |= BMCR_FULLDPLX;
4473
4474                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4475                     (bmcr != orig_bmcr)) {
4476                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4477                         for (i = 0; i < 1500; i++) {
4478                                 u32 tmp;
4479
4480                                 udelay(10);
4481                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4482                                     tg3_readphy(tp, MII_BMSR, &tmp))
4483                                         continue;
4484                                 if (!(tmp & BMSR_LSTATUS)) {
4485                                         udelay(40);
4486                                         break;
4487                                 }
4488                         }
4489                         tg3_writephy(tp, MII_BMCR, bmcr);
4490                         udelay(40);
4491                 }
4492         }
4493 }
4494
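/* Inverse of tg3_phy_autoneg_cfg(): read BMCR and the advertisement
 * registers back and reconstruct tp->link_config from them, so the
 * driver's link state matches whatever is already programmed in the
 * PHY (e.g. by boot firmware) without forcing a renegotiation.
 */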
4495 static int tg3_phy_pull_config(struct tg3 *tp)
4496 {
4497         int err;
4498         u32 val;
4499
4500         err = tg3_readphy(tp, MII_BMCR, &val);
4501         if (err)
4502                 goto done;
4503
4504         if (!(val & BMCR_ANENABLE)) {
4505                 tp->link_config.autoneg = AUTONEG_DISABLE;
4506                 tp->link_config.advertising = 0;
4507                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4508
4509                 err = -EIO;
4510
4511                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4512                 case 0:
4513                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4514                                 goto done;
4515
4516                         tp->link_config.speed = SPEED_10;
4517                         break;
4518                 case BMCR_SPEED100:
4519                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520                                 goto done;
4521
4522                         tp->link_config.speed = SPEED_100;
4523                         break;
4524                 case BMCR_SPEED1000:
4525                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4526                                 tp->link_config.speed = SPEED_1000;
4527                                 break;
4528                         }
4529                         /* Fall through */
4530                 default:
4531                         goto done;
4532                 }
4533
4534                 if (val & BMCR_FULLDPLX)
4535                         tp->link_config.duplex = DUPLEX_FULL;
4536                 else
4537                         tp->link_config.duplex = DUPLEX_HALF;
4538
4539                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4540
4541                 err = 0;
4542                 goto done;
4543         }
4544
4545         tp->link_config.autoneg = AUTONEG_ENABLE;
4546         tp->link_config.advertising = ADVERTISED_Autoneg;
4547         tg3_flag_set(tp, PAUSE_AUTONEG);
4548
4549         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4550                 u32 adv;
4551
4552                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4553                 if (err)
4554                         goto done;
4555
4556                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4557                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4558
4559                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4560         } else {
4561                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4562         }
4563
4564         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4565                 u32 adv;
4566
4567                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4569                         if (err)
4570                                 goto done;
4571
4572                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4573                 } else {
4574                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4575                         if (err)
4576                                 goto done;
4577
4578                         adv = tg3_decode_flowctrl_1000X(val);
4579                         tp->link_config.flowctrl = adv;
4580
4581                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4582                         adv = mii_adv_to_ethtool_adv_x(val);
4583                 }
4584
4585                 tp->link_config.advertising |= adv;
4586         }
4587
4588 done:
4589         return err;
4590 }
4591
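/* DSP fixups for the BCM5401 PHY; the register/value pairs below are
 * undocumented vendor magic.
 */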
4592 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4593 {
4594         int err;
4595
4596         /* Turn off tap power management and set the extended
4597          * packet length bit. */
4598         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4599
4600         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4601         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4602         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4603         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4604         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4605
4606         udelay(40);
4607
4608         return err;
4609 }
4610
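/* Return true if the EEE configuration currently programmed in the
 * PHY matches tp->eee.  A false return means a PHY reset is required
 * before the new EEE settings take effect.
 */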
4611 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4612 {
4613         struct ethtool_eee eee;
4614
4615         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4616                 return true;
4617
4618         tg3_eee_pull_config(tp, &eee);
4619
4620         if (tp->eee.eee_enabled) {
4621                 if (tp->eee.advertised != eee.advertised ||
4622                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4623                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4624                         return false;
4625         } else {
4626                 /* EEE is disabled but we're advertising */
4627                 if (eee.advertised)
4628                         return false;
4629         }
4630
4631         return true;
4632 }
4633
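/* Verify that MII_ADVERTISE (and MII_CTRL1000 on gigabit-capable
 * PHYs) still match what tp->link_config requests; *lcladv returns
 * the raw local advertisement for flow control resolution.
 */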
4634 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4635 {
4636         u32 advmsk, tgtadv, advertising;
4637
4638         advertising = tp->link_config.advertising;
4639         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4640
4641         advmsk = ADVERTISE_ALL;
4642         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4643                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4644                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4645         }
4646
4647         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4648                 return false;
4649
4650         if ((*lcladv & advmsk) != tgtadv)
4651                 return false;
4652
4653         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4654                 u32 tg3_ctrl;
4655
4656                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4657
4658                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4659                         return false;
4660
4661                 if (tgtadv &&
4662                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4663                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4664                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4665                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4666                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4667                 } else {
4668                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4669                 }
4670
4671                 if (tg3_ctrl != tgtadv)
4672                         return false;
4673         }
4674
4675         return true;
4676 }
4677
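/* Fetch the link partner's abilities from MII_LPA and MII_STAT1000
 * and record them in tp->link_config.rmt_adv as an ethtool mask.
 */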
4678 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4679 {
4680         u32 lpeth = 0;
4681
4682         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4683                 u32 val;
4684
4685                 if (tg3_readphy(tp, MII_STAT1000, &val))
4686                         return false;
4687
4688                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4689         }
4690
4691         if (tg3_readphy(tp, MII_LPA, rmtadv))
4692                 return false;
4693
4694         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4695         tp->link_config.rmt_adv = lpeth;
4696
4697         return true;
4698 }
4699
4700 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4701 {
4702         if (curr_link_up != tp->link_up) {
4703                 if (curr_link_up) {
4704                         netif_carrier_on(tp->dev);
4705                 } else {
4706                         netif_carrier_off(tp->dev);
4707                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4708                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4709                 }
4710
4711                 tg3_link_report(tp);
4712                 return true;
4713         }
4714
4715         return false;
4716 }
4717
4718 static void tg3_clear_mac_status(struct tg3 *tp)
4719 {
4720         tw32(MAC_EVENT, 0);
4721
4722         tw32_f(MAC_STATUS,
4723                MAC_STATUS_SYNC_CHANGED |
4724                MAC_STATUS_CFG_CHANGED |
4725                MAC_STATUS_MI_COMPLETION |
4726                MAC_STATUS_LNKSTATE_CHANGED);
4727         udelay(40);
4728 }
4729
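/* Program the CPMU EEE engine: link-idle detection, the LPI mode
 * bits derived from tp->eee, and the exit/debounce timers.
 */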
4730 static void tg3_setup_eee(struct tg3 *tp)
4731 {
4732         u32 val;
4733
4734         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4735               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4736         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4737                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4738
4739         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4740
4741         tw32_f(TG3_CPMU_EEE_CTRL,
4742                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4743
4744         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4745               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4746               TG3_CPMU_EEEMD_LPI_IN_RX |
4747               TG3_CPMU_EEEMD_EEE_ENABLE;
4748
4749         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4750                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4751
4752         if (tg3_flag(tp, ENABLE_APE))
4753                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4754
4755         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4756
4757         tw32_f(TG3_CPMU_EEE_DBTMR1,
4758                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4759                (tp->eee.tx_lpi_timer & 0xffff));
4760
4761         tw32_f(TG3_CPMU_EEE_DBTMR2,
4762                TG3_CPMU_DBTMR2_APE_TX_2047US |
4763                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4764 }
4765
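/* Bring up (or re-verify) the link on a copper PHY: apply
 * chip-specific PHY workarounds, poll BMSR for link, resolve
 * speed/duplex and flow control, then program MAC_MODE, the LEDs and
 * the CLKREQ workaround to match the resulting link.
 */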
4766 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4767 {
4768         bool current_link_up;
4769         u32 bmsr, val;
4770         u32 lcl_adv, rmt_adv;
4771         u16 current_speed;
4772         u8 current_duplex;
4773         int i, err;
4774
4775         tg3_clear_mac_status(tp);
4776
4777         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4778                 tw32_f(MAC_MI_MODE,
4779                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4780                 udelay(80);
4781         }
4782
4783         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4784
4785         /* Some third-party PHYs need to be reset on link going
4786          * down.
4787          */
4788         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4789              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4790              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4791             tp->link_up) {
4792                 tg3_readphy(tp, MII_BMSR, &bmsr);
4793                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4794                     !(bmsr & BMSR_LSTATUS))
4795                         force_reset = true;
4796         }
4797         if (force_reset)
4798                 tg3_phy_reset(tp);
4799
4800         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4801                 tg3_readphy(tp, MII_BMSR, &bmsr);
4802                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4803                     !tg3_flag(tp, INIT_COMPLETE))
4804                         bmsr = 0;
4805
4806                 if (!(bmsr & BMSR_LSTATUS)) {
4807                         err = tg3_init_5401phy_dsp(tp);
4808                         if (err)
4809                                 return err;
4810
4811                         tg3_readphy(tp, MII_BMSR, &bmsr);
4812                         for (i = 0; i < 1000; i++) {
4813                                 udelay(10);
4814                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4815                                     (bmsr & BMSR_LSTATUS)) {
4816                                         udelay(40);
4817                                         break;
4818                                 }
4819                         }
4820
4821                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4822                             TG3_PHY_REV_BCM5401_B0 &&
4823                             !(bmsr & BMSR_LSTATUS) &&
4824                             tp->link_config.active_speed == SPEED_1000) {
4825                                 err = tg3_phy_reset(tp);
4826                                 if (!err)
4827                                         err = tg3_init_5401phy_dsp(tp);
4828                                 if (err)
4829                                         return err;
4830                         }
4831                 }
4832         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4833                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4834                 /* 5701 {A0,B0} CRC bug workaround */
4835                 tg3_writephy(tp, 0x15, 0x0a75);
4836                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4838                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839         }
4840
4841         /* Clear pending interrupts... */
4842         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4843         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4844
4845         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4846                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4847         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4848                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4849
4850         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4851             tg3_asic_rev(tp) == ASIC_REV_5701) {
4852                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4853                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4854                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4855                 else
4856                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4857         }
4858
4859         current_link_up = false;
4860         current_speed = SPEED_UNKNOWN;
4861         current_duplex = DUPLEX_UNKNOWN;
4862         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4863         tp->link_config.rmt_adv = 0;
4864
4865         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4866                 err = tg3_phy_auxctl_read(tp,
4867                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4868                                           &val);
4869                 if (!err && !(val & (1 << 10))) {
4870                         tg3_phy_auxctl_write(tp,
4871                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4872                                              val | (1 << 10));
4873                         goto relink;
4874                 }
4875         }
4876
4877         bmsr = 0;
4878         for (i = 0; i < 100; i++) {
4879                 tg3_readphy(tp, MII_BMSR, &bmsr);
4880                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4881                     (bmsr & BMSR_LSTATUS))
4882                         break;
4883                 udelay(40);
4884         }
4885
4886         if (bmsr & BMSR_LSTATUS) {
4887                 u32 aux_stat, bmcr;
4888
4889                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4890                 for (i = 0; i < 2000; i++) {
4891                         udelay(10);
4892                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4893                             aux_stat)
4894                                 break;
4895                 }
4896
4897                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4898                                              &current_speed,
4899                                              &current_duplex);
4900
4901                 bmcr = 0;
4902                 for (i = 0; i < 200; i++) {
4903                         tg3_readphy(tp, MII_BMCR, &bmcr);
4904                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4905                                 continue;
4906                         if (bmcr && bmcr != 0x7fff)
4907                                 break;
4908                         udelay(10);
4909                 }
4910
4911                 lcl_adv = 0;
4912                 rmt_adv = 0;
4913
4914                 tp->link_config.active_speed = current_speed;
4915                 tp->link_config.active_duplex = current_duplex;
4916
4917                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4918                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4919
4920                         if ((bmcr & BMCR_ANENABLE) &&
4921                             eee_config_ok &&
4922                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4923                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4924                                 current_link_up = true;
4925
4926                         /* Changes to EEE settings take effect only after
4927                          * a PHY reset.  If we skipped the reset because
4928                          * Link Flap Avoidance is enabled, do it now.
4929                          */
4930                         if (!eee_config_ok &&
4931                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4932                             !force_reset) {
4933                                 tg3_setup_eee(tp);
4934                                 tg3_phy_reset(tp);
4935                         }
4936                 } else {
4937                         if (!(bmcr & BMCR_ANENABLE) &&
4938                             tp->link_config.speed == current_speed &&
4939                             tp->link_config.duplex == current_duplex) {
4940                                 current_link_up = true;
4941                         }
4942                 }
4943
4944                 if (current_link_up &&
4945                     tp->link_config.active_duplex == DUPLEX_FULL) {
4946                         u32 reg, bit;
4947
4948                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4949                                 reg = MII_TG3_FET_GEN_STAT;
4950                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4951                         } else {
4952                                 reg = MII_TG3_EXT_STAT;
4953                                 bit = MII_TG3_EXT_STAT_MDIX;
4954                         }
4955
4956                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4957                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4958
4959                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4960                 }
4961         }
4962
4963 relink:
4964         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4965                 tg3_phy_copper_begin(tp);
4966
4967                 if (tg3_flag(tp, ROBOSWITCH)) {
4968                         current_link_up = true;
4969                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4970                         current_speed = SPEED_1000;
4971                         current_duplex = DUPLEX_FULL;
4972                         tp->link_config.active_speed = current_speed;
4973                         tp->link_config.active_duplex = current_duplex;
4974                 }
4975
4976                 tg3_readphy(tp, MII_BMSR, &bmsr);
4977                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4978                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4979                         current_link_up = true;
4980         }
4981
4982         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4983         if (current_link_up) {
4984                 if (tp->link_config.active_speed == SPEED_100 ||
4985                     tp->link_config.active_speed == SPEED_10)
4986                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987                 else
4988                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4990                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4991         else
4992                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4993
4994         /* For the 5750 core in the BCM4785 chip to work properly
4995          * in RGMII mode, the LED Control Register must be set up.
4996          */
4997         if (tg3_flag(tp, RGMII_MODE)) {
4998                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4999                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5000
5001                 if (tp->link_config.active_speed == SPEED_10)
5002                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5003                 else if (tp->link_config.active_speed == SPEED_100)
5004                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005                                      LED_CTRL_100MBPS_ON);
5006                 else if (tp->link_config.active_speed == SPEED_1000)
5007                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008                                      LED_CTRL_1000MBPS_ON);
5009
5010                 tw32(MAC_LED_CTRL, led_ctrl);
5011                 udelay(40);
5012         }
5013
5014         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5015         if (tp->link_config.active_duplex == DUPLEX_HALF)
5016                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5017
5018         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5019                 if (current_link_up &&
5020                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5021                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5022                 else
5023                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5024         }
5025
5026         /* For reasons unknown, the Netgear GA302T PHY does not
5027          * send or receive packets without this setting.
5028          */
5029         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5030             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5031                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5032                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5033                 udelay(80);
5034         }
5035
5036         tw32_f(MAC_MODE, tp->mac_mode);
5037         udelay(40);
5038
5039         tg3_phy_eee_adjust(tp, current_link_up);
5040
5041         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5042                 /* Polled via timer. */
5043                 tw32_f(MAC_EVENT, 0);
5044         } else {
5045                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5046         }
5047         udelay(40);
5048
5049         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5050             current_link_up &&
5051             tp->link_config.active_speed == SPEED_1000 &&
5052             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5053                 udelay(120);
5054                 tw32_f(MAC_STATUS,
5055                      (MAC_STATUS_SYNC_CHANGED |
5056                       MAC_STATUS_CFG_CHANGED));
5057                 udelay(40);
5058                 tg3_write_mem(tp,
5059                               NIC_SRAM_FIRMWARE_MBOX,
5060                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5061         }
5062
5063         /* Prevent send BD corruption. */
5064         if (tg3_flag(tp, CLKREQ_BUG)) {
5065                 if (tp->link_config.active_speed == SPEED_100 ||
5066                     tp->link_config.active_speed == SPEED_10)
5067                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5068                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5069                 else
5070                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5071                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5072         }
5073
5074         tg3_test_and_report_link_chg(tp, current_link_up);
5075
5076         return 0;
5077 }
5078
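/* State for the software 1000BASE-X autoneg machine below.
 * tg3_fiber_aneg_smachine() implements the IEEE 802.3 clause 37
 * arbitration states by polling the MAC's received config word.
 */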
5079 struct tg3_fiber_aneginfo {
5080         int state;
5081 #define ANEG_STATE_UNKNOWN              0
5082 #define ANEG_STATE_AN_ENABLE            1
5083 #define ANEG_STATE_RESTART_INIT         2
5084 #define ANEG_STATE_RESTART              3
5085 #define ANEG_STATE_DISABLE_LINK_OK      4
5086 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5087 #define ANEG_STATE_ABILITY_DETECT       6
5088 #define ANEG_STATE_ACK_DETECT_INIT      7
5089 #define ANEG_STATE_ACK_DETECT           8
5090 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5091 #define ANEG_STATE_COMPLETE_ACK         10
5092 #define ANEG_STATE_IDLE_DETECT_INIT     11
5093 #define ANEG_STATE_IDLE_DETECT          12
5094 #define ANEG_STATE_LINK_OK              13
5095 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5096 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5097
5098         u32 flags;
5099 #define MR_AN_ENABLE            0x00000001
5100 #define MR_RESTART_AN           0x00000002
5101 #define MR_AN_COMPLETE          0x00000004
5102 #define MR_PAGE_RX              0x00000008
5103 #define MR_NP_LOADED            0x00000010
5104 #define MR_TOGGLE_TX            0x00000020
5105 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5106 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5107 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5108 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5109 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5110 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5111 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5112 #define MR_TOGGLE_RX            0x00002000
5113 #define MR_NP_RX                0x00004000
5114
5115 #define MR_LINK_OK              0x80000000
5116
5117         unsigned long link_time, cur_time;
5118
5119         u32 ability_match_cfg;
5120         int ability_match_count;
5121
5122         char ability_match, idle_match, ack_match;
5123
5124         u32 txconfig, rxconfig;
5125 #define ANEG_CFG_NP             0x00000080
5126 #define ANEG_CFG_ACK            0x00000040
5127 #define ANEG_CFG_RF2            0x00000020
5128 #define ANEG_CFG_RF1            0x00000010
5129 #define ANEG_CFG_PS2            0x00000001
5130 #define ANEG_CFG_PS1            0x00008000
5131 #define ANEG_CFG_HD             0x00004000
5132 #define ANEG_CFG_FD             0x00002000
5133 #define ANEG_CFG_INVAL          0x00001f06
5134
5135 };
5136 #define ANEG_OK         0
5137 #define ANEG_DONE       1
5138 #define ANEG_TIMER_ENAB 2
5139 #define ANEG_FAILED     -1
5140
5141 #define ANEG_STATE_SETTLE_TIME  10000
5142
5143 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5144                                    struct tg3_fiber_aneginfo *ap)
5145 {
5146         u16 flowctrl;
5147         unsigned long delta;
5148         u32 rx_cfg_reg;
5149         int ret;
5150
5151         if (ap->state == ANEG_STATE_UNKNOWN) {
5152                 ap->rxconfig = 0;
5153                 ap->link_time = 0;
5154                 ap->cur_time = 0;
5155                 ap->ability_match_cfg = 0;
5156                 ap->ability_match_count = 0;
5157                 ap->ability_match = 0;
5158                 ap->idle_match = 0;
5159                 ap->ack_match = 0;
5160         }
5161         ap->cur_time++;
5162
5163         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5164                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5165
5166                 if (rx_cfg_reg != ap->ability_match_cfg) {
5167                         ap->ability_match_cfg = rx_cfg_reg;
5168                         ap->ability_match = 0;
5169                         ap->ability_match_count = 0;
5170                 } else {
5171                         if (++ap->ability_match_count > 1) {
5172                                 ap->ability_match = 1;
5173                                 ap->ability_match_cfg = rx_cfg_reg;
5174                         }
5175                 }
5176                 if (rx_cfg_reg & ANEG_CFG_ACK)
5177                         ap->ack_match = 1;
5178                 else
5179                         ap->ack_match = 0;
5180
5181                 ap->idle_match = 0;
5182         } else {
5183                 ap->idle_match = 1;
5184                 ap->ability_match_cfg = 0;
5185                 ap->ability_match_count = 0;
5186                 ap->ability_match = 0;
5187                 ap->ack_match = 0;
5188
5189                 rx_cfg_reg = 0;
5190         }
5191
5192         ap->rxconfig = rx_cfg_reg;
5193         ret = ANEG_OK;
5194
5195         switch (ap->state) {
5196         case ANEG_STATE_UNKNOWN:
5197                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5198                         ap->state = ANEG_STATE_AN_ENABLE;
5199
5200                 /* fall through */
5201         case ANEG_STATE_AN_ENABLE:
5202                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5203                 if (ap->flags & MR_AN_ENABLE) {
5204                         ap->link_time = 0;
5205                         ap->cur_time = 0;
5206                         ap->ability_match_cfg = 0;
5207                         ap->ability_match_count = 0;
5208                         ap->ability_match = 0;
5209                         ap->idle_match = 0;
5210                         ap->ack_match = 0;
5211
5212                         ap->state = ANEG_STATE_RESTART_INIT;
5213                 } else {
5214                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5215                 }
5216                 break;
5217
5218         case ANEG_STATE_RESTART_INIT:
5219                 ap->link_time = ap->cur_time;
5220                 ap->flags &= ~(MR_NP_LOADED);
5221                 ap->txconfig = 0;
5222                 tw32(MAC_TX_AUTO_NEG, 0);
5223                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5224                 tw32_f(MAC_MODE, tp->mac_mode);
5225                 udelay(40);
5226
5227                 ret = ANEG_TIMER_ENAB;
5228                 ap->state = ANEG_STATE_RESTART;
5229
5230                 /* fall through */
5231         case ANEG_STATE_RESTART:
5232                 delta = ap->cur_time - ap->link_time;
5233                 if (delta > ANEG_STATE_SETTLE_TIME)
5234                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5235                 else
5236                         ret = ANEG_TIMER_ENAB;
5237                 break;
5238
5239         case ANEG_STATE_DISABLE_LINK_OK:
5240                 ret = ANEG_DONE;
5241                 break;
5242
5243         case ANEG_STATE_ABILITY_DETECT_INIT:
5244                 ap->flags &= ~(MR_TOGGLE_TX);
5245                 ap->txconfig = ANEG_CFG_FD;
5246                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247                 if (flowctrl & ADVERTISE_1000XPAUSE)
5248                         ap->txconfig |= ANEG_CFG_PS1;
5249                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5250                         ap->txconfig |= ANEG_CFG_PS2;
5251                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5252                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5253                 tw32_f(MAC_MODE, tp->mac_mode);
5254                 udelay(40);
5255
5256                 ap->state = ANEG_STATE_ABILITY_DETECT;
5257                 break;
5258
5259         case ANEG_STATE_ABILITY_DETECT:
5260                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5261                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5262                 break;
5263
5264         case ANEG_STATE_ACK_DETECT_INIT:
5265                 ap->txconfig |= ANEG_CFG_ACK;
5266                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268                 tw32_f(MAC_MODE, tp->mac_mode);
5269                 udelay(40);
5270
5271                 ap->state = ANEG_STATE_ACK_DETECT;
5272
5273                 /* fall through */
5274         case ANEG_STATE_ACK_DETECT:
5275                 if (ap->ack_match != 0) {
5276                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5277                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5278                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5279                         } else {
5280                                 ap->state = ANEG_STATE_AN_ENABLE;
5281                         }
5282                 } else if (ap->ability_match != 0 &&
5283                            ap->rxconfig == 0) {
5284                         ap->state = ANEG_STATE_AN_ENABLE;
5285                 }
5286                 break;
5287
5288         case ANEG_STATE_COMPLETE_ACK_INIT:
5289                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5290                         ret = ANEG_FAILED;
5291                         break;
5292                 }
5293                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5294                                MR_LP_ADV_HALF_DUPLEX |
5295                                MR_LP_ADV_SYM_PAUSE |
5296                                MR_LP_ADV_ASYM_PAUSE |
5297                                MR_LP_ADV_REMOTE_FAULT1 |
5298                                MR_LP_ADV_REMOTE_FAULT2 |
5299                                MR_LP_ADV_NEXT_PAGE |
5300                                MR_TOGGLE_RX |
5301                                MR_NP_RX);
5302                 if (ap->rxconfig & ANEG_CFG_FD)
5303                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5304                 if (ap->rxconfig & ANEG_CFG_HD)
5305                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5306                 if (ap->rxconfig & ANEG_CFG_PS1)
5307                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5308                 if (ap->rxconfig & ANEG_CFG_PS2)
5309                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5310                 if (ap->rxconfig & ANEG_CFG_RF1)
5311                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5312                 if (ap->rxconfig & ANEG_CFG_RF2)
5313                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5314                 if (ap->rxconfig & ANEG_CFG_NP)
5315                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5316
5317                 ap->link_time = ap->cur_time;
5318
5319                 ap->flags ^= (MR_TOGGLE_TX);
5320                 if (ap->rxconfig & 0x0008)
5321                         ap->flags |= MR_TOGGLE_RX;
5322                 if (ap->rxconfig & ANEG_CFG_NP)
5323                         ap->flags |= MR_NP_RX;
5324                 ap->flags |= MR_PAGE_RX;
5325
5326                 ap->state = ANEG_STATE_COMPLETE_ACK;
5327                 ret = ANEG_TIMER_ENAB;
5328                 break;
5329
5330         case ANEG_STATE_COMPLETE_ACK:
5331                 if (ap->ability_match != 0 &&
5332                     ap->rxconfig == 0) {
5333                         ap->state = ANEG_STATE_AN_ENABLE;
5334                         break;
5335                 }
5336                 delta = ap->cur_time - ap->link_time;
5337                 if (delta > ANEG_STATE_SETTLE_TIME) {
5338                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5339                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5340                         } else {
5341                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5342                                     !(ap->flags & MR_NP_RX)) {
5343                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5344                                 } else {
5345                                         ret = ANEG_FAILED;
5346                                 }
5347                         }
5348                 }
5349                 break;
5350
5351         case ANEG_STATE_IDLE_DETECT_INIT:
5352                 ap->link_time = ap->cur_time;
5353                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5354                 tw32_f(MAC_MODE, tp->mac_mode);
5355                 udelay(40);
5356
5357                 ap->state = ANEG_STATE_IDLE_DETECT;
5358                 ret = ANEG_TIMER_ENAB;
5359                 break;
5360
5361         case ANEG_STATE_IDLE_DETECT:
5362                 if (ap->ability_match != 0 &&
5363                     ap->rxconfig == 0) {
5364                         ap->state = ANEG_STATE_AN_ENABLE;
5365                         break;
5366                 }
5367                 delta = ap->cur_time - ap->link_time;
5368                 if (delta > ANEG_STATE_SETTLE_TIME) {
5369                         /* XXX another gem from the Broadcom driver :( */
5370                         ap->state = ANEG_STATE_LINK_OK;
5371                 }
5372                 break;
5373
5374         case ANEG_STATE_LINK_OK:
5375                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5376                 ret = ANEG_DONE;
5377                 break;
5378
5379         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5380                 /* ??? unimplemented */
5381                 break;
5382
5383         case ANEG_STATE_NEXT_PAGE_WAIT:
5384                 /* ??? unimplemented */
5385                 break;
5386
5387         default:
5388                 ret = ANEG_FAILED;
5389                 break;
5390         }
5391
5392         return ret;
5393 }
5394
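/* Drive the autoneg state machine to completion by hand: tick it
 * once per microsecond for up to ~195 ms, then report the negotiated
 * abilities through *txflags/*rxflags.
 */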
5395 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5396 {
5397         int res = 0;
5398         struct tg3_fiber_aneginfo aninfo;
5399         int status = ANEG_FAILED;
5400         unsigned int tick;
5401         u32 tmp;
5402
5403         tw32_f(MAC_TX_AUTO_NEG, 0);
5404
5405         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5406         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5407         udelay(40);
5408
5409         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5410         udelay(40);
5411
5412         memset(&aninfo, 0, sizeof(aninfo));
5413         aninfo.flags |= MR_AN_ENABLE;
5414         aninfo.state = ANEG_STATE_UNKNOWN;
5415         aninfo.cur_time = 0;
5416         tick = 0;
5417         while (++tick < 195000) {
5418                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5419                 if (status == ANEG_DONE || status == ANEG_FAILED)
5420                         break;
5421
5422                 udelay(1);
5423         }
5424
5425         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5426         tw32_f(MAC_MODE, tp->mac_mode);
5427         udelay(40);
5428
5429         *txflags = aninfo.txconfig;
5430         *rxflags = aninfo.flags;
5431
5432         if (status == ANEG_DONE &&
5433             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5434                              MR_LP_ADV_FULL_DUPLEX)))
5435                 res = 1;
5436
5437         return res;
5438 }
5439
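/* One-time init sequence for the BCM8002 SerDes PHY using magic
 * vendor register writes; after initial bring-up it is re-run only
 * while we still have PCS sync (i.e. a live link).
 */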
5440 static void tg3_init_bcm8002(struct tg3 *tp)
5441 {
5442         u32 mac_status = tr32(MAC_STATUS);
5443         int i;
5444
5445         /* Reset on first-time init or when we already have a link. */
5446         if (tg3_flag(tp, INIT_COMPLETE) &&
5447             !(mac_status & MAC_STATUS_PCS_SYNCED))
5448                 return;
5449
5450         /* Set PLL lock range. */
5451         tg3_writephy(tp, 0x16, 0x8007);
5452
5453         /* SW reset */
5454         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5455
5456         /* Wait for reset to complete. */
5457         /* XXX schedule_timeout() ... */
5458         for (i = 0; i < 500; i++)
5459                 udelay(10);
5460
5461         /* Config mode; select PMA/Ch 1 regs. */
5462         tg3_writephy(tp, 0x10, 0x8411);
5463
5464         /* Enable auto-lock and comdet, select txclk for tx. */
5465         tg3_writephy(tp, 0x11, 0x0a10);
5466
5467         tg3_writephy(tp, 0x18, 0x00a0);
5468         tg3_writephy(tp, 0x16, 0x41ff);
5469
5470         /* Assert and deassert POR. */
5471         tg3_writephy(tp, 0x13, 0x0400);
5472         udelay(40);
5473         tg3_writephy(tp, 0x13, 0x0000);
5474
5475         tg3_writephy(tp, 0x11, 0x0a50);
5476         udelay(40);
5477         tg3_writephy(tp, 0x11, 0x0a10);
5478
5479         /* Wait for signal to stabilize */
5480         /* XXX schedule_timeout() ... */
5481         for (i = 0; i < 15000; i++)
5482                 udelay(10);
5483
5484         /* Deselect the channel register so we can read the PHYID
5485          * later.
5486          */
5487         tg3_writephy(tp, 0x10, 0x8011);
5488 }
5489
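/* Fiber link setup using the hardware SG_DIG autoneg block: program
 * SG_DIG_CTRL with the desired pause advertisement and let the
 * hardware negotiate, falling back to parallel detection when the
 * link partner never completes autoneg.
 */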
5490 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5491 {
5492         u16 flowctrl;
5493         bool current_link_up;
5494         u32 sg_dig_ctrl, sg_dig_status;
5495         u32 serdes_cfg, expected_sg_dig_ctrl;
5496         int workaround, port_a;
5497
5498         serdes_cfg = 0;
5499         expected_sg_dig_ctrl = 0;
5500         workaround = 0;
5501         port_a = 1;
5502         current_link_up = false;
5503
5504         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5505             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5506                 workaround = 1;
5507                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5508                         port_a = 0;
5509
5510                 /* Preserve bits 0-11,13,14 (signal pre-emphasis)
5511                  * and bits 20-23 (voltage regulator). */
5512                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5513         }
5514
5515         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5516
5517         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5518                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5519                         if (workaround) {
5520                                 u32 val = serdes_cfg;
5521
5522                                 if (port_a)
5523                                         val |= 0xc010000;
5524                                 else
5525                                         val |= 0x4010000;
5526                                 tw32_f(MAC_SERDES_CFG, val);
5527                         }
5528
5529                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5530                 }
5531                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5532                         tg3_setup_flow_control(tp, 0, 0);
5533                         current_link_up = true;
5534                 }
5535                 goto out;
5536         }
5537
5538         /* Want auto-negotiation. */
5539         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5540
5541         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5542         if (flowctrl & ADVERTISE_1000XPAUSE)
5543                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5544         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5545                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5546
5547         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5548                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5549                     tp->serdes_counter &&
5550                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5551                                     MAC_STATUS_RCVD_CFG)) ==
5552                      MAC_STATUS_PCS_SYNCED)) {
5553                         tp->serdes_counter--;
5554                         current_link_up = true;
5555                         goto out;
5556                 }
5557 restart_autoneg:
5558                 if (workaround)
5559                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5560                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5561                 udelay(5);
5562                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5563
5564                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5565                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5567                                  MAC_STATUS_SIGNAL_DET)) {
5568                 sg_dig_status = tr32(SG_DIG_STATUS);
5569                 mac_status = tr32(MAC_STATUS);
5570
5571                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5572                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5573                         u32 local_adv = 0, remote_adv = 0;
5574
5575                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5576                                 local_adv |= ADVERTISE_1000XPAUSE;
5577                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5578                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5579
5580                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5581                                 remote_adv |= LPA_1000XPAUSE;
5582                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5583                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5584
5585                         tp->link_config.rmt_adv =
5586                                            mii_adv_to_ethtool_adv_x(remote_adv);
5587
5588                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5589                         current_link_up = true;
5590                         tp->serdes_counter = 0;
5591                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5592                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5593                         if (tp->serdes_counter)
5594                                 tp->serdes_counter--;
5595                         else {
5596                                 if (workaround) {
5597                                         u32 val = serdes_cfg;
5598
5599                                         if (port_a)
5600                                                 val |= 0xc010000;
5601                                         else
5602                                                 val |= 0x4010000;
5603
5604                                         tw32_f(MAC_SERDES_CFG, val);
5605                                 }
5606
5607                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5608                                 udelay(40);
5609
5610                                 /* Parallel detection: link is up only if
5611                                  * we have PCS_SYNC and are not receiving
5612                                  * config code words. */
5613                                 mac_status = tr32(MAC_STATUS);
5614                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5615                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5616                                         tg3_setup_flow_control(tp, 0, 0);
5617                                         current_link_up = true;
5618                                         tp->phy_flags |=
5619                                                 TG3_PHYFLG_PARALLEL_DETECT;
5620                                         tp->serdes_counter =
5621                                                 SERDES_PARALLEL_DET_TIMEOUT;
5622                                 } else
5623                                         goto restart_autoneg;
5624                         }
5625                 }
5626         } else {
5627                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5628                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5629         }
5630
5631 out:
5632         return current_link_up;
5633 }
5634
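/* Fiber link setup without the SG_DIG block: run the software clause
 * 37 state machine (fiber_autoneg()) when autoneg is enabled,
 * otherwise force a 1000FD link.
 */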
5635 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5636 {
5637         bool current_link_up = false;
5638
5639         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5640                 goto out;
5641
5642         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5643                 u32 txflags, rxflags;
5644                 int i;
5645
5646                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5647                         u32 local_adv = 0, remote_adv = 0;
5648
5649                         if (txflags & ANEG_CFG_PS1)
5650                                 local_adv |= ADVERTISE_1000XPAUSE;
5651                         if (txflags & ANEG_CFG_PS2)
5652                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5653
5654                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5655                                 remote_adv |= LPA_1000XPAUSE;
5656                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5657                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5658
5659                         tp->link_config.rmt_adv =
5660                                            mii_adv_to_ethtool_adv_x(remote_adv);
5661
5662                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5663
5664                         current_link_up = true;
5665                 }
5666                 for (i = 0; i < 30; i++) {
5667                         udelay(20);
5668                         tw32_f(MAC_STATUS,
5669                                (MAC_STATUS_SYNC_CHANGED |
5670                                 MAC_STATUS_CFG_CHANGED));
5671                         udelay(40);
5672                         if ((tr32(MAC_STATUS) &
5673                              (MAC_STATUS_SYNC_CHANGED |
5674                               MAC_STATUS_CFG_CHANGED)) == 0)
5675                                 break;
5676                 }
5677
5678                 mac_status = tr32(MAC_STATUS);
5679                 if (!current_link_up &&
5680                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5681                     !(mac_status & MAC_STATUS_RCVD_CFG))
5682                         current_link_up = true;
5683         } else {
5684                 tg3_setup_flow_control(tp, 0, 0);
5685
5686                 /* Forcing 1000FD link up. */
5687                 current_link_up = true;
5688
5689                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5690                 udelay(40);
5691
5692                 tw32_f(MAC_MODE, tp->mac_mode);
5693                 udelay(40);
5694         }
5695
5696 out:
5697         return current_link_up;
5698 }
5699
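/* Top-level link setup for TBI/fiber ports: put the MAC into TBI
 * mode, run hardware or software autoneg as appropriate, then
 * reflect the result in the LED controls and the reported link
 * state.
 */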
5700 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5701 {
5702         u32 orig_pause_cfg;
5703         u16 orig_active_speed;
5704         u8 orig_active_duplex;
5705         u32 mac_status;
5706         bool current_link_up;
5707         int i;
5708
5709         orig_pause_cfg = tp->link_config.active_flowctrl;
5710         orig_active_speed = tp->link_config.active_speed;
5711         orig_active_duplex = tp->link_config.active_duplex;
5712
5713         if (!tg3_flag(tp, HW_AUTONEG) &&
5714             tp->link_up &&
5715             tg3_flag(tp, INIT_COMPLETE)) {
5716                 mac_status = tr32(MAC_STATUS);
5717                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5718                                MAC_STATUS_SIGNAL_DET |
5719                                MAC_STATUS_CFG_CHANGED |
5720                                MAC_STATUS_RCVD_CFG);
5721                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5722                                    MAC_STATUS_SIGNAL_DET)) {
5723                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5724                                             MAC_STATUS_CFG_CHANGED));
5725                         return 0;
5726                 }
5727         }
5728
5729         tw32_f(MAC_TX_AUTO_NEG, 0);
5730
5731         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5732         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5733         tw32_f(MAC_MODE, tp->mac_mode);
5734         udelay(40);
5735
5736         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5737                 tg3_init_bcm8002(tp);
5738
5739         /* Enable link change events even while polling the serdes. */
5740         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5741         udelay(40);
5742
5743         current_link_up = false;
5744         tp->link_config.rmt_adv = 0;
5745         mac_status = tr32(MAC_STATUS);
5746
5747         if (tg3_flag(tp, HW_AUTONEG))
5748                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5749         else
5750                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5751
5752         tp->napi[0].hw_status->status =
5753                 (SD_STATUS_UPDATED |
5754                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5755
5756         for (i = 0; i < 100; i++) {
5757                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5758                                     MAC_STATUS_CFG_CHANGED));
5759                 udelay(5);
5760                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5761                                          MAC_STATUS_CFG_CHANGED |
5762                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5763                         break;
5764         }
5765
5766         mac_status = tr32(MAC_STATUS);
5767         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5768                 current_link_up = false;
5769                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5770                     tp->serdes_counter == 0) {
5771                         tw32_f(MAC_MODE, (tp->mac_mode |
5772                                           MAC_MODE_SEND_CONFIGS));
5773                         udelay(1);
5774                         tw32_f(MAC_MODE, tp->mac_mode);
5775                 }
5776         }
5777
5778         if (current_link_up) {
5779                 tp->link_config.active_speed = SPEED_1000;
5780                 tp->link_config.active_duplex = DUPLEX_FULL;
5781                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5782                                     LED_CTRL_LNKLED_OVERRIDE |
5783                                     LED_CTRL_1000MBPS_ON));
5784         } else {
5785                 tp->link_config.active_speed = SPEED_UNKNOWN;
5786                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_TRAFFIC_OVERRIDE));
5790         }
5791
5792         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5793                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5794                 if (orig_pause_cfg != now_pause_cfg ||
5795                     orig_active_speed != tp->link_config.active_speed ||
5796                     orig_active_duplex != tp->link_config.active_duplex)
5797                         tg3_link_report(tp);
5798         }
5799
5800         return 0;
5801 }
5802
5803 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5804 {
5805         int err = 0;
5806         u32 bmsr, bmcr;
5807         u16 current_speed = SPEED_UNKNOWN;
5808         u8 current_duplex = DUPLEX_UNKNOWN;
5809         bool current_link_up = false;
5810         u32 local_adv, remote_adv, sgsr;
5811
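        /* On 5719/5720 serdes devices operating in SGMII mode, link
         * state, speed and duplex come straight from the serdes status
         * register rather than from MII autonegotiation.
         */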
5812         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5813              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5814              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5815              (sgsr & SERDES_TG3_SGMII_MODE)) {
5816
5817                 if (force_reset)
5818                         tg3_phy_reset(tp);
5819
5820                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5821
5822                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5823                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824                 } else {
5825                         current_link_up = true;
5826                         if (sgsr & SERDES_TG3_SPEED_1000) {
5827                                 current_speed = SPEED_1000;
5828                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5830                                 current_speed = SPEED_100;
5831                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5832                         } else {
5833                                 current_speed = SPEED_10;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835                         }
5836
5837                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5838                                 current_duplex = DUPLEX_FULL;
5839                         else
5840                                 current_duplex = DUPLEX_HALF;
5841                 }
5842
5843                 tw32_f(MAC_MODE, tp->mac_mode);
5844                 udelay(40);
5845
5846                 tg3_clear_mac_status(tp);
5847
5848                 goto fiber_setup_done;
5849         }
5850
5851         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5852         tw32_f(MAC_MODE, tp->mac_mode);
5853         udelay(40);
5854
5855         tg3_clear_mac_status(tp);
5856
5857         if (force_reset)
5858                 tg3_phy_reset(tp);
5859
5860         tp->link_config.rmt_adv = 0;
5861
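        /* The BMSR link status bit is latched low; read the register
         * twice to obtain the current link state.
         */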
5862         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5863         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5865                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5866                         bmsr |= BMSR_LSTATUS;
5867                 else
5868                         bmsr &= ~BMSR_LSTATUS;
5869         }
5870
5871         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5872
5873         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5874             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5875                 /* do nothing, just check for link up at the end */
5876         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5877                 u32 adv, newadv;
5878
5879                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5880                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5881                                  ADVERTISE_1000XPAUSE |
5882                                  ADVERTISE_1000XPSE_ASYM |
5883                                  ADVERTISE_SLCT);
5884
5885                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5886                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5887
5888                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5889                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5890                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5891                         tg3_writephy(tp, MII_BMCR, bmcr);
5892
5893                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5895                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5896
5897                         return err;
5898                 }
5899         } else {
5900                 u32 new_bmcr;
5901
5902                 bmcr &= ~BMCR_SPEED1000;
5903                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5904
5905                 if (tp->link_config.duplex == DUPLEX_FULL)
5906                         new_bmcr |= BMCR_FULLDPLX;
5907
5908                 if (new_bmcr != bmcr) {
5909                         /* BMCR_SPEED1000 is a reserved bit that needs
5910                          * to be set on write.
5911                          */
5912                         new_bmcr |= BMCR_SPEED1000;
5913
5914                         /* Force a link down. */
5915                         if (tp->link_up) {
5916                                 u32 adv;
5917
5918                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5919                                 adv &= ~(ADVERTISE_1000XFULL |
5920                                          ADVERTISE_1000XHALF |
5921                                          ADVERTISE_SLCT);
5922                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5923                                 tg3_writephy(tp, MII_BMCR, bmcr |
5924                                                            BMCR_ANRESTART |
5925                                                            BMCR_ANENABLE);
5926                                 udelay(10);
5927                                 tg3_carrier_off(tp);
5928                         }
5929                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5930                         bmcr = new_bmcr;
5931                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5932                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5933                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5934                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5935                                         bmsr |= BMSR_LSTATUS;
5936                                 else
5937                                         bmsr &= ~BMSR_LSTATUS;
5938                         }
5939                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5940                 }
5941         }
5942
5943         if (bmsr & BMSR_LSTATUS) {
5944                 current_speed = SPEED_1000;
5945                 current_link_up = true;
5946                 if (bmcr & BMCR_FULLDPLX)
5947                         current_duplex = DUPLEX_FULL;
5948                 else
5949                         current_duplex = DUPLEX_HALF;
5950
5951                 local_adv = 0;
5952                 remote_adv = 0;
5953
5954                 if (bmcr & BMCR_ANENABLE) {
5955                         u32 common;
5956
5957                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5958                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5959                         common = local_adv & remote_adv;
5960                         if (common & (ADVERTISE_1000XHALF |
5961                                       ADVERTISE_1000XFULL)) {
5962                                 if (common & ADVERTISE_1000XFULL)
5963                                         current_duplex = DUPLEX_FULL;
5964                                 else
5965                                         current_duplex = DUPLEX_HALF;
5966
5967                                 tp->link_config.rmt_adv =
5968                                            mii_adv_to_ethtool_adv_x(remote_adv);
5969                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5970                                 /* Link is up via parallel detect */
5971                         } else {
5972                                 current_link_up = false;
5973                         }
5974                 }
5975         }
5976
5977 fiber_setup_done:
5978         if (current_link_up && current_duplex == DUPLEX_FULL)
5979                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5980
5981         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5982         if (tp->link_config.active_duplex == DUPLEX_HALF)
5983                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5984
5985         tw32_f(MAC_MODE, tp->mac_mode);
5986         udelay(40);
5987
5988         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5989
5990         tp->link_config.active_speed = current_speed;
5991         tp->link_config.active_duplex = current_duplex;
5992
5993         tg3_test_and_report_link_chg(tp, current_link_up);
5994         return err;
5995 }
5996
5997 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5998 {
5999         if (tp->serdes_counter) {
6000                 /* Give autoneg time to complete. */
6001                 tp->serdes_counter--;
6002                 return;
6003         }
6004
6005         if (!tp->link_up &&
6006             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6007                 u32 bmcr;
6008
6009                 tg3_readphy(tp, MII_BMCR, &bmcr);
6010                 if (bmcr & BMCR_ANENABLE) {
6011                         u32 phy1, phy2;
6012
6013                         /* Select shadow register 0x1f */
6014                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6015                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6016
6017                         /* Select expansion interrupt status register */
6018                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019                                          MII_TG3_DSP_EXP1_INT_STAT);
6020                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6021                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6022
6023                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6024                                 /* We have signal detect and are not receiving
6025                                  * config code words, so the link is up via
6026                                  * parallel detection.
6027                                  */
6028
6029                                 bmcr &= ~BMCR_ANENABLE;
6030                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6031                                 tg3_writephy(tp, MII_BMCR, bmcr);
6032                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6033                         }
6034                 }
6035         } else if (tp->link_up &&
6036                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6037                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6038                 u32 phy2;
6039
6040                 /* Select expansion interrupt status register */
6041                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6042                                  MII_TG3_DSP_EXP1_INT_STAT);
6043                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6044                 if (phy2 & 0x20) {
6045                         u32 bmcr;
6046
6047                         /* Config code words received, turn on autoneg. */
6048                         tg3_readphy(tp, MII_BMCR, &bmcr);
6049                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6050
6051                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6053                 }
6054         }
6055 }
6056
6057 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6058 {
6059         u32 val;
6060         int err;
6061
6062         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6063                 err = tg3_setup_fiber_phy(tp, force_reset);
6064         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6065                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6066         else
6067                 err = tg3_setup_copper_phy(tp, force_reset);
6068
6069         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6070                 u32 scale;
6071
6072                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6073                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6074                         scale = 65;
6075                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6076                         scale = 6;
6077                 else
6078                         scale = 12;
6079
6080                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6081                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6082                 tw32(GRC_MISC_CFG, val);
6083         }
6084
6085         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6086               (6 << TX_LENGTHS_IPG_SHIFT);
6087         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6088             tg3_asic_rev(tp) == ASIC_REV_5762)
6089                 val |= tr32(MAC_TX_LENGTHS) &
6090                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6091                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6092
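        /* Half-duplex gigabit requires an extended slot time; every
         * other speed/duplex combination uses the standard value.
         */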
6093         if (tp->link_config.active_speed == SPEED_1000 &&
6094             tp->link_config.active_duplex == DUPLEX_HALF)
6095                 tw32(MAC_TX_LENGTHS, val |
6096                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6097         else
6098                 tw32(MAC_TX_LENGTHS, val |
6099                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6100
6101         if (!tg3_flag(tp, 5705_PLUS)) {
6102                 if (tp->link_up) {
6103                         tw32(HOSTCC_STAT_COAL_TICKS,
6104                              tp->coal.stats_block_coalesce_usecs);
6105                 } else {
6106                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6107                 }
6108         }
6109
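        /* With the ASPM workaround active, use the configured L1 entry
         * threshold while the link is down, and saturate the threshold
         * field (all ones) while the link is up.
         */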
6110         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6111                 val = tr32(PCIE_PWR_MGMT_THRESH);
6112                 if (!tp->link_up)
6113                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6114                               tp->pwrmgmt_thresh;
6115                 else
6116                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6117                 tw32(PCIE_PWR_MGMT_THRESH, val);
6118         }
6119
6120         return err;
6121 }
6122
6123 /* tp->lock must be held */
6124 static u64 tg3_refclk_read(struct tg3 *tp)
6125 {
6126         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6127         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6128 }
6129
6130 /* tp->lock must be held */
6131 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6132 {
6133         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6134
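        /* Stop the reference clock, load both 32-bit halves, then
         * resume, so the counter is never seen running with a torn
         * 64-bit value.
         */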
6135         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6136         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6137         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6138         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6139 }
6140
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6142 static inline void tg3_full_unlock(struct tg3 *tp);

6143 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6144 {
6145         struct tg3 *tp = netdev_priv(dev);
6146
6147         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6148                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6149                                 SOF_TIMESTAMPING_SOFTWARE;
6150
6151         if (tg3_flag(tp, PTP_CAPABLE)) {
6152                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153                                         SOF_TIMESTAMPING_RX_HARDWARE |
6154                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6155         }
6156
6157         if (tp->ptp_clock)
6158                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159         else
6160                 info->phc_index = -1;
6161
6162         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6163
6164         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6165                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6167                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6168         return 0;
6169 }
6170
6171 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6172 {
6173         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6174         bool neg_adj = false;
6175         u32 correction = 0;
6176
6177         if (ppb < 0) {
6178                 neg_adj = true;
6179                 ppb = -ppb;
6180         }
6181
6182         /* Frequency adjustment is performed in hardware with a 24 bit
6183          * accumulator and a programmable correction value.  On each clock
6184          * cycle, the correction value is added to the accumulator and,
6185          * when it overflows, the time counter is incremented/decremented.
6186          *
6187          * So the conversion from ppb to the correction value is
6188          *              ppb * (1 << 24) / 1000000000
6189          */
6190         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6191                      TG3_EAV_REF_CLK_CORRECT_MASK;
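        /* Illustrative example: ppb = 1000 (1 ppm) gives
         * correction = 1000 * 16777216 / 1000000000 = 16 (truncated),
         * i.e. roughly one extra tick every 2^24/16 ~= 10^6 clocks.
         */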
6192
6193         tg3_full_lock(tp, 0);
6194
6195         if (correction)
6196                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6197                      TG3_EAV_REF_CLK_CORRECT_EN |
6198                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6199         else
6200                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6201
6202         tg3_full_unlock(tp);
6203
6204         return 0;
6205 }
6206
6207 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6208 {
6209         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210
6211         tg3_full_lock(tp, 0);
6212         tp->ptp_adjust += delta;
6213         tg3_full_unlock(tp);
6214
6215         return 0;
6216 }
6217
6218 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6219 {
6220         u64 ns;
6221         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223         tg3_full_lock(tp, 0);
6224         ns = tg3_refclk_read(tp);
6225         ns += tp->ptp_adjust;
6226         tg3_full_unlock(tp);
6227
6228         *ts = ns_to_timespec64(ns);
6229
6230         return 0;
6231 }
6232
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234                            const struct timespec64 *ts)
6235 {
6236         u64 ns;
6237         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6238
6239         ns = timespec64_to_ns(ts);
6240
6241         tg3_full_lock(tp, 0);
6242         tg3_refclk_write(tp, ns);
6243         tp->ptp_adjust = 0;
6244         tg3_full_unlock(tp);
6245
6246         return 0;
6247 }
6248
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250                           struct ptp_clock_request *rq, int on)
6251 {
6252         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253         u32 clock_ctl;
6254         int rval = 0;
6255
6256         switch (rq->type) {
6257         case PTP_CLK_REQ_PEROUT:
6258                 if (rq->perout.index != 0)
6259                         return -EINVAL;
6260
6261                 tg3_full_lock(tp, 0);
6262                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6263                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6264
6265                 if (on) {
6266                         u64 nsec;
6267
6268                         nsec = rq->perout.start.sec * 1000000000ULL +
6269                                rq->perout.start.nsec;
6270
6271                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6272                                 netdev_warn(tp->dev,
6273                                             "Device supports only a one-shot timesync output, period must be 0\n");
6274                                 rval = -EINVAL;
6275                                 goto err_out;
6276                         }
6277
6278                         if (nsec & (1ULL << 63)) {
6279                                 netdev_warn(tp->dev,
6280                                             "Start value (nsec) exceeds the limit; start must fit in 63 bits\n");
6281                                 rval = -EINVAL;
6282                                 goto err_out;
6283                         }
6284
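                        /* Arm watchdog 0 with the 63-bit start time; the
                         * timesync output fires once when the reference
                         * clock reaches it (one-shot, per the check above).
                         */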
6285                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6286                         tw32(TG3_EAV_WATCHDOG0_MSB,
6287                              TG3_EAV_WATCHDOG0_EN |
6288                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6289
6290                         tw32(TG3_EAV_REF_CLCK_CTL,
6291                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6292                 } else {
6293                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6294                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6295                 }
6296
6297 err_out:
6298                 tg3_full_unlock(tp);
6299                 return rval;
6300
6301         default:
6302                 break;
6303         }
6304
6305         return -EOPNOTSUPP;
6306 }
6307
6308 static const struct ptp_clock_info tg3_ptp_caps = {
6309         .owner          = THIS_MODULE,
6310         .name           = "tg3 clock",
6311         .max_adj        = 250000000,
6312         .n_alarm        = 0,
6313         .n_ext_ts       = 0,
6314         .n_per_out      = 1,
6315         .n_pins         = 0,
6316         .pps            = 0,
6317         .adjfreq        = tg3_ptp_adjfreq,
6318         .adjtime        = tg3_ptp_adjtime,
6319         .gettime64      = tg3_ptp_gettime,
6320         .settime64      = tg3_ptp_settime,
6321         .enable         = tg3_ptp_enable,
6322 };
6323
6324 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6325                                      struct skb_shared_hwtstamps *timestamp)
6326 {
6327         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6328         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6329                                            tp->ptp_adjust);
6330 }
6331
6332 /* tp->lock must be held */
6333 static void tg3_ptp_init(struct tg3 *tp)
6334 {
6335         if (!tg3_flag(tp, PTP_CAPABLE))
6336                 return;
6337
6338         /* Initialize the hardware clock to the system time. */
6339         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6340         tp->ptp_adjust = 0;
6341         tp->ptp_info = tg3_ptp_caps;
6342 }
6343
6344 /* tp->lock must be held */
6345 static void tg3_ptp_resume(struct tg3 *tp)
6346 {
6347         if (!tg3_flag(tp, PTP_CAPABLE))
6348                 return;
6349
6350         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6351         tp->ptp_adjust = 0;
6352 }
6353
6354 static void tg3_ptp_fini(struct tg3 *tp)
6355 {
6356         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6357                 return;
6358
6359         ptp_clock_unregister(tp->ptp_clock);
6360         tp->ptp_clock = NULL;
6361         tp->ptp_adjust = 0;
6362 }
6363
6364 static inline int tg3_irq_sync(struct tg3 *tp)
6365 {
6366         return tp->irq_sync;
6367 }
6368
6369 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6370 {
6371         int i;
6372
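        /* Advance the destination pointer by the register offset so each
         * value lands at the same byte offset in the dump buffer that the
         * register occupies in the device's register space.
         */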
6373         dst = (u32 *)((u8 *)dst + off);
6374         for (i = 0; i < len; i += sizeof(u32))
6375                 *dst++ = tr32(off + i);
6376 }
6377
6378 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6379 {
6380         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6381         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6382         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6383         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6384         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6385         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6386         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6387         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6388         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6389         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6390         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6391         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6392         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6393         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6394         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6395         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6396         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6397         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6398         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6399
6400         if (tg3_flag(tp, SUPPORT_MSIX))
6401                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6402
6403         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6404         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6405         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6406         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6407         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6408         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6409         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6410         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6411
6412         if (!tg3_flag(tp, 5705_PLUS)) {
6413                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6414                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6415                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6416         }
6417
6418         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6419         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6420         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6421         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6422         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6423
6424         if (tg3_flag(tp, NVRAM))
6425                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6426 }
6427
6428 static void tg3_dump_state(struct tg3 *tp)
6429 {
6430         int i;
6431         u32 *regs;
6432
6433         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6434         if (!regs)
6435                 return;
6436
6437         if (tg3_flag(tp, PCI_EXPRESS)) {
6438                 /* Read up to but not including private PCI registers */
6439                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6440                         regs[i / sizeof(u32)] = tr32(i);
6441         } else
6442                 tg3_dump_legacy_regs(tp, regs);
6443
6444         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6445                 if (!regs[i + 0] && !regs[i + 1] &&
6446                     !regs[i + 2] && !regs[i + 3])
6447                         continue;
6448
6449                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6450                            i * 4,
6451                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6452         }
6453
6454         kfree(regs);
6455
6456         for (i = 0; i < tp->irq_cnt; i++) {
6457                 struct tg3_napi *tnapi = &tp->napi[i];
6458
6459                 /* SW status block */
6460                 netdev_err(tp->dev,
6461                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6462                            i,
6463                            tnapi->hw_status->status,
6464                            tnapi->hw_status->status_tag,
6465                            tnapi->hw_status->rx_jumbo_consumer,
6466                            tnapi->hw_status->rx_consumer,
6467                            tnapi->hw_status->rx_mini_consumer,
6468                            tnapi->hw_status->idx[0].rx_producer,
6469                            tnapi->hw_status->idx[0].tx_consumer);
6470
6471                 netdev_err(tp->dev,
6472                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6473                            i,
6474                            tnapi->last_tag, tnapi->last_irq_tag,
6475                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6476                            tnapi->rx_rcb_ptr,
6477                            tnapi->prodring.rx_std_prod_idx,
6478                            tnapi->prodring.rx_std_cons_idx,
6479                            tnapi->prodring.rx_jmb_prod_idx,
6480                            tnapi->prodring.rx_jmb_cons_idx);
6481         }
6482 }
6483
6484 /* This is called whenever we suspect that the system chipset is re-
6485  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6486  * is bogus tx completions. We try to recover by setting the
6487  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6488  * in the workqueue.
6489  */
6490 static void tg3_tx_recover(struct tg3 *tp)
6491 {
6492         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6493                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6494
6495         netdev_warn(tp->dev,
6496                     "The system may be re-ordering memory-mapped I/O "
6497                     "cycles to the network device, attempting to recover. "
6498                     "Please report the problem to the driver maintainer "
6499                     "and include system chipset information.\n");
6500
6501         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6502 }
6503
6504 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6505 {
6506         /* Tell compiler to fetch tx indices from memory. */
6507         barrier();
6508         return tnapi->tx_pending -
6509                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6510 }
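
/* Illustrative ring arithmetic, assuming TG3_TX_RING_SIZE is 512: with
 * tx_prod = 5 and tx_cons = 500, (5 - 500) & 511 = 17 descriptors are
 * still in flight, so tg3_tx_avail() returns tx_pending - 17.
 */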
6511
6512 /* Tigon3 never reports partial packet sends.  So we do not
6513  * need special logic to handle SKBs that have not had all
6514  * of their frags sent yet, like SunGEM does.
6515  */
6516 static void tg3_tx(struct tg3_napi *tnapi)
6517 {
6518         struct tg3 *tp = tnapi->tp;
6519         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6520         u32 sw_idx = tnapi->tx_cons;
6521         struct netdev_queue *txq;
6522         int index = tnapi - tp->napi;
6523         unsigned int pkts_compl = 0, bytes_compl = 0;
6524
6525         if (tg3_flag(tp, ENABLE_TSS))
6526                 index--;
6527
6528         txq = netdev_get_tx_queue(tp->dev, index);
6529
6530         while (sw_idx != hw_idx) {
6531                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6532                 struct sk_buff *skb = ri->skb;
6533                 int i, tx_bug = 0;
6534
6535                 if (unlikely(skb == NULL)) {
6536                         tg3_tx_recover(tp);
6537                         return;
6538                 }
6539
6540                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6541                         struct skb_shared_hwtstamps timestamp;
6542                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6543                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6544
6545                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6546
6547                         skb_tstamp_tx(skb, &timestamp);
6548                 }
6549
6550                 pci_unmap_single(tp->pdev,
6551                                  dma_unmap_addr(ri, mapping),
6552                                  skb_headlen(skb),
6553                                  PCI_DMA_TODEVICE);
6554
6555                 ri->skb = NULL;
6556
6557                 while (ri->fragmented) {
6558                         ri->fragmented = false;
6559                         sw_idx = NEXT_TX(sw_idx);
6560                         ri = &tnapi->tx_buffers[sw_idx];
6561                 }
6562
6563                 sw_idx = NEXT_TX(sw_idx);
6564
6565                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6566                         ri = &tnapi->tx_buffers[sw_idx];
6567                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6568                                 tx_bug = 1;
6569
6570                         pci_unmap_page(tp->pdev,
6571                                        dma_unmap_addr(ri, mapping),
6572                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6573                                        PCI_DMA_TODEVICE);
6574
6575                         while (ri->fragmented) {
6576                                 ri->fragmented = false;
6577                                 sw_idx = NEXT_TX(sw_idx);
6578                                 ri = &tnapi->tx_buffers[sw_idx];
6579                         }
6580
6581                         sw_idx = NEXT_TX(sw_idx);
6582                 }
6583
6584                 pkts_compl++;
6585                 bytes_compl += skb->len;
6586
6587                 dev_kfree_skb_any(skb);
6588
6589                 if (unlikely(tx_bug)) {
6590                         tg3_tx_recover(tp);
6591                         return;
6592                 }
6593         }
6594
6595         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6596
6597         tnapi->tx_cons = sw_idx;
6598
6599         /* Need to make the tx_cons update visible to tg3_start_xmit()
6600          * before checking for netif_queue_stopped().  Without the
6601          * memory barrier, there is a small possibility that tg3_start_xmit()
6602          * will miss it and cause the queue to be stopped forever.
6603          */
6604         smp_mb();
6605
6606         if (unlikely(netif_tx_queue_stopped(txq) &&
6607                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6608                 __netif_tx_lock(txq, smp_processor_id());
6609                 if (netif_tx_queue_stopped(txq) &&
6610                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6611                         netif_tx_wake_queue(txq);
6612                 __netif_tx_unlock(txq);
6613         }
6614 }
6615
6616 static void tg3_frag_free(bool is_frag, void *data)
6617 {
6618         if (is_frag)
6619                 skb_free_frag(data);
6620         else
6621                 kfree(data);
6622 }
6623
6624 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6625 {
6626         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6627                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6628
6629         if (!ri->data)
6630                 return;
6631
6632         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6633                          map_sz, PCI_DMA_FROMDEVICE);
6634         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6635         ri->data = NULL;
6636 }
6637
6639 /* Returns size of skb allocated or < 0 on error.
6640  *
6641  * We only need to fill in the address because the other members
6642  * of the RX descriptor are invariant; see tg3_init_rings.
6643  *
6644  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6645  * posting buffers we only dirty the first cache line of the RX
6646  * descriptor (containing the address).  Whereas for the RX status
6647  * buffers the cpu only reads the last cacheline of the RX descriptor
6648  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6649  */
6650 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6651                              u32 opaque_key, u32 dest_idx_unmasked,
6652                              unsigned int *frag_size)
6653 {
6654         struct tg3_rx_buffer_desc *desc;
6655         struct ring_info *map;
6656         u8 *data;
6657         dma_addr_t mapping;
6658         int skb_size, data_size, dest_idx;
6659
6660         switch (opaque_key) {
6661         case RXD_OPAQUE_RING_STD:
6662                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6663                 desc = &tpr->rx_std[dest_idx];
6664                 map = &tpr->rx_std_buffers[dest_idx];
6665                 data_size = tp->rx_pkt_map_sz;
6666                 break;
6667
6668         case RXD_OPAQUE_RING_JUMBO:
6669                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6670                 desc = &tpr->rx_jmb[dest_idx].std;
6671                 map = &tpr->rx_jmb_buffers[dest_idx];
6672                 data_size = TG3_RX_JMB_MAP_SZ;
6673                 break;
6674
6675         default:
6676                 return -EINVAL;
6677         }
6678
6679         /* Do not overwrite any of the map or rp information
6680          * until we are sure we can commit to a new buffer.
6681          *
6682          * Callers depend upon this behavior and assume that
6683          * we leave everything unchanged if we fail.
6684          */
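        /* Size the buffer for the payload plus the rx offset, plus the
         * skb_shared_info that build_skb() will place at the end; buffers
         * that fit in a page come from the page fragment allocator.
         */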
6685         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6686                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6687         if (skb_size <= PAGE_SIZE) {
6688                 data = netdev_alloc_frag(skb_size);
6689                 *frag_size = skb_size;
6690         } else {
6691                 data = kmalloc(skb_size, GFP_ATOMIC);
6692                 *frag_size = 0;
6693         }
6694         if (!data)
6695                 return -ENOMEM;
6696
6697         mapping = pci_map_single(tp->pdev,
6698                                  data + TG3_RX_OFFSET(tp),
6699                                  data_size,
6700                                  PCI_DMA_FROMDEVICE);
6701         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6702                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6703                 return -EIO;
6704         }
6705
6706         map->data = data;
6707         dma_unmap_addr_set(map, mapping, mapping);
6708
6709         desc->addr_hi = ((u64)mapping >> 32);
6710         desc->addr_lo = ((u64)mapping & 0xffffffff);
6711
6712         return data_size;
6713 }
6714
6715 /* We only need to copy the address over because the other
6716  * members of the RX descriptor are invariant.  See notes above
6717  * tg3_alloc_rx_data for full details.
6718  */
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720                            struct tg3_rx_prodring_set *dpr,
6721                            u32 opaque_key, int src_idx,
6722                            u32 dest_idx_unmasked)
6723 {
6724         struct tg3 *tp = tnapi->tp;
6725         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726         struct ring_info *src_map, *dest_map;
6727         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6728         int dest_idx;
6729
6730         switch (opaque_key) {
6731         case RXD_OPAQUE_RING_STD:
6732                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733                 dest_desc = &dpr->rx_std[dest_idx];
6734                 dest_map = &dpr->rx_std_buffers[dest_idx];
6735                 src_desc = &spr->rx_std[src_idx];
6736                 src_map = &spr->rx_std_buffers[src_idx];
6737                 break;
6738
6739         case RXD_OPAQUE_RING_JUMBO:
6740                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743                 src_desc = &spr->rx_jmb[src_idx].std;
6744                 src_map = &spr->rx_jmb_buffers[src_idx];
6745                 break;
6746
6747         default:
6748                 return;
6749         }
6750
6751         dest_map->data = src_map->data;
6752         dma_unmap_addr_set(dest_map, mapping,
6753                            dma_unmap_addr(src_map, mapping));
6754         dest_desc->addr_hi = src_desc->addr_hi;
6755         dest_desc->addr_lo = src_desc->addr_lo;
6756
6757         /* Ensure that the update to the skb happens after the physical
6758          * addresses have been transferred to the new BD location.
6759          */
6760         smp_wmb();
6761
6762         src_map->data = NULL;
6763 }
6764
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766  * buffers to the chip, and one special ring the chip uses to report
6767  * status back to the host.
6768  *
6769  * The special ring reports the status of received packets to the
6770  * host.  The chip does not write into the original descriptor the
6771  * RX buffer was obtained from.  The chip simply takes the original
6772  * descriptor as provided by the host, updates the status and length
6773  * field, then writes this into the next status ring entry.
6774  *
6775  * Each ring the host uses to post buffers to the chip is described
6776  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6777  * it is first placed into the on-chip ram.  When the packet's length
6778  * is known, the chip walks down the TG3_BDINFO entries to select the
6779  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6780  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6781  *
6782  * The "separate ring for rx status" scheme may sound queer, but it makes
6783  * sense from a cache coherency perspective.  If only the host writes
6784  * to the buffer post rings, and only the chip writes to the rx status
6785  * rings, then cache lines never move beyond shared-modified state.
6786  * If both the host and chip were to write into the same ring, cache line
6787  * eviction could occur since both entities want it in an exclusive state.
6788  */
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6790 {
6791         struct tg3 *tp = tnapi->tp;
6792         u32 work_mask, rx_std_posted = 0;
6793         u32 std_prod_idx, jmb_prod_idx;
6794         u32 sw_idx = tnapi->rx_rcb_ptr;
6795         u16 hw_idx;
6796         int received;
6797         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6798
6799         hw_idx = *(tnapi->rx_rcb_prod_idx);
6800         /*
6801          * We need to order the read of hw_idx and the read of
6802          * the opaque cookie.
6803          */
6804         rmb();
6805         work_mask = 0;
6806         received = 0;
6807         std_prod_idx = tpr->rx_std_prod_idx;
6808         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809         while (sw_idx != hw_idx && budget > 0) {
6810                 struct ring_info *ri;
6811                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6812                 unsigned int len;
6813                 struct sk_buff *skb;
6814                 dma_addr_t dma_addr;
6815                 u32 opaque_key, desc_idx, *post_ptr;
6816                 u8 *data;
6817                 u64 tstamp = 0;
6818
6819                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823                         dma_addr = dma_unmap_addr(ri, mapping);
6824                         data = ri->data;
6825                         post_ptr = &std_prod_idx;
6826                         rx_std_posted++;
6827                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829                         dma_addr = dma_unmap_addr(ri, mapping);
6830                         data = ri->data;
6831                         post_ptr = &jmb_prod_idx;
6832                 } else
6833                         goto next_pkt_nopost;
6834
6835                 work_mask |= opaque_key;
6836
6837                 if (desc->err_vlan & RXD_ERR_MASK) {
6838                 drop_it:
6839                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6840                                        desc_idx, *post_ptr);
6841                 drop_it_no_recycle:
6842                         /* Other statistics are tracked by the card. */
6843                         tp->rx_dropped++;
6844                         goto next_pkt;
6845                 }
6846
6847                 prefetch(data + TG3_RX_OFFSET(tp));
6848                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6849                       ETH_FCS_LEN;
6850
6851                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852                      RXD_FLAG_PTPSTAT_PTPV1 ||
6853                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854                      RXD_FLAG_PTPSTAT_PTPV2) {
6855                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6857                 }
6858
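                /* Large packets are passed up zero-copy: post a fresh
                 * buffer to the ring and hand the old one to the stack
                 * via build_skb().  Small packets are copied into a new
                 * skb and the original buffer is recycled.
                 */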
6859                 if (len > TG3_RX_COPY_THRESH(tp)) {
6860                         int skb_size;
6861                         unsigned int frag_size;
6862
6863                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864                                                     *post_ptr, &frag_size);
6865                         if (skb_size < 0)
6866                                 goto drop_it;
6867
6868                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6869                                          PCI_DMA_FROMDEVICE);
6870
6871                         /* Ensure that the update to the data happens
6872                          * after the usage of the old DMA mapping.
6873                          */
6874                         smp_wmb();
6875
6876                         ri->data = NULL;
6877
6878                         skb = build_skb(data, frag_size);
6879                         if (!skb) {
6880                                 tg3_frag_free(frag_size != 0, data);
6881                                 goto drop_it_no_recycle;
6882                         }
6883                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6884                 } else {
6885                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6886                                        desc_idx, *post_ptr);
6887
6888                         skb = netdev_alloc_skb(tp->dev,
6889                                                len + TG3_RAW_IP_ALIGN);
6890                         if (skb == NULL)
6891                                 goto drop_it_no_recycle;
6892
6893                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                        len, PCI_DMA_FROMDEVICE);
6895                         memcpy(skb->data,
6896                                data + TG3_RX_OFFSET(tp),
6897                                len);
6898                         pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                           len, PCI_DMA_FROMDEVICE);
6899                 }
6900
6901                 skb_put(skb, len);
6902                 if (tstamp)
6903                         tg3_hwclock_to_timestamp(tp, tstamp,
6904                                                  skb_hwtstamps(skb));
6905
6906                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6907                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6908                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6909                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6910                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6911                 else
6912                         skb_checksum_none_assert(skb);
6913
6914                 skb->protocol = eth_type_trans(skb, tp->dev);
6915
6916                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6917                     skb->protocol != htons(ETH_P_8021Q) &&
6918                     skb->protocol != htons(ETH_P_8021AD)) {
6919                         dev_kfree_skb_any(skb);
6920                         goto drop_it_no_recycle;
6921                 }
6922
6923                 if (desc->type_flags & RXD_FLAG_VLAN &&
6924                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6925                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6926                                                desc->err_vlan & RXD_VLAN_MASK);
6927
6928                 napi_gro_receive(&tnapi->napi, skb);
6929
6930                 received++;
6931                 budget--;
6932
6933 next_pkt:
6934                 (*post_ptr)++;
6935
6936                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6937                         tpr->rx_std_prod_idx = std_prod_idx &
6938                                                tp->rx_std_ring_mask;
6939                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6940                                      tpr->rx_std_prod_idx);
6941                         work_mask &= ~RXD_OPAQUE_RING_STD;
6942                         rx_std_posted = 0;
6943                 }
6944 next_pkt_nopost:
6945                 sw_idx++;
6946                 sw_idx &= tp->rx_ret_ring_mask;
6947
6948                 /* Refresh hw_idx to see if there is new work */
6949                 if (sw_idx == hw_idx) {
6950                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6951                         rmb();
6952                 }
6953         }
6954
6955         /* ACK the status ring. */
6956         tnapi->rx_rcb_ptr = sw_idx;
6957         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6958
6959         /* Refill RX ring(s). */
6960         if (!tg3_flag(tp, ENABLE_RSS)) {
6961                 /* Sync BD data before updating mailbox */
6962                 wmb();
6963
6964                 if (work_mask & RXD_OPAQUE_RING_STD) {
6965                         tpr->rx_std_prod_idx = std_prod_idx &
6966                                                tp->rx_std_ring_mask;
6967                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6968                                      tpr->rx_std_prod_idx);
6969                 }
6970                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6971                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6972                                                tp->rx_jmb_ring_mask;
6973                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6974                                      tpr->rx_jmb_prod_idx);
6975                 }
6976                 mmiowb();
6977         } else if (work_mask) {
6978                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6979                  * updated before the producer indices can be updated.
6980                  */
6981                 smp_wmb();
6982
6983                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6984                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6985
6986                 if (tnapi != &tp->napi[1]) {
6987                         tp->rx_refill = true;
6988                         napi_schedule(&tp->napi[1].napi);
6989                 }
6990         }
6991
6992         return received;
6993 }
6994
6995 static void tg3_poll_link(struct tg3 *tp)
6996 {
6997         /* handle link change and other phy events */
6998         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6999                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7000
7001                 if (sblk->status & SD_STATUS_LINK_CHG) {
7002                         sblk->status = SD_STATUS_UPDATED |
7003                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7004                         spin_lock(&tp->lock);
7005                         if (tg3_flag(tp, USE_PHYLIB)) {
7006                                 tw32_f(MAC_STATUS,
7007                                      (MAC_STATUS_SYNC_CHANGED |
7008                                       MAC_STATUS_CFG_CHANGED |
7009                                       MAC_STATUS_MI_COMPLETION |
7010                                       MAC_STATUS_LNKSTATE_CHANGED));
7011                                 udelay(40);
7012                         } else
7013                                 tg3_setup_phy(tp, false);
7014                         spin_unlock(&tp->lock);
7015                 }
7016         }
7017 }
7018
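/* Drain recycled rx buffers from the source producer ring @spr into the
 * destination ring @dpr (the ring actually posted to the hardware),
 * copying contiguous runs and noting -ENOSPC when a destination slot
 * is still occupied.
 */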
7019 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7020                                 struct tg3_rx_prodring_set *dpr,
7021                                 struct tg3_rx_prodring_set *spr)
7022 {
7023         u32 si, di, cpycnt, src_prod_idx;
7024         int i, err = 0;
7025
7026         while (1) {
7027                 src_prod_idx = spr->rx_std_prod_idx;
7028
7029                 /* Make sure updates to the rx_std_buffers[] entries and the
7030                  * standard producer index are seen in the correct order.
7031                  */
7032                 smp_rmb();
7033
7034                 if (spr->rx_std_cons_idx == src_prod_idx)
7035                         break;
7036
7037                 if (spr->rx_std_cons_idx < src_prod_idx)
7038                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7039                 else
7040                         cpycnt = tp->rx_std_ring_mask + 1 -
7041                                  spr->rx_std_cons_idx;
7042
7043                 cpycnt = min(cpycnt,
7044                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7045
7046                 si = spr->rx_std_cons_idx;
7047                 di = dpr->rx_std_prod_idx;
7048
7049                 for (i = di; i < di + cpycnt; i++) {
7050                         if (dpr->rx_std_buffers[i].data) {
7051                                 cpycnt = i - di;
7052                                 err = -ENOSPC;
7053                                 break;
7054                         }
7055                 }
7056
7057                 if (!cpycnt)
7058                         break;
7059
7060                 /* Ensure that updates to the rx_std_buffers ring and the
7061                  * shadowed hardware producer ring from tg3_recycle_skb() are
7062                  * ordered correctly WRT the skb check above.
7063                  */
7064                 smp_rmb();
7065
7066                 memcpy(&dpr->rx_std_buffers[di],
7067                        &spr->rx_std_buffers[si],
7068                        cpycnt * sizeof(struct ring_info));
7069
7070                 for (i = 0; i < cpycnt; i++, di++, si++) {
7071                         struct tg3_rx_buffer_desc *sbd, *dbd;
7072                         sbd = &spr->rx_std[si];
7073                         dbd = &dpr->rx_std[di];
7074                         dbd->addr_hi = sbd->addr_hi;
7075                         dbd->addr_lo = sbd->addr_lo;
7076                 }
7077
7078                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7079                                        tp->rx_std_ring_mask;
7080                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7081                                        tp->rx_std_ring_mask;
7082         }
7083
7084         while (1) {
7085                 src_prod_idx = spr->rx_jmb_prod_idx;
7086
7087                 /* Make sure updates to the rx_jmb_buffers[] entries and
7088                  * the jumbo producer index are seen in the correct order.
7089                  */
7090                 smp_rmb();
7091
7092                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7093                         break;
7094
7095                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7096                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7097                 else
7098                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7099                                  spr->rx_jmb_cons_idx;
7100
7101                 cpycnt = min(cpycnt,
7102                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7103
7104                 si = spr->rx_jmb_cons_idx;
7105                 di = dpr->rx_jmb_prod_idx;
7106
7107                 for (i = di; i < di + cpycnt; i++) {
7108                         if (dpr->rx_jmb_buffers[i].data) {
7109                                 cpycnt = i - di;
7110                                 err = -ENOSPC;
7111                                 break;
7112                         }
7113                 }
7114
7115                 if (!cpycnt)
7116                         break;
7117
7118                 /* Ensure that updates to the rx_jmb_buffers ring and the
7119                  * shadowed hardware producer ring from tg3_recycle_skb() are
7120                  * ordered correctly WRT the skb check above.
7121                  */
7122                 smp_rmb();
7123
7124                 memcpy(&dpr->rx_jmb_buffers[di],
7125                        &spr->rx_jmb_buffers[si],
7126                        cpycnt * sizeof(struct ring_info));
7127
7128                 for (i = 0; i < cpycnt; i++, di++, si++) {
7129                         struct tg3_rx_buffer_desc *sbd, *dbd;
7130                         sbd = &spr->rx_jmb[si].std;
7131                         dbd = &dpr->rx_jmb[di].std;
7132                         dbd->addr_hi = sbd->addr_hi;
7133                         dbd->addr_lo = sbd->addr_lo;
7134                 }
7135
7136                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7137                                        tp->rx_jmb_ring_mask;
7138                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7139                                        tp->rx_jmb_ring_mask;
7140         }
7141
7142         return err;
7143 }
7144
7145 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7146 {
7147         struct tg3 *tp = tnapi->tp;
7148
7149         /* run TX completion thread */
7150         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7151                 tg3_tx(tnapi);
7152                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7153                         return work_done;
7154         }
7155
7156         if (!tnapi->rx_rcb_prod_idx)
7157                 return work_done;
7158
7159         /* run RX thread, within the bounds set by NAPI.
7160          * All RX "locking" is done by ensuring outside
7161          * code synchronizes with tg3->napi.poll()
7162          */
7163         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7164                 work_done += tg3_rx(tnapi, budget - work_done);
7165
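        /* With RSS, only napi[0]'s producer ring is posted to the
         * hardware.  Vector 1 is responsible for gathering buffers
         * recycled into every RX queue's staging prodring back into
         * napi[0]'s ring and, if anything moved, for publishing the
         * new producer indices to the chip.
         */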
7166         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7167                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7168                 int i, err = 0;
7169                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7170                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7171
7172                 tp->rx_refill = false;
7173                 for (i = 1; i <= tp->rxq_cnt; i++)
7174                         err |= tg3_rx_prodring_xfer(tp, dpr,
7175                                                     &tp->napi[i].prodring);
7176
7177                 wmb();
7178
7179                 if (std_prod_idx != dpr->rx_std_prod_idx)
7180                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7181                                      dpr->rx_std_prod_idx);
7182
7183                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7184                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7185                                      dpr->rx_jmb_prod_idx);
7186
7187                 mmiowb();
7188
7189                 if (err)
7190                         tw32_f(HOSTCC_MODE, tp->coal_now);
7191         }
7192
7193         return work_done;
7194 }
7195
7196 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7197 {
7198         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199                 schedule_work(&tp->reset_task);
7200 }
7201
7202 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7203 {
7204         cancel_work_sync(&tp->reset_task);
7205         tg3_flag_clear(tp, RESET_TASK_PENDING);
7206         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7207 }
7208
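/* NAPI poll callback for the MSI-X vectors.  These always run with
 * tagged status blocks, so interrupt re-enable is done by writing
 * last_tag << 24 back to the per-vector interrupt mailbox.
 */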
7209 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7210 {
7211         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7212         struct tg3 *tp = tnapi->tp;
7213         int work_done = 0;
7214         struct tg3_hw_status *sblk = tnapi->hw_status;
7215
7216         while (1) {
7217                 work_done = tg3_poll_work(tnapi, work_done, budget);
7218
7219                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7220                         goto tx_recovery;
7221
7222                 if (unlikely(work_done >= budget))
7223                         break;
7224
7225                 /* tnapi->last_tag is written to the interrupt mailbox
7226                  * below to tell the hw how much work has been processed,
7227                  * so we must read it before checking for more work.
7228                  */
7229                 tnapi->last_tag = sblk->status_tag;
7230                 tnapi->last_irq_tag = tnapi->last_tag;
7231                 rmb();
7232
7233                 /* check for RX/TX work to do */
7234                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7235                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7236
7237                         /* This check is not race-free, but looping
7238                          * again here reduces the number of interrupts.
7239                          */
7240                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7241                                 continue;
7242
7243                         napi_complete_done(napi, work_done);
7244                         /* Reenable interrupts. */
7245                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7246
7247                         /* This test here is synchronized by napi_schedule()
7248                          * and napi_complete() to close the race condition.
7249                          */
7250                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7251                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7252                                                   HOSTCC_MODE_ENABLE |
7253                                                   tnapi->coal_now);
7254                         }
7255                         mmiowb();
7256                         break;
7257                 }
7258         }
7259
7260         return work_done;
7261
7262 tx_recovery:
7263         /* work_done is guaranteed to be less than budget. */
7264         napi_complete(napi);
7265         tg3_reset_task_schedule(tp);
7266         return work_done;
7267 }
7268
7269 static void tg3_process_error(struct tg3 *tp)
7270 {
7271         u32 val;
7272         bool real_error = false;
7273
7274         if (tg3_flag(tp, ERROR_PROCESSED))
7275                 return;
7276
7277         /* Check Flow Attention register */
7278         val = tr32(HOSTCC_FLOW_ATTN);
7279         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7280                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7281                 real_error = true;
7282         }
7283
7284         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7285                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7286                 real_error = true;
7287         }
7288
7289         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7290                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7291                 real_error = true;
7292         }
7293
7294         if (!real_error)
7295                 return;
7296
7297         tg3_dump_state(tp);
7298
7299         tg3_flag_set(tp, ERROR_PROCESSED);
7300         tg3_reset_task_schedule(tp);
7301 }
7302
7303 static int tg3_poll(struct napi_struct *napi, int budget)
7304 {
7305         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7306         struct tg3 *tp = tnapi->tp;
7307         int work_done = 0;
7308         struct tg3_hw_status *sblk = tnapi->hw_status;
7309
7310         while (1) {
7311                 if (sblk->status & SD_STATUS_ERROR)
7312                         tg3_process_error(tp);
7313
7314                 tg3_poll_link(tp);
7315
7316                 work_done = tg3_poll_work(tnapi, work_done, budget);
7317
7318                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7319                         goto tx_recovery;
7320
7321                 if (unlikely(work_done >= budget))
7322                         break;
7323
7324                 if (tg3_flag(tp, TAGGED_STATUS)) {
7325                         /* tnapi->last_tag is used in tg3_int_reenable() below
7326                          * to tell the hw how much work has been processed,
7327                          * so we must read it before checking for more work.
7328                          */
7329                         tnapi->last_tag = sblk->status_tag;
7330                         tnapi->last_irq_tag = tnapi->last_tag;
7331                         rmb();
7332                 } else
7333                         sblk->status &= ~SD_STATUS_UPDATED;
7334
7335                 if (likely(!tg3_has_work(tnapi))) {
7336                         napi_complete_done(napi, work_done);
7337                         tg3_int_reenable(tnapi);
7338                         break;
7339                 }
7340         }
7341
7342         return work_done;
7343
7344 tx_recovery:
7345         /* work_done is guaranteed to be less than budget. */
7346         napi_complete(napi);
7347         tg3_reset_task_schedule(tp);
7348         return work_done;
7349 }
7350
7351 static void tg3_napi_disable(struct tg3 *tp)
7352 {
7353         int i;
7354
7355         for (i = tp->irq_cnt - 1; i >= 0; i--)
7356                 napi_disable(&tp->napi[i].napi);
7357 }
7358
7359 static void tg3_napi_enable(struct tg3 *tp)
7360 {
7361         int i;
7362
7363         for (i = 0; i < tp->irq_cnt; i++)
7364                 napi_enable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_init(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372         for (i = 1; i < tp->irq_cnt; i++)
7373                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7374 }
7375
7376 static void tg3_napi_fini(struct tg3 *tp)
7377 {
7378         int i;
7379
7380         for (i = 0; i < tp->irq_cnt; i++)
7381                 netif_napi_del(&tp->napi[i].napi);
7382 }
7383
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7385 {
7386         netif_trans_update(tp->dev);    /* prevent tx timeout */
7387         tg3_napi_disable(tp);
7388         netif_carrier_off(tp->dev);
7389         netif_tx_disable(tp->dev);
7390 }
7391
7392 /* tp->lock must be held */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7394 {
7395         tg3_ptp_resume(tp);
7396
7397         /* NOTE: unconditional netif_tx_wake_all_queues is only
7398          * appropriate so long as all callers are assured to
7399          * have free tx slots (such as after tg3_init_hw)
7400          */
7401         netif_tx_wake_all_queues(tp->dev);
7402
7403         if (tp->link_up)
7404                 netif_carrier_on(tp->dev);
7405
7406         tg3_napi_enable(tp);
7407         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408         tg3_enable_ints(tp);
7409 }
7410
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412         __releases(tp->lock)
7413         __acquires(tp->lock)
7414 {
7415         int i;
7416
7417         BUG_ON(tp->irq_sync);
7418
7419         tp->irq_sync = 1;
7420         smp_mb();
7421
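        /* Drop the lock while waiting: synchronize_irq() may sleep.
         * In-flight handlers see irq_sync via tg3_irq_sync() and
         * refrain from scheduling NAPI in the meantime.
         */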
7422         spin_unlock_bh(&tp->lock);
7423
7424         for (i = 0; i < tp->irq_cnt; i++)
7425                 synchronize_irq(tp->napi[i].irq_vec);
7426
7427         spin_lock_bh(&tp->lock);
7428 }
7429
7430 /* Fully shut down all tg3 driver activity elsewhere in the system.
7431  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
7432  * with as well.  This is usually only necessary when shutting down
7433  * the device.
7434  */
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7436 {
7437         spin_lock_bh(&tp->lock);
7438         if (irq_sync)
7439                 tg3_irq_quiesce(tp);
7440 }
7441
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7443 {
7444         spin_unlock_bh(&tp->lock);
7445 }
7446
7447 /* One-shot MSI handler - Chip automatically disables interrupt
7448  * after sending MSI so driver doesn't have to do it.
7449  */
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7451 {
7452         struct tg3_napi *tnapi = dev_id;
7453         struct tg3 *tp = tnapi->tp;
7454
7455         prefetch(tnapi->hw_status);
7456         if (tnapi->rx_rcb)
7457                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458
7459         if (likely(!tg3_irq_sync(tp)))
7460                 napi_schedule(&tnapi->napi);
7461
7462         return IRQ_HANDLED;
7463 }
7464
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466  * flush status block and interrupt mailbox. PCI ordering rules
7467  * guarantee that MSI will arrive after the status block.
7468  */
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7470 {
7471         struct tg3_napi *tnapi = dev_id;
7472         struct tg3 *tp = tnapi->tp;
7473
7474         prefetch(tnapi->hw_status);
7475         if (tnapi->rx_rcb)
7476                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7477         /*
7478          * Writing any value to intr-mbox-0 clears PCI INTA# and
7479          * chip-internal interrupt pending events.
7480          * Writing non-zero to intr-mbox-0 additionally tells the
7481          * NIC to stop sending us irqs, engaging "in-intr-handler"
7482          * event coalescing.
7483          */
7484         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7485         if (likely(!tg3_irq_sync(tp)))
7486                 napi_schedule(&tnapi->napi);
7487
7488         return IRQ_RETVAL(1);
7489 }
7490
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7492 {
7493         struct tg3_napi *tnapi = dev_id;
7494         struct tg3 *tp = tnapi->tp;
7495         struct tg3_hw_status *sblk = tnapi->hw_status;
7496         unsigned int handled = 1;
7497
7498         /* In INTx mode, it is possible for the interrupt to arrive at
7499          * the CPU before the status block that was posted prior to it.
7500          * Reading the PCI State register will confirm whether the
7501          * interrupt is ours and will flush the status block.
7502          */
7503         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504                 if (tg3_flag(tp, CHIP_RESETTING) ||
7505                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506                         handled = 0;
7507                         goto out;
7508                 }
7509         }
7510
7511         /*
7512          * Writing any value to intr-mbox-0 clears PCI INTA# and
7513          * chip-internal interrupt pending events.
7514          * Writing non-zero to intr-mbox-0 additionally tells the
7515          * NIC to stop sending us irqs, engaging "in-intr-handler"
7516          * event coalescing.
7517          *
7518          * Flush the mailbox to de-assert the IRQ immediately to prevent
7519          * spurious interrupts.  The flush impacts performance but
7520          * excessive spurious interrupts can be worse in some cases.
7521          */
7522         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523         if (tg3_irq_sync(tp))
7524                 goto out;
7525         sblk->status &= ~SD_STATUS_UPDATED;
7526         if (likely(tg3_has_work(tnapi))) {
7527                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528                 napi_schedule(&tnapi->napi);
7529         } else {
7530                 /* No work; a shared interrupt perhaps?  Re-enable
7531                  * interrupts, and flush that PCI write
7532                  */
7533                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7534                                0x00000000);
7535         }
7536 out:
7537         return IRQ_RETVAL(handled);
7538 }
7539
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7541 {
7542         struct tg3_napi *tnapi = dev_id;
7543         struct tg3 *tp = tnapi->tp;
7544         struct tg3_hw_status *sblk = tnapi->hw_status;
7545         unsigned int handled = 1;
7546
7547         /* In INTx mode, it is possible for the interrupt to arrive at
7548          * the CPU before the status block that was posted prior to it.
7549          * Reading the PCI State register will confirm whether the
7550          * interrupt is ours and will flush the status block.
7551          */
7552         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553                 if (tg3_flag(tp, CHIP_RESETTING) ||
7554                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555                         handled = 0;
7556                         goto out;
7557                 }
7558         }
7559
7560         /*
7561          * Writing any value to intr-mbox-0 clears PCI INTA# and
7562          * chip-internal interrupt pending events.
7563          * Writing non-zero to intr-mbox-0 additionally tells the
7564          * NIC to stop sending us irqs, engaging "in-intr-handler"
7565          * event coalescing.
7566          *
7567          * Flush the mailbox to de-assert the IRQ immediately to prevent
7568          * spurious interrupts.  The flush impacts performance but
7569          * excessive spurious interrupts can be worse in some cases.
7570          */
7571         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7572
7573         /*
7574          * In a shared interrupt configuration, sometimes other devices'
7575          * interrupts will scream.  We record the current status tag here
7576          * so that the above check can report that the screaming interrupts
7577          * are unhandled.  Eventually they will be silenced.
7578          */
7579         tnapi->last_irq_tag = sblk->status_tag;
7580
7581         if (tg3_irq_sync(tp))
7582                 goto out;
7583
7584         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7585
7586         napi_schedule(&tnapi->napi);
7587
7588 out:
7589         return IRQ_RETVAL(handled);
7590 }
7591
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7594 {
7595         struct tg3_napi *tnapi = dev_id;
7596         struct tg3 *tp = tnapi->tp;
7597         struct tg3_hw_status *sblk = tnapi->hw_status;
7598
7599         if ((sblk->status & SD_STATUS_UPDATED) ||
7600             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601                 tg3_disable_ints(tp);
7602                 return IRQ_RETVAL(1);
7603         }
7604         return IRQ_RETVAL(0);
7605 }
7606
7607 #ifdef CONFIG_NET_POLL_CONTROLLER
7608 static void tg3_poll_controller(struct net_device *dev)
7609 {
7610         int i;
7611         struct tg3 *tp = netdev_priv(dev);
7612
7613         if (tg3_irq_sync(tp))
7614                 return;
7615
7616         for (i = 0; i < tp->irq_cnt; i++)
7617                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7618 }
7619 #endif
7620
7621 static void tg3_tx_timeout(struct net_device *dev)
7622 {
7623         struct tg3 *tp = netdev_priv(dev);
7624
7625         if (netif_msg_tx_err(tp)) {
7626                 netdev_err(dev, "transmit timed out, resetting\n");
7627                 tg3_dump_state(tp);
7628         }
7629
7630         tg3_reset_task_schedule(tp);
7631 }
7632
7633 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7635 {
7636         u32 base = (u32) mapping & 0xffffffff;
7637
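        /* Example: base = 0xfffffff8, len = 16: base + len + 8
         * truncates to 0x10 in 32 bits, which is < base, so the test
         * fires.  The extra 8 bytes also catch buffers that merely end
         * within 8 bytes of a boundary.
         */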
7638         return base + len + 8 < base;
7639 }
7640
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642  * of any 4GB boundary: 4G, 8G, etc.
7643  */
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7645                                            u32 len, u32 mss)
7646 {
7647         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648                 u32 base = (u32) mapping & 0xffffffff;
7649
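                /* Example (mss = 0x900): base = 0xfffff000 and
                 * len = 0x800 give 0x100000100, which truncates below
                 * base: the buffer ends within mss bytes of a 4GB
                 * boundary without crossing it, and still needs the
                 * workaround on the 5762.
                 */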
7650                 return ((base + len + (mss & 0x3fff)) < base);
7651         }
7652         return 0;
7653 }
7654
7655 /* Test for DMA addresses > 40-bit */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7657                                           int len)
7658 {
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
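        /* DMA_BIT_MASK(40) is ((1ULL << 40) - 1), so this flags any
         * mapping whose end lies above the 40-bit address space on
         * chips with the 40BIT_DMA_BUG quirk.
         */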
7660         if (tg3_flag(tp, 40BIT_DMA_BUG))
7661                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7662         return 0;
7663 #else
7664         return 0;
7665 #endif
7666 }
7667
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669                                  dma_addr_t mapping, u32 len, u32 flags,
7670                                  u32 mss, u32 vlan)
7671 {
7672         txbd->addr_hi = ((u64) mapping >> 32);
7673         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7676 }
7677
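/* Fill one or more tx BDs for a mapped buffer, applying the hardware
 * DMA bug checks.  Returns true if the mapping still trips one of the
 * bugs, in which case the caller falls back to the hwbug workaround.
 * When tp->dma_limit is set, oversized buffers are split into
 * dma_limit-sized BDs; e.g. with dma_limit = 4096 and len = 4100 the
 * naive split would leave a 4-byte tail, so the code emits 2048 + 2052
 * instead, dodging the 8-byte DMA problem noted below.
 */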
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679                             dma_addr_t map, u32 len, u32 flags,
7680                             u32 mss, u32 vlan)
7681 {
7682         struct tg3 *tp = tnapi->tp;
7683         bool hwbug = false;
7684
7685         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7686                 hwbug = true;
7687
7688         if (tg3_4g_overflow_test(map, len))
7689                 hwbug = true;
7690
7691         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7692                 hwbug = true;
7693
7694         if (tg3_40bit_overflow_test(tp, map, len))
7695                 hwbug = true;
7696
7697         if (tp->dma_limit) {
7698                 u32 prvidx = *entry;
7699                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700                 while (len > tp->dma_limit && *budget) {
7701                         u32 frag_len = tp->dma_limit;
7702                         len -= tp->dma_limit;
7703
7704                         /* Avoid the 8-byte DMA problem */
7705                         if (len <= 8) {
7706                                 len += tp->dma_limit / 2;
7707                                 frag_len = tp->dma_limit / 2;
7708                         }
7709
7710                         tnapi->tx_buffers[*entry].fragmented = true;
7711
7712                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713                                       frag_len, tmp_flag, mss, vlan);
7714                         *budget -= 1;
7715                         prvidx = *entry;
7716                         *entry = NEXT_TX(*entry);
7717
7718                         map += frag_len;
7719                 }
7720
7721                 if (len) {
7722                         if (*budget) {
7723                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724                                               len, flags, mss, vlan);
7725                                 *budget -= 1;
7726                                 *entry = NEXT_TX(*entry);
7727                         } else {
7728                                 hwbug = true;
7729                                 tnapi->tx_buffers[prvidx].fragmented = false;
7730                         }
7731                 }
7732         } else {
7733                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734                               len, flags, mss, vlan);
7735                 *entry = NEXT_TX(*entry);
7736         }
7737
7738         return hwbug;
7739 }
7740
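/* Unmap the BDs of one transmitted skb: the linear head at 'entry'
 * plus fragments 0..last (last == -1 unmaps the head only, as the
 * hwbug workaround path does).  BDs flagged 'fragmented' were created
 * by tg3_tx_frag_set() splitting and have no mapping of their own,
 * so they are simply stepped over.
 */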
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7742 {
7743         int i;
7744         struct sk_buff *skb;
7745         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7746
7747         skb = txb->skb;
7748         txb->skb = NULL;
7749
7750         pci_unmap_single(tnapi->tp->pdev,
7751                          dma_unmap_addr(txb, mapping),
7752                          skb_headlen(skb),
7753                          PCI_DMA_TODEVICE);
7754
7755         while (txb->fragmented) {
7756                 txb->fragmented = false;
7757                 entry = NEXT_TX(entry);
7758                 txb = &tnapi->tx_buffers[entry];
7759         }
7760
7761         for (i = 0; i <= last; i++) {
7762                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7763
7764                 entry = NEXT_TX(entry);
7765                 txb = &tnapi->tx_buffers[entry];
7766
7767                 pci_unmap_page(tnapi->tp->pdev,
7768                                dma_unmap_addr(txb, mapping),
7769                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7770
7771                 while (txb->fragmented) {
7772                         txb->fragmented = false;
7773                         entry = NEXT_TX(entry);
7774                         txb = &tnapi->tx_buffers[entry];
7775                 }
7776         }
7777 }
7778
7779 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7780 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7781                                        struct sk_buff **pskb,
7782                                        u32 *entry, u32 *budget,
7783                                        u32 base_flags, u32 mss, u32 vlan)
7784 {
7785         struct tg3 *tp = tnapi->tp;
7786         struct sk_buff *new_skb, *skb = *pskb;
7787         dma_addr_t new_addr = 0;
7788         int ret = 0;
7789
7790         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7791                 new_skb = skb_copy(skb, GFP_ATOMIC);
7792         else {
7793                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7794
7795                 new_skb = skb_copy_expand(skb,
7796                                           skb_headroom(skb) + more_headroom,
7797                                           skb_tailroom(skb), GFP_ATOMIC);
7798         }
7799
7800         if (!new_skb) {
7801                 ret = -1;
7802         } else {
7803                 /* New SKB is guaranteed to be linear. */
7804                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7805                                           PCI_DMA_TODEVICE);
7806                 /* Make sure the mapping succeeded */
7807                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7808                         dev_kfree_skb_any(new_skb);
7809                         ret = -1;
7810                 } else {
7811                         u32 save_entry = *entry;
7812
7813                         base_flags |= TXD_FLAG_END;
7814
7815                         tnapi->tx_buffers[*entry].skb = new_skb;
7816                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7817                                            mapping, new_addr);
7818
7819                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7820                                             new_skb->len, base_flags,
7821                                             mss, vlan)) {
7822                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7823                                 dev_kfree_skb_any(new_skb);
7824                                 ret = -1;
7825                         }
7826                 }
7827         }
7828
7829         dev_kfree_skb_any(skb);
7830         *pskb = new_skb;
7831         return ret;
7832 }
7833
7834 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835 {
7836         /* Check if we will never have enough descriptors,
7837          * as gso_segs can exceed the current ring size
7838          */
7839         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840 }
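/* The divisor of 3 mirrors frag_cnt_est in tg3_tso_bug() below; both
 * budget a worst case of roughly three descriptors per GSO segment.
 */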
7841
7842 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7843
7844 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7845  * indicated in tg3_tx_frag_set()
7846  */
7847 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7848                        struct netdev_queue *txq, struct sk_buff *skb)
7849 {
7850         struct sk_buff *segs, *nskb;
7851         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7852
7853         /* Estimate the number of fragments in the worst case */
7854         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7855                 netif_tx_stop_queue(txq);
7856
7857                 /* netif_tx_stop_queue() must be done before checking
7858                  * the tx index in tg3_tx_avail() below, because in
7859                  * tg3_tx(), we update tx index before checking for
7860                  * netif_tx_queue_stopped().
7861                  */
7862                 smp_mb();
7863                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7864                         return NETDEV_TX_BUSY;
7865
7866                 netif_tx_wake_queue(txq);
7867         }
7868
7869         segs = skb_gso_segment(skb, tp->dev->features &
7870                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7871         if (IS_ERR(segs) || !segs)
7872                 goto tg3_tso_bug_end;
7873
7874         do {
7875                 nskb = segs;
7876                 segs = segs->next;
7877                 nskb->next = NULL;
7878                 tg3_start_xmit(nskb, tp->dev);
7879         } while (segs);
7880
7881 tg3_tso_bug_end:
7882         dev_kfree_skb_any(skb);
7883
7884         return NETDEV_TX_OK;
7885 }
7886
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7889 {
7890         struct tg3 *tp = netdev_priv(dev);
7891         u32 len, entry, base_flags, mss, vlan = 0;
7892         u32 budget;
7893         int i = -1, would_hit_hwbug;
7894         dma_addr_t mapping;
7895         struct tg3_napi *tnapi;
7896         struct netdev_queue *txq;
7897         unsigned int last;
7898         struct iphdr *iph = NULL;
7899         struct tcphdr *tcph = NULL;
7900         __sum16 tcp_csum = 0, ip_csum = 0;
7901         __be16 ip_tot_len = 0;
7902
7903         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905         if (tg3_flag(tp, ENABLE_TSS))
7906                 tnapi++;
7907
7908         budget = tg3_tx_avail(tnapi);
7909
7910         /* We are running in BH disabled context with netif_tx_lock
7911          * and TX reclaim runs via tp->napi.poll inside of a software
7912          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7913          * no IRQ context deadlocks to worry about either.  Rejoice!
7914          */
7915         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916                 if (!netif_tx_queue_stopped(txq)) {
7917                         netif_tx_stop_queue(txq);
7918
7919                         /* This is a hard error, log it. */
7920                         netdev_err(dev,
7921                                    "BUG! Tx Ring full when queue awake!\n");
7922                 }
7923                 return NETDEV_TX_BUSY;
7924         }
7925
7926         entry = tnapi->tx_prod;
7927         base_flags = 0;
7928
7929         mss = skb_shinfo(skb)->gso_size;
7930         if (mss) {
7931                 u32 tcp_opt_len, hdr_len;
7932
7933                 if (skb_cow_head(skb, 0))
7934                         goto drop;
7935
7936                 iph = ip_hdr(skb);
7937                 tcp_opt_len = tcp_optlen(skb);
7938
7939                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7940
7941                 /* HW/FW cannot correctly segment packets that have been
7942                  * VLAN encapsulated.
7943                  */
7944                 if (skb->protocol == htons(ETH_P_8021Q) ||
7945                     skb->protocol == htons(ETH_P_8021AD)) {
7946                         if (tg3_tso_bug_gso_check(tnapi, skb))
7947                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7948                         goto drop;
7949                 }
7950
7951                 if (!skb_is_gso_v6(skb)) {
7952                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953                             tg3_flag(tp, TSO_BUG)) {
7954                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7955                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7956                                 goto drop;
7957                         }
7958                         ip_csum = iph->check;
7959                         ip_tot_len = iph->tot_len;
7960                         iph->check = 0;
7961                         iph->tot_len = htons(mss + hdr_len);
7962                 }
7963
7964                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965                                TXD_FLAG_CPU_POST_DMA);
7966
7967                 tcph = tcp_hdr(skb);
7968                 tcp_csum = tcph->check;
7969
7970                 if (tg3_flag(tp, HW_TSO_1) ||
7971                     tg3_flag(tp, HW_TSO_2) ||
7972                     tg3_flag(tp, HW_TSO_3)) {
7973                         tcph->check = 0;
7974                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7975                 } else {
7976                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7977                                                          0, IPPROTO_TCP, 0);
7978                 }
7979
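                /* The chips scatter hdr_len across the descriptor:
                 * HW_TSO_3 packs hdr_len bits 2-3 into mss bits 14-15,
                 * bit 4 into base_flags bit 4 and bits 5-9 into
                 * base_flags bits 10-14; HW_TSO_2 instead shifts the
                 * whole hdr_len into mss bits 9 and up.
                 */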
7980                 if (tg3_flag(tp, HW_TSO_3)) {
7981                         mss |= (hdr_len & 0xc) << 12;
7982                         if (hdr_len & 0x10)
7983                                 base_flags |= 0x00000010;
7984                         base_flags |= (hdr_len & 0x3e0) << 5;
7985                 } else if (tg3_flag(tp, HW_TSO_2))
7986                         mss |= hdr_len << 9;
7987                 else if (tg3_flag(tp, HW_TSO_1) ||
7988                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7989                         if (tcp_opt_len || iph->ihl > 5) {
7990                                 int tsflags;
7991
7992                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993                                 mss |= (tsflags << 11);
7994                         }
7995                 } else {
7996                         if (tcp_opt_len || iph->ihl > 5) {
7997                                 int tsflags;
7998
7999                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000                                 base_flags |= tsflags << 12;
8001                         }
8002                 }
8003         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8004                 /* HW/FW cannot correctly checksum packets that have been
8005                  * VLAN encapsulated.
8006                  */
8007                 if (skb->protocol == htons(ETH_P_8021Q) ||
8008                     skb->protocol == htons(ETH_P_8021AD)) {
8009                         if (skb_checksum_help(skb))
8010                                 goto drop;
8011                 } else  {
8012                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8013                 }
8014         }
8015
8016         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018                 base_flags |= TXD_FLAG_JMB_PKT;
8019
8020         if (skb_vlan_tag_present(skb)) {
8021                 base_flags |= TXD_FLAG_VLAN;
8022                 vlan = skb_vlan_tag_get(skb);
8023         }
8024
8025         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026             tg3_flag(tp, TX_TSTAMP_EN)) {
8027                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028                 base_flags |= TXD_FLAG_HWTSTAMP;
8029         }
8030
8031         len = skb_headlen(skb);
8032
8033         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8034         if (pci_dma_mapping_error(tp->pdev, mapping))
8035                 goto drop;
8036
8038         tnapi->tx_buffers[entry].skb = skb;
8039         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8040
8041         would_hit_hwbug = 0;
8042
8043         if (tg3_flag(tp, 5701_DMA_BUG))
8044                 would_hit_hwbug = 1;
8045
8046         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8047                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8048                             mss, vlan)) {
8049                 would_hit_hwbug = 1;
8050         } else if (skb_shinfo(skb)->nr_frags > 0) {
8051                 u32 tmp_mss = mss;
8052
8053                 if (!tg3_flag(tp, HW_TSO_1) &&
8054                     !tg3_flag(tp, HW_TSO_2) &&
8055                     !tg3_flag(tp, HW_TSO_3))
8056                         tmp_mss = 0;
8057
8058                 /* Now loop through additional data
8059                  * fragments, and queue them.
8060                  */
8061                 last = skb_shinfo(skb)->nr_frags - 1;
8062                 for (i = 0; i <= last; i++) {
8063                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8064
8065                         len = skb_frag_size(frag);
8066                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8067                                                    len, DMA_TO_DEVICE);
8068
8069                         tnapi->tx_buffers[entry].skb = NULL;
8070                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8071                                            mapping);
8072                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8073                                 goto dma_error;
8074
8075                         if (!budget ||
8076                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8077                                             len, base_flags |
8078                                             ((i == last) ? TXD_FLAG_END : 0),
8079                                             tmp_mss, vlan)) {
8080                                 would_hit_hwbug = 1;
8081                                 break;
8082                         }
8083                 }
8084         }
8085
8086         if (would_hit_hwbug) {
8087                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8088
8089                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8090                         /* If it's a TSO packet, do GSO instead of
8091                          * allocating and copying to a large linear SKB
8092                          */
8093                         if (ip_tot_len) {
8094                                 iph->check = ip_csum;
8095                                 iph->tot_len = ip_tot_len;
8096                         }
8097                         tcph->check = tcp_csum;
8098                         return tg3_tso_bug(tp, tnapi, txq, skb);
8099                 }
8100
8101                 /* If the workaround fails due to memory/mapping
8102                  * failure, silently drop this packet.
8103                  */
8104                 entry = tnapi->tx_prod;
8105                 budget = tg3_tx_avail(tnapi);
8106                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8107                                                 base_flags, mss, vlan))
8108                         goto drop_nofree;
8109         }
8110
8111         skb_tx_timestamp(skb);
8112         netdev_tx_sent_queue(txq, skb->len);
8113
8114         /* Sync BD data before updating mailbox */
8115         wmb();
8116
8117         tnapi->tx_prod = entry;
8118         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8119                 netif_tx_stop_queue(txq);
8120
8121                 /* netif_tx_stop_queue() must be done before checking
8122                  * the tx index in tg3_tx_avail() below, because in
8123                  * tg3_tx(), we update tx index before checking for
8124                  * netif_tx_queue_stopped().
8125                  */
8126                 smp_mb();
8127                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8128                         netif_tx_wake_queue(txq);
8129         }
8130
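        /* Ring the doorbell only when the stack has no more packets
         * queued behind this one (xmit_more) or the queue was just
         * stopped; batching the producer mailbox write saves one MMIO
         * access per packet.
         */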
8131         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8132                 /* Packets are ready, update Tx producer idx on card. */
8133                 tw32_tx_mbox(tnapi->prodmbox, entry);
8134                 mmiowb();
8135         }
8136
8137         return NETDEV_TX_OK;
8138
8139 dma_error:
8140         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8142 drop:
8143         dev_kfree_skb_any(skb);
8144 drop_nofree:
8145         tp->tx_dropped++;
8146         return NETDEV_TX_OK;
8147 }
8148
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8150 {
8151         if (enable) {
8152                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153                                   MAC_MODE_PORT_MODE_MASK);
8154
8155                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8156
8157                 if (!tg3_flag(tp, 5705_PLUS))
8158                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8159
8160                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8162                 else
8163                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8164         } else {
8165                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8166
8167                 if (tg3_flag(tp, 5705_PLUS) ||
8168                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169                     tg3_asic_rev(tp) == ASIC_REV_5700)
8170                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8171         }
8172
8173         tw32(MAC_MODE, tp->mac_mode);
8174         udelay(40);
8175 }
8176
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8178 {
8179         u32 val, bmcr, mac_mode, ptest = 0;
8180
8181         tg3_phy_toggle_apd(tp, false);
8182         tg3_phy_toggle_automdix(tp, false);
8183
8184         if (extlpbk && tg3_phy_set_extloopbk(tp))
8185                 return -EIO;
8186
8187         bmcr = BMCR_FULLDPLX;
8188         switch (speed) {
8189         case SPEED_10:
8190                 break;
8191         case SPEED_100:
8192                 bmcr |= BMCR_SPEED100;
8193                 break;
8194         case SPEED_1000:
8195         default:
8196                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8197                         speed = SPEED_100;
8198                         bmcr |= BMCR_SPEED100;
8199                 } else {
8200                         speed = SPEED_1000;
8201                         bmcr |= BMCR_SPEED1000;
8202                 }
8203         }
8204
8205         if (extlpbk) {
8206                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207                         tg3_readphy(tp, MII_CTRL1000, &val);
8208                         val |= CTL1000_AS_MASTER |
8209                                CTL1000_ENABLE_MASTER;
8210                         tg3_writephy(tp, MII_CTRL1000, val);
8211                 } else {
8212                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213                                 MII_TG3_FET_PTEST_TRIM_2;
8214                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8215                 }
8216         } else
8217                 bmcr |= BMCR_LOOPBACK;
8218
8219         tg3_writephy(tp, MII_BMCR, bmcr);
8220
8221         /* The write needs to be flushed for the FETs */
8222         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223                 tg3_readphy(tp, MII_BMCR, &bmcr);
8224
8225         udelay(40);
8226
8227         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228             tg3_asic_rev(tp) == ASIC_REV_5785) {
8229                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8231                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8232
8233                 /* The write needs to be flushed for the AC131 */
8234                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8235         }
8236
8237         /* Reset to prevent losing 1st rx packet intermittently */
8238         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239             tg3_flag(tp, 5780_CLASS)) {
8240                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8241                 udelay(10);
8242                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8243         }
8244
8245         mac_mode = tp->mac_mode &
8246                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247         if (speed == SPEED_1000)
8248                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8249         else
8250                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8251
8252         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8254
8255                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258                         mac_mode |= MAC_MODE_LINK_POLARITY;
8259
8260                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8262         }
8263
8264         tw32(MAC_MODE, mac_mode);
8265         udelay(40);
8266
8267         return 0;
8268 }
8269
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8271 {
8272         struct tg3 *tp = netdev_priv(dev);
8273
8274         if (features & NETIF_F_LOOPBACK) {
8275                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8276                         return;
8277
8278                 spin_lock_bh(&tp->lock);
8279                 tg3_mac_loopback(tp, true);
8280                 netif_carrier_on(tp->dev);
8281                 spin_unlock_bh(&tp->lock);
8282                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8283         } else {
8284                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8285                         return;
8286
8287                 spin_lock_bh(&tp->lock);
8288                 tg3_mac_loopback(tp, false);
8289                 /* Force link status check */
8290                 tg3_setup_phy(tp, true);
8291                 spin_unlock_bh(&tp->lock);
8292                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8293         }
8294 }
8295
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297         netdev_features_t features)
8298 {
8299         struct tg3 *tp = netdev_priv(dev);
8300
8301         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302                 features &= ~NETIF_F_ALL_TSO;
8303
8304         return features;
8305 }
8306
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8308 {
8309         netdev_features_t changed = dev->features ^ features;
8310
8311         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312                 tg3_set_loopback(dev, features);
8313
8314         return 0;
8315 }
8316
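/* Free rx buffers.  The per-vector staging prodrings can only hold
 * buffers inside their cons..prod window, so only that span is
 * walked; napi[0]'s fully populated ring is swept slot by slot.
 */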
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318                                  struct tg3_rx_prodring_set *tpr)
8319 {
8320         int i;
8321
8322         if (tpr != &tp->napi[0].prodring) {
8323                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324                      i = (i + 1) & tp->rx_std_ring_mask)
8325                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8326                                         tp->rx_pkt_map_sz);
8327
8328                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329                         for (i = tpr->rx_jmb_cons_idx;
8330                              i != tpr->rx_jmb_prod_idx;
8331                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8332                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8333                                                 TG3_RX_JMB_MAP_SZ);
8334                         }
8335                 }
8336
8337                 return;
8338         }
8339
8340         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8342                                 tp->rx_pkt_map_sz);
8343
8344         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347                                         TG3_RX_JMB_MAP_SZ);
8348         }
8349 }
8350
8351 /* Initialize rx rings for packet processing.
8352  *
8353  * The chip has been shut down and the driver detached from
8354  * the networking stack, so no interrupts or new tx packets will
8355  * end up in the driver.  tp->{tx,}lock are held and thus
8356  * we may not sleep.
8357  */
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359                                  struct tg3_rx_prodring_set *tpr)
8360 {
8361         u32 i, rx_pkt_dma_sz;
8362
8363         tpr->rx_std_cons_idx = 0;
8364         tpr->rx_std_prod_idx = 0;
8365         tpr->rx_jmb_cons_idx = 0;
8366         tpr->rx_jmb_prod_idx = 0;
8367
8368         if (tpr != &tp->napi[0].prodring) {
8369                 memset(&tpr->rx_std_buffers[0], 0,
8370                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8371                 if (tpr->rx_jmb_buffers)
8372                         memset(&tpr->rx_jmb_buffers[0], 0,
8373                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8374                 goto done;
8375         }
8376
8377         /* Zero out all descriptors. */
8378         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8379
8380         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381         if (tg3_flag(tp, 5780_CLASS) &&
8382             tp->dev->mtu > ETH_DATA_LEN)
8383                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8385
8386         /* Initialize invariants of the rings, we only set this
8387          * stuff once.  This works because the card does not
8388          * write into the rx buffer posting rings.
8389          */
8390         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391                 struct tg3_rx_buffer_desc *rxd;
8392
8393                 rxd = &tpr->rx_std[i];
8394                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8397                                (i << RXD_OPAQUE_INDEX_SHIFT));
8398         }
8399
8400         /* Now allocate fresh SKBs for each rx ring. */
8401         for (i = 0; i < tp->rx_pending; i++) {
8402                 unsigned int frag_size;
8403
8404                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8405                                       &frag_size) < 0) {
8406                         netdev_warn(tp->dev,
8407                                     "Using a smaller RX standard ring. Only "
8408                                     "%d out of %d buffers were allocated "
8409                                     "successfully\n", i, tp->rx_pending);
8410                         if (i == 0)
8411                                 goto initfail;
8412                         tp->rx_pending = i;
8413                         break;
8414                 }
8415         }
8416
8417         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8418                 goto done;
8419
8420         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8421
8422         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8423                 goto done;
8424
8425         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426                 struct tg3_rx_buffer_desc *rxd;
8427
8428                 rxd = &tpr->rx_jmb[i].std;
8429                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8431                                   RXD_FLAG_JUMBO;
8432                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433                        (i << RXD_OPAQUE_INDEX_SHIFT));
8434         }
8435
8436         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437                 unsigned int frag_size;
8438
8439                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8440                                       &frag_size) < 0) {
8441                         netdev_warn(tp->dev,
8442                                     "Using a smaller RX jumbo ring. Only %d "
8443                                     "out of %d buffers were allocated "
8444                                     "successfully\n", i, tp->rx_jumbo_pending);
8445                         if (i == 0)
8446                                 goto initfail;
8447                         tp->rx_jumbo_pending = i;
8448                         break;
8449                 }
8450         }
8451
8452 done:
8453         return 0;
8454
8455 initfail:
8456         tg3_rx_prodring_free(tp, tpr);
8457         return -ENOMEM;
8458 }
8459
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461                                  struct tg3_rx_prodring_set *tpr)
8462 {
8463         kfree(tpr->rx_std_buffers);
8464         tpr->rx_std_buffers = NULL;
8465         kfree(tpr->rx_jmb_buffers);
8466         tpr->rx_jmb_buffers = NULL;
8467         if (tpr->rx_std) {
8468                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469                                   tpr->rx_std, tpr->rx_std_mapping);
8470                 tpr->rx_std = NULL;
8471         }
8472         if (tpr->rx_jmb) {
8473                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8475                 tpr->rx_jmb = NULL;
8476         }
8477 }
8478
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480                                 struct tg3_rx_prodring_set *tpr)
8481 {
8482         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8483                                       GFP_KERNEL);
8484         if (!tpr->rx_std_buffers)
8485                 return -ENOMEM;
8486
8487         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488                                          TG3_RX_STD_RING_BYTES(tp),
8489                                          &tpr->rx_std_mapping,
8490                                          GFP_KERNEL);
8491         if (!tpr->rx_std)
8492                 goto err_out;
8493
8494         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8496                                               GFP_KERNEL);
8497                 if (!tpr->rx_jmb_buffers)
8498                         goto err_out;
8499
8500                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501                                                  TG3_RX_JMB_RING_BYTES(tp),
8502                                                  &tpr->rx_jmb_mapping,
8503                                                  GFP_KERNEL);
8504                 if (!tpr->rx_jmb)
8505                         goto err_out;
8506         }
8507
8508         return 0;
8509
8510 err_out:
8511         tg3_rx_prodring_fini(tp, tpr);
8512         return -ENOMEM;
8513 }
8514
8515 /* Free up pending packets in all rx/tx rings.
8516  *
8517  * The chip has been shut down and the driver detached from
8518  * the networking stack, so no interrupts or new tx packets will
8519  * end up in the driver.  tp->{tx,}lock is not held and we are not
8520  * in an interrupt context and thus may sleep.
8521  */
8522 static void tg3_free_rings(struct tg3 *tp)
8523 {
8524         int i, j;
8525
8526         for (j = 0; j < tp->irq_cnt; j++) {
8527                 struct tg3_napi *tnapi = &tp->napi[j];
8528
8529                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8530
8531                 if (!tnapi->tx_buffers)
8532                         continue;
8533
8534                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8536
8537                         if (!skb)
8538                                 continue;
8539
8540                         tg3_tx_skb_unmap(tnapi, i,
8541                                          skb_shinfo(skb)->nr_frags - 1);
8542
8543                         dev_kfree_skb_any(skb);
8544                 }
8545                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8546         }
8547 }
8548
8549 /* Initialize tx/rx rings for packet processing.
8550  *
8551  * The chip has been shut down and the driver detached from
8552  * the networking stack, so no interrupts or new tx packets will
8553  * end up in the driver.  tp->{tx,}lock are held and thus
8554  * we may not sleep.
8555  */
8556 static int tg3_init_rings(struct tg3 *tp)
8557 {
8558         int i;
8559
8560         /* Free up all the SKBs. */
8561         tg3_free_rings(tp);
8562
8563         for (i = 0; i < tp->irq_cnt; i++) {
8564                 struct tg3_napi *tnapi = &tp->napi[i];
8565
8566                 tnapi->last_tag = 0;
8567                 tnapi->last_irq_tag = 0;
8568                 tnapi->hw_status->status = 0;
8569                 tnapi->hw_status->status_tag = 0;
8570                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8571
8572                 tnapi->tx_prod = 0;
8573                 tnapi->tx_cons = 0;
8574                 if (tnapi->tx_ring)
8575                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8576
8577                 tnapi->rx_rcb_ptr = 0;
8578                 if (tnapi->rx_rcb)
8579                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8580
8581                 if (tnapi->prodring.rx_std &&
8582                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8583                         tg3_free_rings(tp);
8584                         return -ENOMEM;
8585                 }
8586         }
8587
8588         return 0;
8589 }
8590
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8592 {
8593         int i;
8594
8595         for (i = 0; i < tp->irq_max; i++) {
8596                 struct tg3_napi *tnapi = &tp->napi[i];
8597
8598                 if (tnapi->tx_ring) {
8599                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8601                         tnapi->tx_ring = NULL;
8602                 }
8603
8604                 kfree(tnapi->tx_buffers);
8605                 tnapi->tx_buffers = NULL;
8606         }
8607 }
8608
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8610 {
8611         int i;
8612         struct tg3_napi *tnapi = &tp->napi[0];
8613
8614         /* If multivector TSS is enabled, vector 0 does not handle
8615          * tx interrupts.  Don't allocate any resources for it.
8616          */
8617         if (tg3_flag(tp, ENABLE_TSS))
8618                 tnapi++;
8619
8620         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8622                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8623                 if (!tnapi->tx_buffers)
8624                         goto err_out;
8625
8626                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8627                                                     TG3_TX_RING_BYTES,
8628                                                     &tnapi->tx_desc_mapping,
8629                                                     GFP_KERNEL);
8630                 if (!tnapi->tx_ring)
8631                         goto err_out;
8632         }
8633
8634         return 0;
8635
8636 err_out:
8637         tg3_mem_tx_release(tp);
8638         return -ENOMEM;
8639 }
8640
8641 static void tg3_mem_rx_release(struct tg3 *tp)
8642 {
8643         int i;
8644
8645         for (i = 0; i < tp->irq_max; i++) {
8646                 struct tg3_napi *tnapi = &tp->napi[i];
8647
8648                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8649
8650                 if (!tnapi->rx_rcb)
8651                         continue;
8652
8653                 dma_free_coherent(&tp->pdev->dev,
8654                                   TG3_RX_RCB_RING_BYTES(tp),
8655                                   tnapi->rx_rcb,
8656                                   tnapi->rx_rcb_mapping);
8657                 tnapi->rx_rcb = NULL;
8658         }
8659 }
8660
8661 static int tg3_mem_rx_acquire(struct tg3 *tp)
8662 {
8663         unsigned int i, limit;
8664
8665         limit = tp->rxq_cnt;
8666
8667         /* If RSS is enabled, we need a (dummy) producer ring
8668          * set on vector zero.  This is the true hw prodring.
8669          */
8670         if (tg3_flag(tp, ENABLE_RSS))
8671                 limit++;
8672
8673         for (i = 0; i < limit; i++) {
8674                 struct tg3_napi *tnapi = &tp->napi[i];
8675
8676                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8677                         goto err_out;
8678
8679                 /* If multivector RSS is enabled, vector 0
8680                  * does not handle rx or tx interrupts.
8681                  * Don't allocate any resources for it.
8682                  */
8683                 if (!i && tg3_flag(tp, ENABLE_RSS))
8684                         continue;
8685
8686                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8687                                                     TG3_RX_RCB_RING_BYTES(tp),
8688                                                     &tnapi->rx_rcb_mapping,
8689                                                     GFP_KERNEL);
8690                 if (!tnapi->rx_rcb)
8691                         goto err_out;
8692         }
8693
8694         return 0;
8695
8696 err_out:
8697         tg3_mem_rx_release(tp);
8698         return -ENOMEM;
8699 }
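/* Resulting vector layout (illustrative; assumes both RSS and TSS are
 * enabled, i.e. the multivector MSI-X case):
 *
 *	vector 0:  link/error events plus the (dummy) hw prodring
 *	vector 1:  first rx return ring and first tx ring
 *	vector 2+: additional rx return rings and tx rings
 *
 * Without RSS/TSS, everything runs on vector 0.
 */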
8700
8701 /*
8702  * Must not be invoked with interrupt sources disabled and
8703  * the hardware shut down.
8704  */
8705 static void tg3_free_consistent(struct tg3 *tp)
8706 {
8707         int i;
8708
8709         for (i = 0; i < tp->irq_cnt; i++) {
8710                 struct tg3_napi *tnapi = &tp->napi[i];
8711
8712                 if (tnapi->hw_status) {
8713                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8714                                           tnapi->hw_status,
8715                                           tnapi->status_mapping);
8716                         tnapi->hw_status = NULL;
8717                 }
8718         }
8719
8720         tg3_mem_rx_release(tp);
8721         tg3_mem_tx_release(tp);
8722
8723         if (tp->hw_stats) {
8724                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8725                                   tp->hw_stats, tp->stats_mapping);
8726                 tp->hw_stats = NULL;
8727         }
8728 }
8729
8730 /*
8731  * Must not be invoked with interrupt sources disabled and
8732  * the hardware shut down.  Can sleep.
8733  */
8734 static int tg3_alloc_consistent(struct tg3 *tp)
8735 {
8736         int i;
8737
8738         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8739                                            sizeof(struct tg3_hw_stats),
8740                                            &tp->stats_mapping, GFP_KERNEL);
8741         if (!tp->hw_stats)
8742                 goto err_out;
8743
8744         for (i = 0; i < tp->irq_cnt; i++) {
8745                 struct tg3_napi *tnapi = &tp->napi[i];
8746                 struct tg3_hw_status *sblk;
8747
8748                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8749                                                        TG3_HW_STATUS_SIZE,
8750                                                        &tnapi->status_mapping,
8751                                                        GFP_KERNEL);
8752                 if (!tnapi->hw_status)
8753                         goto err_out;
8754
8755                 sblk = tnapi->hw_status;
8756
8757                 if (tg3_flag(tp, ENABLE_RSS)) {
8758                         u16 *prodptr = NULL;
8759
8760                         /*
8761                          * When RSS is enabled, the status block format changes
8762                          * slightly.  The "rx_jumbo_consumer", "reserved",
8763                          * and "rx_mini_consumer" members get mapped to the
8764                          * other three rx return ring producer indexes.
8765                          */
8766                         switch (i) {
8767                         case 1:
8768                                 prodptr = &sblk->idx[0].rx_producer;
8769                                 break;
8770                         case 2:
8771                                 prodptr = &sblk->rx_jumbo_consumer;
8772                                 break;
8773                         case 3:
8774                                 prodptr = &sblk->reserved;
8775                                 break;
8776                         case 4:
8777                                 prodptr = &sblk->rx_mini_consumer;
8778                                 break;
8779                         }
8780                         tnapi->rx_rcb_prod_idx = prodptr;
8781                 } else {
8782                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8783                 }
8784         }
8785
8786         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8787                 goto err_out;
8788
8789         return 0;
8790
8791 err_out:
8792         tg3_free_consistent(tp);
8793         return -ENOMEM;
8794 }
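/* For reference, the RSS producer-index mapping established above,
 * derived from the switch statement (vector 0 is omitted because it
 * handles no rx traffic under RSS):
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 */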
8795
8796 #define MAX_WAIT_CNT 1000
8797
8798 /* To stop a block, clear the enable bit and poll until it
8799  * clears.  tp->lock is held.
8800  */
8801 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8802 {
8803         unsigned int i;
8804         u32 val;
8805
8806         if (tg3_flag(tp, 5705_PLUS)) {
8807                 switch (ofs) {
8808                 case RCVLSC_MODE:
8809                 case DMAC_MODE:
8810                 case MBFREE_MODE:
8811                 case BUFMGR_MODE:
8812                 case MEMARB_MODE:
8813                         /* We can't enable/disable these bits on the
8814                          * 5705/5750, so just report success.
8815                          */
8816                         return 0;
8817
8818                 default:
8819                         break;
8820                 }
8821         }
8822
8823         val = tr32(ofs);
8824         val &= ~enable_bit;
8825         tw32_f(ofs, val);
8826
8827         for (i = 0; i < MAX_WAIT_CNT; i++) {
8828                 if (pci_channel_offline(tp->pdev)) {
8829                         dev_err(&tp->pdev->dev,
8830                                 "tg3_stop_block device offline, "
8831                                 "ofs=%lx enable_bit=%x\n",
8832                                 ofs, enable_bit);
8833                         return -ENODEV;
8834                 }
8835
8836                 udelay(100);
8837                 val = tr32(ofs);
8838                 if ((val & enable_bit) == 0)
8839                         break;
8840         }
8841
8842         if (i == MAX_WAIT_CNT && !silent) {
8843                 dev_err(&tp->pdev->dev,
8844                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8845                         ofs, enable_bit);
8846                 return -ENODEV;
8847         }
8848
8849         return 0;
8850 }
8851
8852 /* tp->lock is held. */
8853 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8854 {
8855         int i, err;
8856
8857         tg3_disable_ints(tp);
8858
8859         if (pci_channel_offline(tp->pdev)) {
8860                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8861                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8862                 err = -ENODEV;
8863                 goto err_no_dev;
8864         }
8865
8866         tp->rx_mode &= ~RX_MODE_ENABLE;
8867         tw32_f(MAC_RX_MODE, tp->rx_mode);
8868         udelay(10);
8869
8870         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8871         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8872         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8873         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8874         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8875         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8876
8877         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8883         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8884
8885         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8886         tw32_f(MAC_MODE, tp->mac_mode);
8887         udelay(40);
8888
8889         tp->tx_mode &= ~TX_MODE_ENABLE;
8890         tw32_f(MAC_TX_MODE, tp->tx_mode);
8891
8892         for (i = 0; i < MAX_WAIT_CNT; i++) {
8893                 udelay(100);
8894                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8895                         break;
8896         }
8897         if (i >= MAX_WAIT_CNT) {
8898                 dev_err(&tp->pdev->dev,
8899                         "%s timed out, TX_MODE_ENABLE will not clear "
8900                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8901                 err |= -ENODEV;
8902         }
8903
8904         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8905         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8907
8908         tw32(FTQ_RESET, 0xffffffff);
8909         tw32(FTQ_RESET, 0x00000000);
8910
8911         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8913
8914 err_no_dev:
8915         for (i = 0; i < tp->irq_cnt; i++) {
8916                 struct tg3_napi *tnapi = &tp->napi[i];
8917                 if (tnapi->hw_status)
8918                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8919         }
8920
8921         return err;
8922 }
8923
8924 /* Save PCI command register before chip reset */
8925 static void tg3_save_pci_state(struct tg3 *tp)
8926 {
8927         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8928 }
8929
8930 /* Restore PCI state after chip reset */
8931 static void tg3_restore_pci_state(struct tg3 *tp)
8932 {
8933         u32 val;
8934
8935         /* Re-enable indirect register accesses. */
8936         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8937                                tp->misc_host_ctrl);
8938
8939         /* Set MAX PCI retry to zero. */
8940         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8941         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8942             tg3_flag(tp, PCIX_MODE))
8943                 val |= PCISTATE_RETRY_SAME_DMA;
8944         /* Allow reads and writes to the APE register and memory space. */
8945         if (tg3_flag(tp, ENABLE_APE))
8946                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8947                        PCISTATE_ALLOW_APE_SHMEM_WR |
8948                        PCISTATE_ALLOW_APE_PSPACE_WR;
8949         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8950
8951         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8952
8953         if (!tg3_flag(tp, PCI_EXPRESS)) {
8954                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8955                                       tp->pci_cacheline_sz);
8956                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8957                                       tp->pci_lat_timer);
8958         }
8959
8960         /* Make sure PCI-X relaxed ordering bit is clear. */
8961         if (tg3_flag(tp, PCIX_MODE)) {
8962                 u16 pcix_cmd;
8963
8964                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8965                                      &pcix_cmd);
8966                 pcix_cmd &= ~PCI_X_CMD_ERO;
8967                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8968                                       pcix_cmd);
8969         }
8970
8971         if (tg3_flag(tp, 5780_CLASS)) {
8972
8973                 /* Chip reset on 5780 will reset MSI enable bit,
8974                  * so need to restore it.
8975                  */
8976                 if (tg3_flag(tp, USING_MSI)) {
8977                         u16 ctrl;
8978
8979                         pci_read_config_word(tp->pdev,
8980                                              tp->msi_cap + PCI_MSI_FLAGS,
8981                                              &ctrl);
8982                         pci_write_config_word(tp->pdev,
8983                                               tp->msi_cap + PCI_MSI_FLAGS,
8984                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8985                         val = tr32(MSGINT_MODE);
8986                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8987                 }
8988         }
8989 }
8990
8991 static void tg3_override_clk(struct tg3 *tp)
8992 {
8993         u32 val;
8994
8995         switch (tg3_asic_rev(tp)) {
8996         case ASIC_REV_5717:
8997                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8998                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8999                      TG3_CPMU_MAC_ORIDE_ENABLE);
9000                 break;
9001
9002         case ASIC_REV_5719:
9003         case ASIC_REV_5720:
9004                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9005                 break;
9006
9007         default:
9008                 return;
9009         }
9010 }
9011
9012 static void tg3_restore_clk(struct tg3 *tp)
9013 {
9014         u32 val;
9015
9016         switch (tg3_asic_rev(tp)) {
9017         case ASIC_REV_5717:
9018                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9019                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9020                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9021                 break;
9022
9023         case ASIC_REV_5719:
9024         case ASIC_REV_5720:
9025                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9026                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9027                 break;
9028
9029         default:
9030                 return;
9031         }
9032 }
9033
9034 /* tp->lock is held. */
9035 static int tg3_chip_reset(struct tg3 *tp)
9036         __releases(tp->lock)
9037         __acquires(tp->lock)
9038 {
9039         u32 val;
9040         void (*write_op)(struct tg3 *, u32, u32);
9041         int i, err;
9042
9043         if (!pci_device_is_present(tp->pdev))
9044                 return -ENODEV;
9045
9046         tg3_nvram_lock(tp);
9047
9048         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9049
9050         /* No matching tg3_nvram_unlock() after this because
9051          * chip reset below will undo the nvram lock.
9052          */
9053         tp->nvram_lock_cnt = 0;
9054
9055         /* GRC_MISC_CFG core clock reset will clear the memory
9056          * enable bit in PCI register 4 and the MSI enable bit
9057          * on some chips, so we save relevant registers here.
9058          */
9059         tg3_save_pci_state(tp);
9060
9061         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9062             tg3_flag(tp, 5755_PLUS))
9063                 tw32(GRC_FASTBOOT_PC, 0);
9064
9065         /*
9066          * We must avoid the readl() that normally takes place.
9067          * It locks machines, causes machine checks, and other
9068          * fun things.  So, temporarily disable the 5701
9069          * hardware workaround while we do the reset.
9070          */
9071         write_op = tp->write32;
9072         if (write_op == tg3_write_flush_reg32)
9073                 tp->write32 = tg3_write32;
9074
9075         /* Prevent the irq handler from reading or writing PCI registers
9076          * during chip reset when the memory enable bit in the PCI command
9077          * register may be cleared.  The chip does not generate interrupts
9078          * at this time, but the irq handler may still be called due to irq
9079          * sharing or irqpoll.
9080          */
9081         tg3_flag_set(tp, CHIP_RESETTING);
9082         for (i = 0; i < tp->irq_cnt; i++) {
9083                 struct tg3_napi *tnapi = &tp->napi[i];
9084                 if (tnapi->hw_status) {
9085                         tnapi->hw_status->status = 0;
9086                         tnapi->hw_status->status_tag = 0;
9087                 }
9088                 tnapi->last_tag = 0;
9089                 tnapi->last_irq_tag = 0;
9090         }
9091         smp_mb();
9092
9093         tg3_full_unlock(tp);
9094
9095         for (i = 0; i < tp->irq_cnt; i++)
9096                 synchronize_irq(tp->napi[i].irq_vec);
9097
9098         tg3_full_lock(tp, 0);
9099
9100         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9101                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9102                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9103         }
9104
9105         /* do the reset */
9106         val = GRC_MISC_CFG_CORECLK_RESET;
9107
9108         if (tg3_flag(tp, PCI_EXPRESS)) {
9109                 /* Force PCIe 1.0a mode */
9110                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9111                     !tg3_flag(tp, 57765_PLUS) &&
9112                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9113                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9114                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9115
9116                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9117                         tw32(GRC_MISC_CFG, (1 << 29));
9118                         val |= (1 << 29);
9119                 }
9120         }
9121
9122         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9123                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9124                 tw32(GRC_VCPU_EXT_CTRL,
9125                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9126         }
9127
9128         /* Set the clock to the highest frequency to avoid timeouts. In link
9129          * aware mode, the clock speed could be slow and the bootcode may not
9130          * complete within the expected time. Override the clock to allow the
9131          * bootcode to finish sooner and then restore it.
9132          */
9133         tg3_override_clk(tp);
9134
9135         /* Manage GPHY power for all PCIe devices lacking a CPMU. */
9136         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9137                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9138
9139         tw32(GRC_MISC_CFG, val);
9140
9141         /* restore 5701 hardware bug workaround write method */
9142         tp->write32 = write_op;
9143
9144         /* Unfortunately, we have to delay before the PCI read back.
9145          * Some 575X chips will not even respond to a PCI cfg access
9146          * when the reset command is given to the chip.
9147          *
9148          * How do these hardware designers expect things to work
9149          * properly if the PCI write is posted for a long period
9150          * of time?  It is always necessary to have some method by
9151          * which a register read back can occur to push out the write
9152          * that performs the reset.
9153          *
9154          * For most tg3 variants the trick below was working.
9155          * Ho hum...
9156          */
9157         udelay(120);
9158
9159         /* Flush PCI posted writes.  The normal MMIO registers
9160          * are inaccessible at this time so this is the only
9161          * way to do this reliably (actually, this is no longer
9162          * the case, see above).  I tried to use indirect
9163          * register read/write but this upset some 5701 variants.
9164          */
9165         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9166
9167         udelay(120);
9168
9169         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9170                 u16 val16;
9171
9172                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9173                         int j;
9174                         u32 cfg_val;
9175
9176                         /* Wait for link training to complete.  */
9177                         for (j = 0; j < 5000; j++)
9178                                 udelay(100);
9179
9180                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9181                         pci_write_config_dword(tp->pdev, 0xc4,
9182                                                cfg_val | (1 << 15));
9183                 }
9184
9185                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9186                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9187                 /*
9188                  * Older PCIe devices only support the 128 byte
9189                  * MPS setting.  Enforce the restriction.
9190                  */
9191                 if (!tg3_flag(tp, CPMU_PRESENT))
9192                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9193                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9194
9195                 /* Clear error status */
9196                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9197                                       PCI_EXP_DEVSTA_CED |
9198                                       PCI_EXP_DEVSTA_NFED |
9199                                       PCI_EXP_DEVSTA_FED |
9200                                       PCI_EXP_DEVSTA_URD);
9201         }
9202
9203         tg3_restore_pci_state(tp);
9204
9205         tg3_flag_clear(tp, CHIP_RESETTING);
9206         tg3_flag_clear(tp, ERROR_PROCESSED);
9207
9208         val = 0;
9209         if (tg3_flag(tp, 5780_CLASS))
9210                 val = tr32(MEMARB_MODE);
9211         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9212
9213         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9214                 tg3_stop_fw(tp);
9215                 tw32(0x5000, 0x400);
9216         }
9217
9218         if (tg3_flag(tp, IS_SSB_CORE)) {
9219                 /*
9220                  * BCM4785: Stop the Rx RISC CPU, which is not required
9221                  * for normal operation, to avoid repercussions from
9222                  * using a potentially defective internal ROM.
9223                  */
9224                 tg3_stop_fw(tp);
9225                 tg3_halt_cpu(tp, RX_CPU_BASE);
9226         }
9227
9228         err = tg3_poll_fw(tp);
9229         if (err)
9230                 return err;
9231
9232         tw32(GRC_MODE, tp->grc_mode);
9233
9234         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9235                 val = tr32(0xc4);
9236
9237                 tw32(0xc4, val | (1 << 15));
9238         }
9239
9240         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9241             tg3_asic_rev(tp) == ASIC_REV_5705) {
9242                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9243                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9244                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9245                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9246         }
9247
9248         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9249                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9250                 val = tp->mac_mode;
9251         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9252                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9253                 val = tp->mac_mode;
9254         } else
9255                 val = 0;
9256
9257         tw32_f(MAC_MODE, val);
9258         udelay(40);
9259
9260         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9261
9262         tg3_mdio_start(tp);
9263
9264         if (tg3_flag(tp, PCI_EXPRESS) &&
9265             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9266             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9267             !tg3_flag(tp, 57765_PLUS)) {
9268                 val = tr32(0x7c00);
9269
9270                 tw32(0x7c00, val | (1 << 25));
9271         }
9272
9273         tg3_restore_clk(tp);
9274
9275         /* Reprobe ASF enable state.  */
9276         tg3_flag_clear(tp, ENABLE_ASF);
9277         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9278                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9279
9280         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9281         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9282         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9283                 u32 nic_cfg;
9284
9285                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9286                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9287                         tg3_flag_set(tp, ENABLE_ASF);
9288                         tp->last_event_jiffies = jiffies;
9289                         if (tg3_flag(tp, 5750_PLUS))
9290                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9291
9292                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9293                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9294                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9295                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9296                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9297                 }
9298         }
9299
9300         return 0;
9301 }
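/* Summary of the reset sequence above (a reading aid, not a spec):
 *
 *	1. Take the NVRAM lock and the APE GRC lock; save PCI state.
 *	2. Quiesce irq handlers (CHIP_RESETTING plus synchronize_irq()).
 *	3. Hit GRC_MISC_CFG_CORECLK_RESET with the 5701 write-flush
 *	   workaround temporarily disabled.
 *	4. Delay, flush posted writes with a PCI config read, then
 *	   restore the saved PCI state.
 *	5. Poll the bootcode (tg3_poll_fw()) and restore MAC, clock and
 *	   ASF state.
 */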
9302
9303 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9304 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9305 static void __tg3_set_rx_mode(struct net_device *);
9306
9307 /* tp->lock is held. */
9308 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9309 {
9310         int err;
9311
9312         tg3_stop_fw(tp);
9313
9314         tg3_write_sig_pre_reset(tp, kind);
9315
9316         tg3_abort_hw(tp, silent);
9317         err = tg3_chip_reset(tp);
9318
9319         __tg3_set_mac_addr(tp, false);
9320
9321         tg3_write_sig_legacy(tp, kind);
9322         tg3_write_sig_post_reset(tp, kind);
9323
9324         if (tp->hw_stats) {
9325                 /* Save the stats across chip resets... */
9326                 tg3_get_nstats(tp, &tp->net_stats_prev);
9327                 tg3_get_estats(tp, &tp->estats_prev);
9328
9329                 /* And make sure the next sample is new data */
9330                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9331         }
9332
9333         return err;
9334 }
9335
9336 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9337 {
9338         struct tg3 *tp = netdev_priv(dev);
9339         struct sockaddr *addr = p;
9340         int err = 0;
9341         bool skip_mac_1 = false;
9342
9343         if (!is_valid_ether_addr(addr->sa_data))
9344                 return -EADDRNOTAVAIL;
9345
9346         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9347
9348         if (!netif_running(dev))
9349                 return 0;
9350
9351         if (tg3_flag(tp, ENABLE_ASF)) {
9352                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9353
9354                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9355                 addr0_low = tr32(MAC_ADDR_0_LOW);
9356                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9357                 addr1_low = tr32(MAC_ADDR_1_LOW);
9358
9359                 /* Skip MAC addr 1 if ASF is using it. */
9360                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9361                     !(addr1_high == 0 && addr1_low == 0))
9362                         skip_mac_1 = true;
9363         }
9364         spin_lock_bh(&tp->lock);
9365         __tg3_set_mac_addr(tp, skip_mac_1);
9366         __tg3_set_rx_mode(dev);
9367         spin_unlock_bh(&tp->lock);
9368
9369         return err;
9370 }
9371
9372 /* tp->lock is held. */
9373 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9374                            dma_addr_t mapping, u32 maxlen_flags,
9375                            u32 nic_addr)
9376 {
9377         tg3_write_mem(tp,
9378                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9379                       ((u64) mapping >> 32));
9380         tg3_write_mem(tp,
9381                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9382                       ((u64) mapping & 0xffffffff));
9383         tg3_write_mem(tp,
9384                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9385                        maxlen_flags);
9386
9387         if (!tg3_flag(tp, 5705_PLUS))
9388                 tg3_write_mem(tp,
9389                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9390                               nic_addr);
9391 }
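/* The BDINFO block written above holds, in order: the host ring DMA
 * address (high then low 32 bits, at TG3_BDINFO_HOST_ADDR), the
 * maxlen/flags word (TG3_BDINFO_MAXLEN_FLAGS) and, on pre-5705 chips
 * only, a NIC-local ring address (TG3_BDINFO_NIC_ADDR).
 */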
9392
9393
9394 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9395 {
9396         int i = 0;
9397
9398         if (!tg3_flag(tp, ENABLE_TSS)) {
9399                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9400                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9401                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9402         } else {
9403                 tw32(HOSTCC_TXCOL_TICKS, 0);
9404                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9405                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9406
9407                 for (; i < tp->txq_cnt; i++) {
9408                         u32 reg;
9409
9410                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9411                         tw32(reg, ec->tx_coalesce_usecs);
9412                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9413                         tw32(reg, ec->tx_max_coalesced_frames);
9414                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9415                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9416                 }
9417         }
9418
9419         for (; i < tp->irq_max - 1; i++) {
9420                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9421                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9422                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9423         }
9424 }
9425
9426 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9427 {
9428         int i = 0;
9429         u32 limit = tp->rxq_cnt;
9430
9431         if (!tg3_flag(tp, ENABLE_RSS)) {
9432                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9433                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9434                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9435                 limit--;
9436         } else {
9437                 tw32(HOSTCC_RXCOL_TICKS, 0);
9438                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9439                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9440         }
9441
9442         for (; i < limit; i++) {
9443                 u32 reg;
9444
9445                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9446                 tw32(reg, ec->rx_coalesce_usecs);
9447                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9448                 tw32(reg, ec->rx_max_coalesced_frames);
9449                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9450                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9451         }
9452
9453         for (; i < tp->irq_max - 1; i++) {
9454                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9455                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9456                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9457         }
9458 }
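/* The per-vector coalescing registers sit at a fixed 0x18-byte stride,
 * which the loops above rely on.  A minimal sketch of the addressing
 * (hypothetical helper, not part of the driver):
 *
 *	static u32 rxcol_ticks_reg(int vec)	// vec >= 1
 *	{
 *		return HOSTCC_RXCOL_TICKS_VEC1 + (vec - 1) * 0x18;
 *	}
 */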
9459
9460 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9461 {
9462         tg3_coal_tx_init(tp, ec);
9463         tg3_coal_rx_init(tp, ec);
9464
9465         if (!tg3_flag(tp, 5705_PLUS)) {
9466                 u32 val = ec->stats_block_coalesce_usecs;
9467
9468                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9469                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9470
9471                 if (!tp->link_up)
9472                         val = 0;
9473
9474                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9475         }
9476 }
9477
9478 /* tp->lock is held. */
9479 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9480 {
9481         u32 txrcb, limit;
9482
9483         /* Disable all transmit rings but the first. */
9484         if (!tg3_flag(tp, 5705_PLUS))
9485                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9486         else if (tg3_flag(tp, 5717_PLUS))
9487                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9488         else if (tg3_flag(tp, 57765_CLASS) ||
9489                  tg3_asic_rev(tp) == ASIC_REV_5762)
9490                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9491         else
9492                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9493
9494         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9495              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9496                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9497                               BDINFO_FLAGS_DISABLED);
9498 }
9499
9500 /* tp->lock is held. */
9501 static void tg3_tx_rcbs_init(struct tg3 *tp)
9502 {
9503         int i = 0;
9504         u32 txrcb = NIC_SRAM_SEND_RCB;
9505
9506         if (tg3_flag(tp, ENABLE_TSS))
9507                 i++;
9508
9509         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9510                 struct tg3_napi *tnapi = &tp->napi[i];
9511
9512                 if (!tnapi->tx_ring)
9513                         continue;
9514
9515                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9516                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9517                                NIC_SRAM_TX_BUFFER_DESC);
9518         }
9519 }
9520
9521 /* tp->lock is held. */
9522 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9523 {
9524         u32 rxrcb, limit;
9525
9526         /* Disable all receive return rings but the first. */
9527         if (tg3_flag(tp, 5717_PLUS))
9528                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9529         else if (!tg3_flag(tp, 5705_PLUS))
9530                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9531         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9532                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9533                  tg3_flag(tp, 57765_CLASS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9535         else
9536                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9537
9538         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9539              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9540                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9541                               BDINFO_FLAGS_DISABLED);
9542 }
9543
9544 /* tp->lock is held. */
9545 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9546 {
9547         int i = 0;
9548         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9549
9550         if (tg3_flag(tp, ENABLE_RSS))
9551                 i++;
9552
9553         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9554                 struct tg3_napi *tnapi = &tp->napi[i];
9555
9556                 if (!tnapi->rx_rcb)
9557                         continue;
9558
9559                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9560                                (tp->rx_ret_ring_mask + 1) <<
9561                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9562         }
9563 }
9564
9565 /* tp->lock is held. */
9566 static void tg3_rings_reset(struct tg3 *tp)
9567 {
9568         int i;
9569         u32 stblk;
9570         struct tg3_napi *tnapi = &tp->napi[0];
9571
9572         tg3_tx_rcbs_disable(tp);
9573
9574         tg3_rx_ret_rcbs_disable(tp);
9575
9576         /* Disable interrupts */
9577         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9578         tp->napi[0].chk_msi_cnt = 0;
9579         tp->napi[0].last_rx_cons = 0;
9580         tp->napi[0].last_tx_cons = 0;
9581
9582         /* Zero mailbox registers. */
9583         if (tg3_flag(tp, SUPPORT_MSIX)) {
9584                 for (i = 1; i < tp->irq_max; i++) {
9585                         tp->napi[i].tx_prod = 0;
9586                         tp->napi[i].tx_cons = 0;
9587                         if (tg3_flag(tp, ENABLE_TSS))
9588                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9589                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9590                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9591                         tp->napi[i].chk_msi_cnt = 0;
9592                         tp->napi[i].last_rx_cons = 0;
9593                         tp->napi[i].last_tx_cons = 0;
9594                 }
9595                 if (!tg3_flag(tp, ENABLE_TSS))
9596                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9597         } else {
9598                 tp->napi[0].tx_prod = 0;
9599                 tp->napi[0].tx_cons = 0;
9600                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9601                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9602         }
9603
9604         /* Make sure the NIC-based send BD rings are disabled. */
9605         if (!tg3_flag(tp, 5705_PLUS)) {
9606                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9607                 for (i = 0; i < 16; i++)
9608                         tw32_tx_mbox(mbox + i * 8, 0);
9609         }
9610
9611         /* Clear the status block in RAM. */
9612         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9613
9614         /* Set status block DMA address */
9615         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9616              ((u64) tnapi->status_mapping >> 32));
9617         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9618              ((u64) tnapi->status_mapping & 0xffffffff));
9619
9620         stblk = HOSTCC_STATBLCK_RING1;
9621
9622         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9623                 u64 mapping = (u64)tnapi->status_mapping;
9624                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9625                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9626                 stblk += 8;
9627
9628                 /* Clear the status block in RAM. */
9629                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9630         }
9631
9632         tg3_tx_rcbs_init(tp);
9633         tg3_rx_ret_rcbs_init(tp);
9634 }
9635
9636 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9637 {
9638         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9639
9640         if (!tg3_flag(tp, 5750_PLUS) ||
9641             tg3_flag(tp, 5780_CLASS) ||
9642             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9643             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9644             tg3_flag(tp, 57765_PLUS))
9645                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9646         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9647                  tg3_asic_rev(tp) == ASIC_REV_5787)
9648                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9649         else
9650                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9651
9652         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9653         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9654
9655         val = min(nic_rep_thresh, host_rep_thresh);
9656         tw32(RCVBDI_STD_THRESH, val);
9657
9658         if (tg3_flag(tp, 57765_PLUS))
9659                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9660
9661         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9662                 return;
9663
9664         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9665
9666         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9667
9668         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9669         tw32(RCVBDI_JUMBO_THRESH, val);
9670
9671         if (tg3_flag(tp, 57765_PLUS))
9672                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9673 }
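/* Worked example of the threshold math above, with hypothetical
 * numbers: given tp->rx_pending = 200, host_rep_thresh =
 * max(200 / 8, 1) = 25, and RCVBDI_STD_THRESH receives the smallest of
 * bdcache_maxcnt / 2, tp->rx_std_max_post and 25.
 */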
9674
9675 static inline u32 calc_crc(unsigned char *buf, int len)
9676 {
9677         u32 reg;
9678         u32 tmp;
9679         int j, k;
9680
9681         reg = 0xffffffff;
9682
9683         for (j = 0; j < len; j++) {
9684                 reg ^= buf[j];
9685
9686                 for (k = 0; k < 8; k++) {
9687                         tmp = reg & 0x01;
9688
9689                         reg >>= 1;
9690
9691                         if (tmp)
9692                                 reg ^= 0xedb88320;
9693                 }
9694         }
9695
9696         return ~reg;
9697 }
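/* calc_crc() is the standard bit-reflected Ethernet CRC-32 (polynomial
 * 0xedb88320), computed bitwise.  __tg3_set_rx_mode() below folds it
 * into the 128-bit multicast hash filter like so:
 *
 *	bit    = ~crc & 0x7f;		// complemented low 7 CRC bits
 *	regidx = (bit & 0x60) >> 5;	// selects MAC_HASH_REG_0..3
 *	bit   &= 0x1f;			// bit within that register
 *
 * so 7 bits of CRC pick one of 128 hash-filter bits.
 */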
9698
9699 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9700 {
9701         /* accept or reject all multicast frames */
9702         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9703         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9704         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9705         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9706 }
9707
9708 static void __tg3_set_rx_mode(struct net_device *dev)
9709 {
9710         struct tg3 *tp = netdev_priv(dev);
9711         u32 rx_mode;
9712
9713         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9714                                   RX_MODE_KEEP_VLAN_TAG);
9715
9716 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9717         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9718          * flag clear.
9719          */
9720         if (!tg3_flag(tp, ENABLE_ASF))
9721                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9722 #endif
9723
9724         if (dev->flags & IFF_PROMISC) {
9725                 /* Promiscuous mode. */
9726                 rx_mode |= RX_MODE_PROMISC;
9727         } else if (dev->flags & IFF_ALLMULTI) {
9728                 /* Accept all multicast. */
9729                 tg3_set_multi(tp, 1);
9730         } else if (netdev_mc_empty(dev)) {
9731                 /* Reject all multicast. */
9732                 tg3_set_multi(tp, 0);
9733         } else {
9734                 /* Accept one or more multicast addresses. */
9735                 struct netdev_hw_addr *ha;
9736                 u32 mc_filter[4] = { 0, };
9737                 u32 regidx;
9738                 u32 bit;
9739                 u32 crc;
9740
9741                 netdev_for_each_mc_addr(ha, dev) {
9742                         crc = calc_crc(ha->addr, ETH_ALEN);
9743                         bit = ~crc & 0x7f;
9744                         regidx = (bit & 0x60) >> 5;
9745                         bit &= 0x1f;
9746                         mc_filter[regidx] |= (1 << bit);
9747                 }
9748
9749                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9750                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9751                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9752                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9753         }
9754
9755         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9756                 rx_mode |= RX_MODE_PROMISC;
9757         } else if (!(dev->flags & IFF_PROMISC)) {
9758                 /* Add all entries to the MAC addr filter list */
9759                 int i = 0;
9760                 struct netdev_hw_addr *ha;
9761
9762                 netdev_for_each_uc_addr(ha, dev) {
9763                         __tg3_set_one_mac_addr(tp, ha->addr,
9764                                                i + TG3_UCAST_ADDR_IDX(tp));
9765                         i++;
9766                 }
9767         }
9768
9769         if (rx_mode != tp->rx_mode) {
9770                 tp->rx_mode = rx_mode;
9771                 tw32_f(MAC_RX_MODE, rx_mode);
9772                 udelay(10);
9773         }
9774 }
9775
9776 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9777 {
9778         int i;
9779
9780         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9781                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9782 }
9783
9784 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9785 {
9786         int i;
9787
9788         if (!tg3_flag(tp, SUPPORT_MSIX))
9789                 return;
9790
9791         if (tp->rxq_cnt == 1) {
9792                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9793                 return;
9794         }
9795
9796         /* Validate table against current IRQ count */
9797         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9798                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9799                         break;
9800         }
9801
9802         if (i != TG3_RSS_INDIR_TBL_SIZE)
9803                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9804 }
9805
9806 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9807 {
9808         int i = 0;
9809         u32 reg = MAC_RSS_INDIR_TBL_0;
9810
9811         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9812                 u32 val = tp->rss_ind_tbl[i];
9813                 i++;
9814                 for (; i % 8; i++) {
9815                         val <<= 4;
9816                         val |= tp->rss_ind_tbl[i];
9817                 }
9818                 tw32(reg, val);
9819                 reg += 4;
9820         }
9821 }
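/* tg3_rss_write_indir_tbl() packs eight 4-bit table entries into each
 * 32-bit MAC_RSS_INDIR_TBL register, first entry in the most
 * significant nibble.  Illustrative example with hypothetical table
 * contents: entries {1, 2, 3, 0, 1, 2, 3, 0} are written as
 * 0x12301230.
 */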
9822
9823 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9824 {
9825         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9826                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9827         else
9828                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9829 }
9830
9831 /* tp->lock is held. */
9832 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9833 {
9834         u32 val, rdmac_mode;
9835         int i, err, limit;
9836         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9837
9838         tg3_disable_ints(tp);
9839
9840         tg3_stop_fw(tp);
9841
9842         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9843
9844         if (tg3_flag(tp, INIT_COMPLETE))
9845                 tg3_abort_hw(tp, true);
9846
9847         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9848             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9849                 tg3_phy_pull_config(tp);
9850                 tg3_eee_pull_config(tp, NULL);
9851                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9852         }
9853
9854         /* Enable MAC control of LPI */
9855         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9856                 tg3_setup_eee(tp);
9857
9858         if (reset_phy)
9859                 tg3_phy_reset(tp);
9860
9861         err = tg3_chip_reset(tp);
9862         if (err)
9863                 return err;
9864
9865         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9866
9867         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9868                 val = tr32(TG3_CPMU_CTRL);
9869                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9870                 tw32(TG3_CPMU_CTRL, val);
9871
9872                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9873                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9874                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9875                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9876
9877                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9878                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9879                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9880                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9881
9882                 val = tr32(TG3_CPMU_HST_ACC);
9883                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9884                 val |= CPMU_HST_ACC_MACCLK_6_25;
9885                 tw32(TG3_CPMU_HST_ACC, val);
9886         }
9887
9888         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9889                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9890                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9891                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9892                 tw32(PCIE_PWR_MGMT_THRESH, val);
9893
9894                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9895                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9896
9897                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9898
9899                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9900                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9901         }
9902
9903         if (tg3_flag(tp, L1PLLPD_EN)) {
9904                 u32 grc_mode = tr32(GRC_MODE);
9905
9906                 /* Access the lower 1K of PL PCIE block registers. */
9907                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9908                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9909
9910                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9911                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9912                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9913
9914                 tw32(GRC_MODE, grc_mode);
9915         }
9916
9917         if (tg3_flag(tp, 57765_CLASS)) {
9918                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9919                         u32 grc_mode = tr32(GRC_MODE);
9920
9921                         /* Access the lower 1K of PL PCIE block registers. */
9922                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9923                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9924
9925                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9926                                    TG3_PCIE_PL_LO_PHYCTL5);
9927                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9928                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9929
9930                         tw32(GRC_MODE, grc_mode);
9931                 }
9932
9933                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9934                         u32 grc_mode;
9935
9936                         /* Fix transmit hangs */
9937                         val = tr32(TG3_CPMU_PADRNG_CTL);
9938                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9939                         tw32(TG3_CPMU_PADRNG_CTL, val);
9940
9941                         grc_mode = tr32(GRC_MODE);
9942
9943                         /* Access the lower 1K of DL PCIE block registers. */
9944                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9945                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9946
9947                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9948                                    TG3_PCIE_DL_LO_FTSMAX);
9949                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9950                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9951                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9952
9953                         tw32(GRC_MODE, grc_mode);
9954                 }
9955
9956                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9957                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9958                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9959                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9960         }
9961
9962         /* This works around an issue with Athlon chipsets on
9963          * B3 tigon3 silicon.  This bit has no effect on any
9964          * other revision.  But do not set this on PCI Express
9965          * chips and don't even touch the clocks if the CPMU is present.
9966          */
9967         if (!tg3_flag(tp, CPMU_PRESENT)) {
9968                 if (!tg3_flag(tp, PCI_EXPRESS))
9969                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9970                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9971         }
9972
9973         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9974             tg3_flag(tp, PCIX_MODE)) {
9975                 val = tr32(TG3PCI_PCISTATE);
9976                 val |= PCISTATE_RETRY_SAME_DMA;
9977                 tw32(TG3PCI_PCISTATE, val);
9978         }
9979
9980         if (tg3_flag(tp, ENABLE_APE)) {
9981                 /* Allow reads and writes to the
9982                  * APE register and memory space.
9983                  */
9984                 val = tr32(TG3PCI_PCISTATE);
9985                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9986                        PCISTATE_ALLOW_APE_SHMEM_WR |
9987                        PCISTATE_ALLOW_APE_PSPACE_WR;
9988                 tw32(TG3PCI_PCISTATE, val);
9989         }
9990
9991         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9992                 /* Enable some hw fixes.  */
9993                 val = tr32(TG3PCI_MSI_DATA);
9994                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9995                 tw32(TG3PCI_MSI_DATA, val);
9996         }
9997
9998         /* Descriptor ring init may make accesses to the
9999          * NIC SRAM area to set up the TX descriptors, so we
10000          * can only do this after the hardware has been
10001          * successfully reset.
10002          */
10003         err = tg3_init_rings(tp);
10004         if (err)
10005                 return err;
10006
10007         if (tg3_flag(tp, 57765_PLUS)) {
10008                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10009                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10010                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10011                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10012                 if (!tg3_flag(tp, 57765_CLASS) &&
10013                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10014                     tg3_asic_rev(tp) != ASIC_REV_5762)
10015                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10016                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10017         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10018                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10019                 /* This value is determined during the probe-time DMA
10020                  * engine test, tg3_test_dma.
10021                  */
10022                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10023         }
10024
10025         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10026                           GRC_MODE_4X_NIC_SEND_RINGS |
10027                           GRC_MODE_NO_TX_PHDR_CSUM |
10028                           GRC_MODE_NO_RX_PHDR_CSUM);
10029         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10030
10031         /* Pseudo-header checksum is done by hardware logic and not
10032          * the offload processors, so make the chip do the pseudo-
10033          * header checksums on receive.  For transmit it is more
10034          * convenient to do the pseudo-header checksum in software
10035          * as Linux does that on transmit for us in all cases.
10036          */
10037         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
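        /* For reference, the transmit-side pseudo-header checksum Linux
         * computes is the standard one over the source/destination
         * addresses, protocol and length.  A minimal sketch (not driver
         * code; assumes an IPv4 TCP skb) would be:
         *
         *      struct iphdr *iph = ip_hdr(skb);
         *      tcp_hdr(skb)->check =
         *              ~csum_tcpudp_magic(iph->saddr, iph->daddr,
         *                                 skb->len - ip_hdrlen(skb),
         *                                 IPPROTO_TCP, 0);
         *
         * The hardware then folds the packet payload into this partial
         * checksum.
         */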
10038
10039         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10040         if (tp->rxptpctl)
10041                 tw32(TG3_RX_PTP_CTL,
10042                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10043
10044         if (tg3_flag(tp, PTP_CAPABLE))
10045                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10046
10047         tw32(GRC_MODE, tp->grc_mode | val);
10048
10049         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
10050         val = tr32(GRC_MISC_CFG);
10051         val &= ~0xff;
10052         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10053         tw32(GRC_MISC_CFG, val);
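        /* Arithmetic check: a prescaler value of 65 divides the 66 MHz
         * core clock by 65 + 1 = 66, i.e. a 1 MHz timer tick (assuming
         * the hardware divider is N + 1, which the chosen value implies).
         */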
10054
10055         /* Initialize MBUF/DESC pool. */
10056         if (tg3_flag(tp, 5750_PLUS)) {
10057                 /* Do nothing.  */
10058         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10059                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10060                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10061                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10062                 else
10063                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10064                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10065                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10066         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10067                 int fw_len;
10068
10069                 fw_len = tp->fw_len;
10070                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
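                /* The line above rounds fw_len up to the next 128-byte
                 * boundary, e.g. 0x1a34 becomes 0x1a80; it is equivalent
                 * to ALIGN(fw_len, 0x80).
                 */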
10071                 tw32(BUFMGR_MB_POOL_ADDR,
10072                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10073                 tw32(BUFMGR_MB_POOL_SIZE,
10074                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10075         }
10076
10077         if (tp->dev->mtu <= ETH_DATA_LEN) {
10078                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10079                      tp->bufmgr_config.mbuf_read_dma_low_water);
10080                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10081                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10082                 tw32(BUFMGR_MB_HIGH_WATER,
10083                      tp->bufmgr_config.mbuf_high_water);
10084         } else {
10085                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10086                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10087                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10088                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10089                 tw32(BUFMGR_MB_HIGH_WATER,
10090                      tp->bufmgr_config.mbuf_high_water_jumbo);
10091         }
10092         tw32(BUFMGR_DMA_LOW_WATER,
10093              tp->bufmgr_config.dma_low_water);
10094         tw32(BUFMGR_DMA_HIGH_WATER,
10095              tp->bufmgr_config.dma_high_water);
10096
10097         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10098         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10099                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10100         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10101             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10102             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10103             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10104                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10105         tw32(BUFMGR_MODE, val);
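        /* The poll below gives the buffer manager up to ~20 ms
         * (2000 iterations x 10 us) to report itself enabled.
         */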
10106         for (i = 0; i < 2000; i++) {
10107                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10108                         break;
10109                 udelay(10);
10110         }
10111         if (i >= 2000) {
10112                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10113                 return -ENODEV;
10114         }
10115
10116         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10117                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10118
10119         tg3_setup_rxbd_thresholds(tp);
10120
10121         /* Initialize TG3_BDINFO's at:
10122          *  RCVDBDI_STD_BD:     standard eth size rx ring
10123          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10124          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10125          *
10126          * like so:
10127          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10128          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10129          *                              ring attribute flags
10130          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10131          *
10132          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10133          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10134          *
10135          * The size of each ring is fixed in the firmware, but the location is
10136          * configurable.
10137          */
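        /* Purely illustrative sketch (no such struct exists in the
         * driver): each TG3_BDINFO control block described above can be
         * pictured as
         *
         *      struct tg3_bdinfo {
         *              u32 host_addr_hi;   // TG3_BDINFO_HOST_ADDR + HIGH
         *              u32 host_addr_lo;   // TG3_BDINFO_HOST_ADDR + LOW
         *              u32 maxlen_flags;   // (rx max buf size << 16) | flags
         *              u32 nic_addr;       // TG3_BDINFO_NIC_ADDR
         *      };
         */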
10138         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10139              ((u64) tpr->rx_std_mapping >> 32));
10140         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10141              ((u64) tpr->rx_std_mapping & 0xffffffff));
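        /* The 64-bit DMA address is programmed as two 32-bit halves;
         * e.g. a mapping of 0x0000000123456780 is written as
         * HIGH = 0x00000001, LOW = 0x23456780.
         */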
10142         if (!tg3_flag(tp, 5717_PLUS))
10143                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10144                      NIC_SRAM_RX_BUFFER_DESC);
10145
10146         /* Disable the mini ring */
10147         if (!tg3_flag(tp, 5705_PLUS))
10148                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10149                      BDINFO_FLAGS_DISABLED);
10150
10151         /* Program the jumbo buffer descriptor ring control
10152          * blocks on those devices that have them.
10153          */
10154         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10156
10157                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10158                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10159                              ((u64) tpr->rx_jmb_mapping >> 32));
10160                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10161                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10162                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10163                               BDINFO_FLAGS_MAXLEN_SHIFT;
10164                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10165                              val | BDINFO_FLAGS_USE_EXT_RECV);
10166                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10167                             tg3_flag(tp, 57765_CLASS) ||
10168                             tg3_asic_rev(tp) == ASIC_REV_5762)
10169                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10170                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10171                 } else {
10172                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10173                              BDINFO_FLAGS_DISABLED);
10174                 }
10175
10176                 if (tg3_flag(tp, 57765_PLUS)) {
10177                         val = TG3_RX_STD_RING_SIZE(tp);
10178                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10179                         val |= (TG3_RX_STD_DMA_SZ << 2);
10180                 } else
10181                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10182         } else
10183                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10184
10185         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10186
10187         tpr->rx_std_prod_idx = tp->rx_pending;
10188         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10189
10190         tpr->rx_jmb_prod_idx =
10191                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10192         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10193
10194         tg3_rings_reset(tp);
10195
10196         /* Initialize MAC address and backoff seed. */
10197         __tg3_set_mac_addr(tp, false);
10198
10199         /* MTU + ethernet header + FCS + optional VLAN tag */
10200         tw32(MAC_RX_MTU_SIZE,
10201              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10202
10203         /* The slot time is changed by tg3_setup_phy if we
10204          * run at gigabit with half duplex.
10205          */
10206         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10207               (6 << TX_LENGTHS_IPG_SHIFT) |
10208               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10209
10210         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10211             tg3_asic_rev(tp) == ASIC_REV_5762)
10212                 val |= tr32(MAC_TX_LENGTHS) &
10213                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10214                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10215
10216         tw32(MAC_TX_LENGTHS, val);
10217
10218         /* Receive rules. */
10219         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10220         tw32(RCVLPC_CONFIG, 0x0181);
10221
10222         /* Calculate RDMAC_MODE setting early, we need it to determine
10223          * the RCVLPC_STATE_ENABLE mask.
10224          */
10225         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10226                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10227                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10228                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10229                       RDMAC_MODE_LNGREAD_ENAB);
10230
10231         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10232                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10233
10234         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10235             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10236             tg3_asic_rev(tp) == ASIC_REV_57780)
10237                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10238                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10239                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10240
10241         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10242             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10243                 if (tg3_flag(tp, TSO_CAPABLE) &&
10244                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10245                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10246                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10247                            !tg3_flag(tp, IS_5788)) {
10248                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10249                 }
10250         }
10251
10252         if (tg3_flag(tp, PCI_EXPRESS))
10253                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10254
10255         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10256                 tp->dma_limit = 0;
10257                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10258                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10259                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10260                 }
10261         }
10262
10263         if (tg3_flag(tp, HW_TSO_1) ||
10264             tg3_flag(tp, HW_TSO_2) ||
10265             tg3_flag(tp, HW_TSO_3))
10266                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10267
10268         if (tg3_flag(tp, 57765_PLUS) ||
10269             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10270             tg3_asic_rev(tp) == ASIC_REV_57780)
10271                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10272
10273         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10274             tg3_asic_rev(tp) == ASIC_REV_5762)
10275                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10276
10277         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10278             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10279             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10280             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10281             tg3_flag(tp, 57765_PLUS)) {
10282                 u32 tgtreg;
10283
10284                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10285                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10286                 else
10287                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10288
10289                 val = tr32(tgtreg);
10290                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10291                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10292                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10293                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10294                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10295                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10296                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10297                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10298                 }
10299                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10300         }
10301
10302         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10303             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10304             tg3_asic_rev(tp) == ASIC_REV_5762) {
10305                 u32 tgtreg;
10306
10307                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10308                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10309                 else
10310                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10311
10312                 val = tr32(tgtreg);
10313                 tw32(tgtreg, val |
10314                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10315                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10316         }
10317
10318         /* Receive/send statistics. */
10319         if (tg3_flag(tp, 5750_PLUS)) {
10320                 val = tr32(RCVLPC_STATS_ENABLE);
10321                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10322                 tw32(RCVLPC_STATS_ENABLE, val);
10323         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10324                    tg3_flag(tp, TSO_CAPABLE)) {
10325                 val = tr32(RCVLPC_STATS_ENABLE);
10326                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10327                 tw32(RCVLPC_STATS_ENABLE, val);
10328         } else {
10329                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10330         }
10331         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10332         tw32(SNDDATAI_STATSENAB, 0xffffff);
10333         tw32(SNDDATAI_STATSCTRL,
10334              (SNDDATAI_SCTRL_ENABLE |
10335               SNDDATAI_SCTRL_FASTUPD));
10336
10337         /* Set up the host coalescing engine. */
10338         tw32(HOSTCC_MODE, 0);
10339         for (i = 0; i < 2000; i++) {
10340                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10341                         break;
10342                 udelay(10);
10343         }
10344
10345         __tg3_set_coalesce(tp, &tp->coal);
10346
10347         if (!tg3_flag(tp, 5705_PLUS)) {
10348                 /* Status/statistics block address.  See tg3_timer,
10349                  * the tg3_periodic_fetch_stats call there, and
10350                  * tg3_get_stats to see how this works for 5705/5750 chips.
10351                  */
10352                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10353                      ((u64) tp->stats_mapping >> 32));
10354                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10355                      ((u64) tp->stats_mapping & 0xffffffff));
10356                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10357
10358                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10359
10360                 /* Clear statistics and status block memory areas */
10361                 for (i = NIC_SRAM_STATS_BLK;
10362                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10363                      i += sizeof(u32)) {
10364                         tg3_write_mem(tp, i, 0);
10365                         udelay(40);
10366                 }
10367         }
10368
10369         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10370
10371         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10372         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10373         if (!tg3_flag(tp, 5705_PLUS))
10374                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10375
10376         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10377                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10378                 /* reset to prevent losing 1st rx packet intermittently */
10379                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10380                 udelay(10);
10381         }
10382
10383         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10384                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10385                         MAC_MODE_FHDE_ENABLE;
10386         if (tg3_flag(tp, ENABLE_APE))
10387                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10388         if (!tg3_flag(tp, 5705_PLUS) &&
10389             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10390             tg3_asic_rev(tp) != ASIC_REV_5700)
10391                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10392         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10393         udelay(40);
10394
10395         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10396          * If TG3_FLAG_IS_NIC is zero, we should read the
10397          * register to preserve the GPIO settings for LOMs. The GPIOs,
10398          * whether used as inputs or outputs, are set by boot code after
10399          * reset.
10400          */
10401         if (!tg3_flag(tp, IS_NIC)) {
10402                 u32 gpio_mask;
10403
10404                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10405                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10406                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10407
10408                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10409                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10410                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10411
10412                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10413                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10414
10415                 tp->grc_local_ctrl &= ~gpio_mask;
10416                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10417
10418                 /* GPIO1 must be driven high for eeprom write protect */
10419                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10420                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10421                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10422         }
10423         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10424         udelay(100);
10425
10426         if (tg3_flag(tp, USING_MSIX)) {
10427                 val = tr32(MSGINT_MODE);
10428                 val |= MSGINT_MODE_ENABLE;
10429                 if (tp->irq_cnt > 1)
10430                         val |= MSGINT_MODE_MULTIVEC_EN;
10431                 if (!tg3_flag(tp, 1SHOT_MSI))
10432                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10433                 tw32(MSGINT_MODE, val);
10434         }
10435
10436         if (!tg3_flag(tp, 5705_PLUS)) {
10437                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10438                 udelay(40);
10439         }
10440
10441         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10442                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10443                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10444                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10445                WDMAC_MODE_LNGREAD_ENAB);
10446
10447         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10448             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10449                 if (tg3_flag(tp, TSO_CAPABLE) &&
10450                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10451                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10452                         /* nothing */
10453                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10454                            !tg3_flag(tp, IS_5788)) {
10455                         val |= WDMAC_MODE_RX_ACCEL;
10456                 }
10457         }
10458
10459         /* Enable host coalescing bug fix */
10460         if (tg3_flag(tp, 5755_PLUS))
10461                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10462
10463         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10464                 val |= WDMAC_MODE_BURST_ALL_DATA;
10465
10466         tw32_f(WDMAC_MODE, val);
10467         udelay(40);
10468
10469         if (tg3_flag(tp, PCIX_MODE)) {
10470                 u16 pcix_cmd;
10471
10472                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10473                                      &pcix_cmd);
10474                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10475                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10476                         pcix_cmd |= PCI_X_CMD_READ_2K;
10477                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10478                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10479                         pcix_cmd |= PCI_X_CMD_READ_2K;
10480                 }
10481                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10482                                       pcix_cmd);
10483         }
10484
10485         tw32_f(RDMAC_MODE, rdmac_mode);
10486         udelay(40);
10487
10488         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10489             tg3_asic_rev(tp) == ASIC_REV_5720) {
10490                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10491                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10492                                 break;
10493                 }
10494                 if (i < TG3_NUM_RDMA_CHANNELS) {
10495                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10496                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10497                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10498                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10499                 }
10500         }
10501
10502         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10503         if (!tg3_flag(tp, 5705_PLUS))
10504                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10505
10506         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10507                 tw32(SNDDATAC_MODE,
10508                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10509         else
10510                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10511
10512         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10513         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10514         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10515         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10516                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10517         tw32(RCVDBDI_MODE, val);
10518         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10519         if (tg3_flag(tp, HW_TSO_1) ||
10520             tg3_flag(tp, HW_TSO_2) ||
10521             tg3_flag(tp, HW_TSO_3))
10522                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10523         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10524         if (tg3_flag(tp, ENABLE_TSS))
10525                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10526         tw32(SNDBDI_MODE, val);
10527         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10528
10529         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10530                 err = tg3_load_5701_a0_firmware_fix(tp);
10531                 if (err)
10532                         return err;
10533         }
10534
10535         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10536                 /* Ignore any errors from the firmware download.  If the
10537                  * download fails, the device will operate with EEE disabled.
10538                  */
10539                 tg3_load_57766_firmware(tp);
10540         }
10541
10542         if (tg3_flag(tp, TSO_CAPABLE)) {
10543                 err = tg3_load_tso_firmware(tp);
10544                 if (err)
10545                         return err;
10546         }
10547
10548         tp->tx_mode = TX_MODE_ENABLE;
10549
10550         if (tg3_flag(tp, 5755_PLUS) ||
10551             tg3_asic_rev(tp) == ASIC_REV_5906)
10552                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10553
10554         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10555             tg3_asic_rev(tp) == ASIC_REV_5762) {
10556                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10557                 tp->tx_mode &= ~val;
10558                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10559         }
10560
10561         tw32_f(MAC_TX_MODE, tp->tx_mode);
10562         udelay(100);
10563
10564         if (tg3_flag(tp, ENABLE_RSS)) {
10565                 u32 rss_key[10];
10566
10567                 tg3_rss_write_indir_tbl(tp);
10568
10569                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
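                /* 10 * sizeof(u32) = 40 bytes, the conventional Toeplitz
                 * RSS hash key length.
                 */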
10570
10571                 for (i = 0; i < 10; i++)
10572                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10573         }
10574
10575         tp->rx_mode = RX_MODE_ENABLE;
10576         if (tg3_flag(tp, 5755_PLUS))
10577                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10578
10579         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10580                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10581
10582         if (tg3_flag(tp, ENABLE_RSS))
10583                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10584                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10585                                RX_MODE_RSS_IPV6_HASH_EN |
10586                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10587                                RX_MODE_RSS_IPV4_HASH_EN |
10588                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10589
10590         tw32_f(MAC_RX_MODE, tp->rx_mode);
10591         udelay(10);
10592
10593         tw32(MAC_LED_CTRL, tp->led_ctrl);
10594
10595         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10596         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10597                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10598                 udelay(10);
10599         }
10600         tw32_f(MAC_RX_MODE, tp->rx_mode);
10601         udelay(10);
10602
10603         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10604                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10605                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10606                         /* Set drive transmission level to 1.2V, but only
10607                          * if the signal pre-emphasis bit is not set.  */
10608                         val = tr32(MAC_SERDES_CFG);
10609                         val &= 0xfffff000;
10610                         val |= 0x880;
10611                         tw32(MAC_SERDES_CFG, val);
10612                 }
10613                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10614                         tw32(MAC_SERDES_CFG, 0x616000);
10615         }
10616
10617         /* Prevent chip from dropping frames when flow control
10618          * is enabled.
10619          */
10620         if (tg3_flag(tp, 57765_CLASS))
10621                 val = 1;
10622         else
10623                 val = 2;
10624         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10625
10626         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10627             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10628                 /* Use hardware link auto-negotiation */
10629                 tg3_flag_set(tp, HW_AUTONEG);
10630         }
10631
10632         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10633             tg3_asic_rev(tp) == ASIC_REV_5714) {
10634                 u32 tmp;
10635
10636                 tmp = tr32(SERDES_RX_CTRL);
10637                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10638                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10639                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10640                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10641         }
10642
10643         if (!tg3_flag(tp, USE_PHYLIB)) {
10644                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10645                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10646
10647                 err = tg3_setup_phy(tp, false);
10648                 if (err)
10649                         return err;
10650
10651                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10652                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10653                         u32 tmp;
10654
10655                         /* Clear CRC stats. */
10656                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10657                                 tg3_writephy(tp, MII_TG3_TEST1,
10658                                              tmp | MII_TG3_TEST1_CRC_EN);
10659                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10660                         }
10661                 }
10662         }
10663
10664         __tg3_set_rx_mode(tp->dev);
10665
10666         /* Initialize receive rules. */
10667         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10668         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10669         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10670         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10671
10672         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10673                 limit = 8;
10674         else
10675                 limit = 16;
10676         if (tg3_flag(tp, ENABLE_ASF))
10677                 limit -= 4;
10678         switch (limit) {
10679         case 16:
10680                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10681         case 15:
10682                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10683         case 14:
10684                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10685         case 13:
10686                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10687         case 12:
10688                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10689         case 11:
10690                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10691         case 10:
10692                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10693         case 9:
10694                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10695         case 8:
10696                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10697         case 7:
10698                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10699         case 6:
10700                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10701         case 5:
10702                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10703         case 4:
10704                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10705         case 3:
10706                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10707         case 2:
10708         case 1:
10709
10710         default:
10711                 break;
10712         }
10713
10714         if (tg3_flag(tp, ENABLE_APE))
10715                 /* Write our heartbeat update interval to APE. */
10716                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10717                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10718
10719         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10720
10721         return 0;
10722 }
10723
10724 /* Called at device open time to get the chip ready for
10725  * packet processing.  Invoked with tp->lock held.
10726  */
10727 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10728 {
10729         /* Chip may have been just powered on. If so, the boot code may still
10730          * be running initialization. Wait for it to finish to avoid races in
10731          * accessing the hardware.
10732          */
10733         tg3_enable_register_access(tp);
10734         tg3_poll_fw(tp);
10735
10736         tg3_switch_clocks(tp);
10737
10738         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10739
10740         return tg3_reset_hw(tp, reset_phy);
10741 }
10742
10743 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10744 {
10745         int i;
10746
10747         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10748                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10749
10750                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10751                 off += len;
10752
10753                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10754                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10755                         memset(ocir, 0, TG3_OCIR_LEN);
10756         }
10757 }
10758
10759 /* sysfs attributes for hwmon */
10760 static ssize_t tg3_show_temp(struct device *dev,
10761                              struct device_attribute *devattr, char *buf)
10762 {
10763         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10764         struct tg3 *tp = dev_get_drvdata(dev);
10765         u32 temperature;
10766
10767         spin_lock_bh(&tp->lock);
10768         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10769                                 sizeof(temperature));
10770         spin_unlock_bh(&tp->lock);
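        /* The hwmon sysfs ABI reports temperatures in millidegrees
         * Celsius, hence the multiplication by 1000 below.
         */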
10771         return sprintf(buf, "%u\n", temperature * 1000);
10772 }
10773
10774
10775 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10776                           TG3_TEMP_SENSOR_OFFSET);
10777 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10778                           TG3_TEMP_CAUTION_OFFSET);
10779 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10780                           TG3_TEMP_MAX_OFFSET);
10781
10782 static struct attribute *tg3_attrs[] = {
10783         &sensor_dev_attr_temp1_input.dev_attr.attr,
10784         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10785         &sensor_dev_attr_temp1_max.dev_attr.attr,
10786         NULL
10787 };
10788 ATTRIBUTE_GROUPS(tg3);
10789
10790 static void tg3_hwmon_close(struct tg3 *tp)
10791 {
10792         if (tp->hwmon_dev) {
10793                 hwmon_device_unregister(tp->hwmon_dev);
10794                 tp->hwmon_dev = NULL;
10795         }
10796 }
10797
10798 static void tg3_hwmon_open(struct tg3 *tp)
10799 {
10800         int i;
10801         u32 size = 0;
10802         struct pci_dev *pdev = tp->pdev;
10803         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10804
10805         tg3_sd_scan_scratchpad(tp, ocirs);
10806
10807         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10808                 if (!ocirs[i].src_data_length)
10809                         continue;
10810
10811                 size += ocirs[i].src_hdr_length;
10812                 size += ocirs[i].src_data_length;
10813         }
10814
10815         if (!size)
10816                 return;
10817
10818         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10819                                                           tp, tg3_groups);
10820         if (IS_ERR(tp->hwmon_dev)) {
10821                 tp->hwmon_dev = NULL;
10822                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10823         }
10824 }
10825
10826
10827 #define TG3_STAT_ADD32(PSTAT, REG) \
10828 do {    u32 __val = tr32(REG); \
10829         (PSTAT)->low += __val; \
10830         if ((PSTAT)->low < __val) \
10831                 (PSTAT)->high += 1; \
10832 } while (0)
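
/* Worked example of the carry detection above: if low is 0xfffffff0 and
 * the register reads 0x20, low wraps around to 0x10.  Since 0x10 < 0x20,
 * the overflow is detected and high is incremented, giving a correct
 * 64-bit sum from a 32-bit hardware counter.  As a function, the macro's
 * logic would read (hypothetical helper, not used by the driver):
 */
static inline void tg3_stat_add32_example(tg3_stat64_t *pstat, u32 val)
{
        pstat->low += val;
        if (pstat->low < val)   /* 32-bit wraparound occurred */
                pstat->high += 1;
}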
10833
10834 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10835 {
10836         struct tg3_hw_stats *sp = tp->hw_stats;
10837
10838         if (!tp->link_up)
10839                 return;
10840
10841         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10842         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10843         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10844         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10845         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10846         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10847         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10848         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10849         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10850         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10851         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10852         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10853         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10854         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10855                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10856                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10857                 u32 val;
10858
10859                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10860                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10861                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10862                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10863         }
10864
10865         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10866         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10867         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10868         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10869         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10870         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10871         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10872         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10873         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10874         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10875         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10876         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10877         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10878         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10879
10880         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10881         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10882             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10883             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10884             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10885                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10886         } else {
10887                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10888                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10889                 if (val) {
10890                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10891                         sp->rx_discards.low += val;
10892                         if (sp->rx_discards.low < val)
10893                                 sp->rx_discards.high += 1;
10894                 }
10895                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10896         }
10897         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10898 }
10899
10900 static void tg3_chk_missed_msi(struct tg3 *tp)
10901 {
10902         u32 i;
10903
10904         for (i = 0; i < tp->irq_cnt; i++) {
10905                 struct tg3_napi *tnapi = &tp->napi[i];
10906
10907                 if (tg3_has_work(tnapi)) {
10908                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10909                             tnapi->last_tx_cons == tnapi->tx_cons) {
10910                                 if (tnapi->chk_msi_cnt < 1) {
10911                                         tnapi->chk_msi_cnt++;
10912                                         return;
10913                                 }
10914                                 tg3_msi(0, tnapi);
10915                         }
10916                 }
10917                 tnapi->chk_msi_cnt = 0;
10918                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10919                 tnapi->last_tx_cons = tnapi->tx_cons;
10920         }
10921 }
10922
10923 static void tg3_timer(unsigned long __opaque)
10924 {
10925         struct tg3 *tp = (struct tg3 *) __opaque;
10926
10927         spin_lock(&tp->lock);
10928
10929         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10930                 spin_unlock(&tp->lock);
10931                 goto restart_timer;
10932         }
10933
10934         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10935             tg3_flag(tp, 57765_CLASS))
10936                 tg3_chk_missed_msi(tp);
10937
10938         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10939                 /* BCM4785: Flush posted writes from GbE to host memory. */
10940                 tr32(HOSTCC_MODE);
10941         }
10942
10943         if (!tg3_flag(tp, TAGGED_STATUS)) {
10944                 /* All of this garbage is needed because, when using
10945                  * non-tagged IRQ status, the mailbox/status_block
10946                  * protocol the chip uses with the CPU is race prone.
10947                  */
10948                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10949                         tw32(GRC_LOCAL_CTRL,
10950                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10951                 } else {
10952                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10953                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10954                 }
10955
10956                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10957                         spin_unlock(&tp->lock);
10958                         tg3_reset_task_schedule(tp);
10959                         goto restart_timer;
10960                 }
10961         }
10962
10963         /* This part only runs once per second. */
10964         if (!--tp->timer_counter) {
10965                 if (tg3_flag(tp, 5705_PLUS))
10966                         tg3_periodic_fetch_stats(tp);
10967
10968                 if (tp->setlpicnt && !--tp->setlpicnt)
10969                         tg3_phy_eee_enable(tp);
10970
10971                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10972                         u32 mac_stat;
10973                         int phy_event;
10974
10975                         mac_stat = tr32(MAC_STATUS);
10976
10977                         phy_event = 0;
10978                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10979                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10980                                         phy_event = 1;
10981                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10982                                 phy_event = 1;
10983
10984                         if (phy_event)
10985                                 tg3_setup_phy(tp, false);
10986                 } else if (tg3_flag(tp, POLL_SERDES)) {
10987                         u32 mac_stat = tr32(MAC_STATUS);
10988                         int need_setup = 0;
10989
10990                         if (tp->link_up &&
10991                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10992                                 need_setup = 1;
10993                         }
10994                         if (!tp->link_up &&
10995                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10996                                          MAC_STATUS_SIGNAL_DET))) {
10997                                 need_setup = 1;
10998                         }
10999                         if (need_setup) {
11000                                 if (!tp->serdes_counter) {
11001                                         tw32_f(MAC_MODE,
11002                                              (tp->mac_mode &
11003                                               ~MAC_MODE_PORT_MODE_MASK));
11004                                         udelay(40);
11005                                         tw32_f(MAC_MODE, tp->mac_mode);
11006                                         udelay(40);
11007                                 }
11008                                 tg3_setup_phy(tp, false);
11009                         }
11010                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11011                            tg3_flag(tp, 5780_CLASS)) {
11012                         tg3_serdes_parallel_detect(tp);
11013                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11014                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11015                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11016                                          TG3_CPMU_STATUS_LINK_MASK);
11017
11018                         if (link_up != tp->link_up)
11019                                 tg3_setup_phy(tp, false);
11020                 }
11021
11022                 tp->timer_counter = tp->timer_multiplier;
11023         }
11024
11025         /* Heartbeat is only sent once every 2 seconds.
11026          *
11027          * The heartbeat is to tell the ASF firmware that the host
11028          * driver is still alive.  In the event that the OS crashes,
11029          * ASF needs to reset the hardware to free up the FIFO space
11030          * that may be filled with rx packets destined for the host.
11031          * If the FIFO is full, ASF will no longer function properly.
11032          *
11033          * Unintended resets have been reported on real time kernels
11034          * where the timer doesn't run on time.  Netpoll has the
11035          * same problem.
11036          *
11037          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11038          * to check the ring condition when the heartbeat is expiring
11039          * before doing the reset.  This will prevent most unintended
11040          * resets.
11041          */
11042         if (!--tp->asf_counter) {
11043                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11044                         tg3_wait_for_event_ack(tp);
11045
11046                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11047                                       FWCMD_NICDRV_ALIVE3);
11048                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11049                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11050                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11051
11052                         tg3_generate_fw_event(tp);
11053                 }
11054                 tp->asf_counter = tp->asf_multiplier;
11055         }
11056
11057         spin_unlock(&tp->lock);
11058
11059 restart_timer:
11060         tp->timer.expires = jiffies + tp->timer_offset;
11061         add_timer(&tp->timer);
11062 }
11063
11064 static void tg3_timer_init(struct tg3 *tp)
11065 {
11066         if (tg3_flag(tp, TAGGED_STATUS) &&
11067             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11068             !tg3_flag(tp, 57765_CLASS))
11069                 tp->timer_offset = HZ;
11070         else
11071                 tp->timer_offset = HZ / 10;
11072
11073         BUG_ON(tp->timer_offset > HZ);
11074
11075         tp->timer_multiplier = (HZ / tp->timer_offset);
11076         tp->asf_multiplier = (HZ / tp->timer_offset) *
11077                              TG3_FW_UPDATE_FREQ_SEC;
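        /* For example, with HZ == 1000 and tagged status, timer_offset is
         * HZ, the timer fires once a second and timer_multiplier == 1.
         * In the HZ / 10 case it fires every 100 ms with
         * timer_multiplier == 10, so the once-per-second work in
         * tg3_timer runs on every 10th invocation.
         */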
11078
11079         init_timer(&tp->timer);
11080         tp->timer.data = (unsigned long) tp;
11081         tp->timer.function = tg3_timer;
11082 }
11083
11084 static void tg3_timer_start(struct tg3 *tp)
11085 {
11086         tp->asf_counter   = tp->asf_multiplier;
11087         tp->timer_counter = tp->timer_multiplier;
11088
11089         tp->timer.expires = jiffies + tp->timer_offset;
11090         add_timer(&tp->timer);
11091 }
11092
11093 static void tg3_timer_stop(struct tg3 *tp)
11094 {
11095         del_timer_sync(&tp->timer);
11096 }
11097
11098 /* Restart hardware after configuration changes, self-test, etc.
11099  * Invoked with tp->lock held.
11100  */
11101 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11102         __releases(tp->lock)
11103         __acquires(tp->lock)
11104 {
11105         int err;
11106
11107         err = tg3_init_hw(tp, reset_phy);
11108         if (err) {
11109                 netdev_err(tp->dev,
11110                            "Failed to re-initialize device, aborting\n");
11111                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11112                 tg3_full_unlock(tp);
11113                 tg3_timer_stop(tp);
11114                 tp->irq_sync = 0;
11115                 tg3_napi_enable(tp);
11116                 dev_close(tp->dev);
11117                 tg3_full_lock(tp, 0);
11118         }
11119         return err;
11120 }
11121
11122 static void tg3_reset_task(struct work_struct *work)
11123 {
11124         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11125         int err;
11126
11127         rtnl_lock();
11128         tg3_full_lock(tp, 0);
11129
11130         if (!netif_running(tp->dev)) {
11131                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11132                 tg3_full_unlock(tp);
11133                 rtnl_unlock();
11134                 return;
11135         }
11136
11137         tg3_full_unlock(tp);
11138
11139         tg3_phy_stop(tp);
11140
11141         tg3_netif_stop(tp);
11142
11143         tg3_full_lock(tp, 1);
11144
11145         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11146                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11147                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11148                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11149                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11150         }
11151
11152         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11153         err = tg3_init_hw(tp, true);
11154         if (err)
11155                 goto out;
11156
11157         tg3_netif_start(tp);
11158
11159 out:
11160         tg3_full_unlock(tp);
11161
11162         if (!err)
11163                 tg3_phy_start(tp);
11164
11165         tg3_flag_clear(tp, RESET_TASK_PENDING);
11166         rtnl_unlock();
11167 }
11168
11169 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11170 {
11171         irq_handler_t fn;
11172         unsigned long flags;
11173         char *name;
11174         struct tg3_napi *tnapi = &tp->napi[irq_num];
11175
11176         if (tp->irq_cnt == 1)
11177                 name = tp->dev->name;
11178         else {
11179                 name = &tnapi->irq_lbl[0];
11180                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11181                         snprintf(name, IFNAMSIZ,
11182                                  "%s-txrx-%d", tp->dev->name, irq_num);
11183                 else if (tnapi->tx_buffers)
11184                         snprintf(name, IFNAMSIZ,
11185                                  "%s-tx-%d", tp->dev->name, irq_num);
11186                 else if (tnapi->rx_rcb)
11187                         snprintf(name, IFNAMSIZ,
11188                                  "%s-rx-%d", tp->dev->name, irq_num);
11189                 else
11190                         snprintf(name, IFNAMSIZ,
11191                                  "%s-%d", tp->dev->name, irq_num);
11192                 name[IFNAMSIZ-1] = 0;
11193         }
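        /* The generated names look like "eth0-txrx-1", "eth0-tx-2" or
         * "eth0-rx-3", depending on which rings the vector services
         * (interface name chosen for illustration).
         */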
11194
11195         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11196                 fn = tg3_msi;
11197                 if (tg3_flag(tp, 1SHOT_MSI))
11198                         fn = tg3_msi_1shot;
11199                 flags = 0;
11200         } else {
11201                 fn = tg3_interrupt;
11202                 if (tg3_flag(tp, TAGGED_STATUS))
11203                         fn = tg3_interrupt_tagged;
11204                 flags = IRQF_SHARED;
11205         }
11206
11207         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11208 }
11209
11210 static int tg3_test_interrupt(struct tg3 *tp)
11211 {
11212         struct tg3_napi *tnapi = &tp->napi[0];
11213         struct net_device *dev = tp->dev;
11214         int err, i, intr_ok = 0;
11215         u32 val;
11216
11217         if (!netif_running(dev))
11218                 return -ENODEV;
11219
11220         tg3_disable_ints(tp);
11221
11222         free_irq(tnapi->irq_vec, tnapi);
11223
11224         /*
11225          * Turn off MSI one shot mode.  Otherwise this test has no
11226          * observable way to know whether the interrupt was delivered.
11227          */
11228         if (tg3_flag(tp, 57765_PLUS)) {
11229                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11230                 tw32(MSGINT_MODE, val);
11231         }
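        /* With one-shot mode disabled, the interrupt mailbox remains
         * observable from the host, which is what the polling loop below
         * relies on to detect delivery.
         */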
11232
11233         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11234                           IRQF_SHARED, dev->name, tnapi);
11235         if (err)
11236                 return err;
11237
11238         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11239         tg3_enable_ints(tp);
11240
11241         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11242                tnapi->coal_now);
11243
11244         for (i = 0; i < 5; i++) {
11245                 u32 int_mbox, misc_host_ctrl;
11246
11247                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11248                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11249
11250                 if ((int_mbox != 0) ||
11251                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11252                         intr_ok = 1;
11253                         break;
11254                 }
11255
11256                 if (tg3_flag(tp, 57765_PLUS) &&
11257                     tnapi->hw_status->status_tag != tnapi->last_tag)
11258                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11259
11260                 msleep(10);
11261         }
11262
11263         tg3_disable_ints(tp);
11264
11265         free_irq(tnapi->irq_vec, tnapi);
11266
11267         err = tg3_request_irq(tp, 0);
11268
11269         if (err)
11270                 return err;
11271
11272         if (intr_ok) {
11273                 /* Re-enable MSI one-shot mode. */
11274                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11275                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11276                         tw32(MSGINT_MODE, val);
11277                 }
11278                 return 0;
11279         }
11280
11281         return -EIO;
11282 }
11283
11284 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11285  * INTx mode is successfully restored.
11286  */
11287 static int tg3_test_msi(struct tg3 *tp)
11288 {
11289         int err;
11290         u16 pci_cmd;
11291
11292         if (!tg3_flag(tp, USING_MSI))
11293                 return 0;
11294
11295         /* Turn off SERR reporting in case MSI terminates with Master
11296          * Abort.
11297          */
11298         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11299         pci_write_config_word(tp->pdev, PCI_COMMAND,
11300                               pci_cmd & ~PCI_COMMAND_SERR);
11301
11302         err = tg3_test_interrupt(tp);
11303
11304         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11305
11306         if (!err)
11307                 return 0;
11308
11309         /* other failures */
11310         if (err != -EIO)
11311                 return err;
11312
11313         /* MSI test failed, go back to INTx mode */
11314         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11315                     "to INTx mode. Please report this failure to the PCI "
11316                     "maintainer and include system chipset information\n");
11317
11318         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11319
11320         pci_disable_msi(tp->pdev);
11321
11322         tg3_flag_clear(tp, USING_MSI);
11323         tp->napi[0].irq_vec = tp->pdev->irq;
11324
11325         err = tg3_request_irq(tp, 0);
11326         if (err)
11327                 return err;
11328
11329         /* Need to reset the chip because the MSI cycle may have terminated
11330          * with Master Abort.
11331          */
11332         tg3_full_lock(tp, 1);
11333
11334         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11335         err = tg3_init_hw(tp, true);
11336
11337         tg3_full_unlock(tp);
11338
11339         if (err)
11340                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11341
11342         return err;
11343 }
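
/*
 * For reference, the fallback sequence above is: mask SERR so a failed
 * MSI cycle cannot escalate, run the interrupt test, and on -EIO tear
 * down the MSI vector (free_irq() + pci_disable_msi()), rewire
 * tp->napi[0] to the legacy INTx line from tp->pdev->irq, and reset
 * the chip in case the MSI write terminated with a Master Abort.
 */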
11344
11345 static int tg3_request_firmware(struct tg3 *tp)
11346 {
11347         const struct tg3_firmware_hdr *fw_hdr;
11348
11349         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11350                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11351                            tp->fw_needed);
11352                 return -ENOENT;
11353         }
11354
11355         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11356
11357         /* Firmware blob starts with version numbers, followed by
11358          * start address and _full_ length including BSS sections
11359          * (which must be longer than the actual data, of course).
11360          */
11361
11362         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11363         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11364                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11365                            tp->fw_len, tp->fw_needed);
11366                 release_firmware(tp->fw);
11367                 tp->fw = NULL;
11368                 return -EINVAL;
11369         }
11370
11371         /* We no longer need the firmware name; we have the blob. */
11372         tp->fw_needed = NULL;
11373         return 0;
11374 }
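
/*
 * Worked example of the length check above (header layout assumed from
 * struct tg3_firmware_hdr: version, base address and length words, i.e.
 * TG3_FW_HDR_LEN bytes in total): for a 4108-byte blob with a 12-byte
 * header the file payload is 4096 bytes, so fw_hdr->len must be at
 * least 4096; it may legitimately be larger, because it also counts
 * BSS that occupies no space in the file.
 */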
11375
11376 static u32 tg3_irq_count(struct tg3 *tp)
11377 {
11378         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11379
11380         if (irq_cnt > 1) {
11381                 /* We want as many RX rings enabled as there are CPUs.
11382                  * In multiqueue MSI-X mode, the first MSI-X vector
11383                  * only deals with link interrupts, etc., so we add
11384                  * one to the number of vectors we are requesting.
11385                  */
11386                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11387         }
11388
11389         return irq_cnt;
11390 }
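
/*
 * Worked example: with rxq_cnt = 4, txq_cnt = 1 and irq_max = 5,
 * max(4, 1) = 4 > 1, so irq_cnt = min(4 + 1, 5) = 5 -- four ring
 * vectors plus the dedicated link/error vector.  With a single queue
 * pair the function returns 1 and no extra vector is reserved.
 */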
11391
11392 static bool tg3_enable_msix(struct tg3 *tp)
11393 {
11394         int i, rc;
11395         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11396
11397         tp->txq_cnt = tp->txq_req;
11398         tp->rxq_cnt = tp->rxq_req;
11399         if (!tp->rxq_cnt)
11400                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11401         if (tp->rxq_cnt > tp->rxq_max)
11402                 tp->rxq_cnt = tp->rxq_max;
11403
11404         /* Disable multiple TX rings by default.  Simple round-robin hardware
11405          * scheduling of the TX rings can cause starvation of rings with
11406          * small packets when other rings have TSO or jumbo packets.
11407          */
11408         if (!tp->txq_req)
11409                 tp->txq_cnt = 1;
11410
11411         tp->irq_cnt = tg3_irq_count(tp);
11412
11413         for (i = 0; i < tp->irq_max; i++) {
11414                 msix_ent[i].entry  = i;
11415                 msix_ent[i].vector = 0;
11416         }
11417
11418         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11419         if (rc < 0) {
11420                 return false;
11421         } else if (rc < tp->irq_cnt) {
11422                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11423                               tp->irq_cnt, rc);
11424                 tp->irq_cnt = rc;
11425                 tp->rxq_cnt = max(rc - 1, 1);
11426                 if (tp->txq_cnt)
11427                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11428         }
11429
11430         for (i = 0; i < tp->irq_max; i++)
11431                 tp->napi[i].irq_vec = msix_ent[i].vector;
11432
11433         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11434                 pci_disable_msix(tp->pdev);
11435                 return false;
11436         }
11437
11438         if (tp->irq_cnt == 1)
11439                 return true;
11440
11441         tg3_flag_set(tp, ENABLE_RSS);
11442
11443         if (tp->txq_cnt > 1)
11444                 tg3_flag_set(tp, ENABLE_TSS);
11445
11446         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11447
11448         return true;
11449 }
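
/*
 * Worked example of the partial-allocation path above: if five vectors
 * are requested but pci_enable_msix_range() can only deliver three, it
 * returns 3, so irq_cnt becomes 3, rxq_cnt = max(3 - 1, 1) = 2, and a
 * nonzero txq_cnt is clamped to min(rxq_cnt, txq_max).  The call only
 * fails outright (negative return) when not even the minimum of one
 * vector can be allocated.
 */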
11450
11451 static void tg3_ints_init(struct tg3 *tp)
11452 {
11453         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11454             !tg3_flag(tp, TAGGED_STATUS)) {
11455                 /* All MSI-supporting chips should support tagged
11456                  * status.  Warn and fall back to INTx otherwise.
11457                  */
11458                 netdev_warn(tp->dev,
11459                             "MSI without TAGGED_STATUS? Not using MSI\n");
11460                 goto defcfg;
11461         }
11462
11463         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11464                 tg3_flag_set(tp, USING_MSIX);
11465         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11466                 tg3_flag_set(tp, USING_MSI);
11467
11468         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11469                 u32 msi_mode = tr32(MSGINT_MODE);
11470                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11471                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11472                 if (!tg3_flag(tp, 1SHOT_MSI))
11473                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11474                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11475         }
11476 defcfg:
11477         if (!tg3_flag(tp, USING_MSIX)) {
11478                 tp->irq_cnt = 1;
11479                 tp->napi[0].irq_vec = tp->pdev->irq;
11480         }
11481
11482         if (tp->irq_cnt == 1) {
11483                 tp->txq_cnt = 1;
11484                 tp->rxq_cnt = 1;
11485                 netif_set_real_num_tx_queues(tp->dev, 1);
11486                 netif_set_real_num_rx_queues(tp->dev, 1);
11487         }
11488 }
11489
11490 static void tg3_ints_fini(struct tg3 *tp)
11491 {
11492         if (tg3_flag(tp, USING_MSIX))
11493                 pci_disable_msix(tp->pdev);
11494         else if (tg3_flag(tp, USING_MSI))
11495                 pci_disable_msi(tp->pdev);
11496         tg3_flag_clear(tp, USING_MSI);
11497         tg3_flag_clear(tp, USING_MSIX);
11498         tg3_flag_clear(tp, ENABLE_RSS);
11499         tg3_flag_clear(tp, ENABLE_TSS);
11500 }
11501
11502 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11503                      bool init)
11504 {
11505         struct net_device *dev = tp->dev;
11506         int i, err;
11507
11508         /*
11509          * Set up interrupts first so we know how
11510          * many NAPI resources to allocate.
11511          */
11512         tg3_ints_init(tp);
11513
11514         tg3_rss_check_indir_tbl(tp);
11515
11516         /* The placement of this call is tied
11517          * to the setup and use of Host TX descriptors.
11518          */
11519         err = tg3_alloc_consistent(tp);
11520         if (err)
11521                 goto out_ints_fini;
11522
11523         tg3_napi_init(tp);
11524
11525         tg3_napi_enable(tp);
11526
11527         for (i = 0; i < tp->irq_cnt; i++) {
11528                 struct tg3_napi *tnapi = &tp->napi[i];
11529                 err = tg3_request_irq(tp, i);
11530                 if (err) {
11531                         for (i--; i >= 0; i--) {
11532                                 tnapi = &tp->napi[i];
11533                                 free_irq(tnapi->irq_vec, tnapi);
11534                         }
11535                         goto out_napi_fini;
11536                 }
11537         }
11538
11539         tg3_full_lock(tp, 0);
11540
11541         if (init)
11542                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11543
11544         err = tg3_init_hw(tp, reset_phy);
11545         if (err) {
11546                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11547                 tg3_free_rings(tp);
11548         }
11549
11550         tg3_full_unlock(tp);
11551
11552         if (err)
11553                 goto out_free_irq;
11554
11555         if (test_irq && tg3_flag(tp, USING_MSI)) {
11556                 err = tg3_test_msi(tp);
11557
11558                 if (err) {
11559                         tg3_full_lock(tp, 0);
11560                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11561                         tg3_free_rings(tp);
11562                         tg3_full_unlock(tp);
11563
11564                         goto out_napi_fini;
11565                 }
11566
11567                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11568                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11569
11570                         tw32(PCIE_TRANSACTION_CFG,
11571                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11572                 }
11573         }
11574
11575         tg3_phy_start(tp);
11576
11577         tg3_hwmon_open(tp);
11578
11579         tg3_full_lock(tp, 0);
11580
11581         tg3_timer_start(tp);
11582         tg3_flag_set(tp, INIT_COMPLETE);
11583         tg3_enable_ints(tp);
11584
11585         tg3_ptp_resume(tp);
11586
11587         tg3_full_unlock(tp);
11588
11589         netif_tx_start_all_queues(dev);
11590
11591         /*
11592          * Reset the loopback feature if it was turned on while the device
11593          * was down; make sure that it is installed properly now.
11594          */
11595         if (dev->features & NETIF_F_LOOPBACK)
11596                 tg3_set_loopback(dev, dev->features);
11597
11598         return 0;
11599
11600 out_free_irq:
11601         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11602                 struct tg3_napi *tnapi = &tp->napi[i];
11603                 free_irq(tnapi->irq_vec, tnapi);
11604         }
11605
11606 out_napi_fini:
11607         tg3_napi_disable(tp);
11608         tg3_napi_fini(tp);
11609         tg3_free_consistent(tp);
11610
11611 out_ints_fini:
11612         tg3_ints_fini(tp);
11613
11614         return err;
11615 }
11616
11617 static void tg3_stop(struct tg3 *tp)
11618 {
11619         int i;
11620
11621         tg3_reset_task_cancel(tp);
11622         tg3_netif_stop(tp);
11623
11624         tg3_timer_stop(tp);
11625
11626         tg3_hwmon_close(tp);
11627
11628         tg3_phy_stop(tp);
11629
11630         tg3_full_lock(tp, 1);
11631
11632         tg3_disable_ints(tp);
11633
11634         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11635         tg3_free_rings(tp);
11636         tg3_flag_clear(tp, INIT_COMPLETE);
11637
11638         tg3_full_unlock(tp);
11639
11640         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11641                 struct tg3_napi *tnapi = &tp->napi[i];
11642                 free_irq(tnapi->irq_vec, tnapi);
11643         }
11644
11645         tg3_ints_fini(tp);
11646
11647         tg3_napi_fini(tp);
11648
11649         tg3_free_consistent(tp);
11650 }
11651
11652 static int tg3_open(struct net_device *dev)
11653 {
11654         struct tg3 *tp = netdev_priv(dev);
11655         int err;
11656
11657         if (tp->pcierr_recovery) {
11658                 netdev_err(dev, "Failed to open device. PCI error recovery "
11659                            "in progress\n");
11660                 return -EAGAIN;
11661         }
11662
11663         if (tp->fw_needed) {
11664                 err = tg3_request_firmware(tp);
11665                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11666                         if (err) {
11667                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11668                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11669                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11670                                 netdev_warn(tp->dev, "EEE capability restored\n");
11671                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11672                         }
11673                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11674                         if (err)
11675                                 return err;
11676                 } else if (err) {
11677                         netdev_warn(tp->dev, "TSO capability disabled\n");
11678                         tg3_flag_clear(tp, TSO_CAPABLE);
11679                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11680                         netdev_notice(tp->dev, "TSO capability restored\n");
11681                         tg3_flag_set(tp, TSO_CAPABLE);
11682                 }
11683         }
11684
11685         tg3_carrier_off(tp);
11686
11687         err = tg3_power_up(tp);
11688         if (err)
11689                 return err;
11690
11691         tg3_full_lock(tp, 0);
11692
11693         tg3_disable_ints(tp);
11694         tg3_flag_clear(tp, INIT_COMPLETE);
11695
11696         tg3_full_unlock(tp);
11697
11698         err = tg3_start(tp,
11699                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11700                         true, true);
11701         if (err) {
11702                 tg3_frob_aux_power(tp, false);
11703                 pci_set_power_state(tp->pdev, PCI_D3hot);
11704         }
11705
11706         return err;
11707 }
11708
11709 static int tg3_close(struct net_device *dev)
11710 {
11711         struct tg3 *tp = netdev_priv(dev);
11712
11713         if (tp->pcierr_recovery) {
11714                 netdev_err(dev, "Failed to close device. PCI error recovery "
11715                            "in progress\n");
11716                 return -EAGAIN;
11717         }
11718
11719         tg3_stop(tp);
11720
11721         /* Clear stats across close / open calls */
11722         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11723         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11724
11725         if (pci_device_is_present(tp->pdev)) {
11726                 tg3_power_down_prepare(tp);
11727
11728                 tg3_carrier_off(tp);
11729         }
11730         return 0;
11731 }
11732
11733 static inline u64 get_stat64(tg3_stat64_t *val)
11734 {
11735         return ((u64)val->high << 32) | ((u64)val->low);
11736 }
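
/* Example: val->high = 0x00000001 and val->low = 0x00000002 yield
 * 0x0000000100000002; the hardware statistics block stores each 64-bit
 * counter as a high/low pair of 32-bit words.
 */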
11737
11738 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11739 {
11740         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11741
11742         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11743             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11744              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11745                 u32 val;
11746
11747                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11748                         tg3_writephy(tp, MII_TG3_TEST1,
11749                                      val | MII_TG3_TEST1_CRC_EN);
11750                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11751                 } else
11752                         val = 0;
11753
11754                 tp->phy_crc_errors += val;
11755
11756                 return tp->phy_crc_errors;
11757         }
11758
11759         return get_stat64(&hw_stats->rx_fcs_errors);
11760 }
11761
11762 #define ESTAT_ADD(member) \
11763         estats->member =        old_estats->member + \
11764                                 get_stat64(&hw_stats->member)
11765
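/*
 * For reference, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *				get_stat64(&hw_stats->rx_octets);
 *
 * i.e. every counter below is the live hardware value accumulated on
 * top of the snapshot kept in tp->estats_prev.
 */
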
11766 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11767 {
11768         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11769         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11770
11771         ESTAT_ADD(rx_octets);
11772         ESTAT_ADD(rx_fragments);
11773         ESTAT_ADD(rx_ucast_packets);
11774         ESTAT_ADD(rx_mcast_packets);
11775         ESTAT_ADD(rx_bcast_packets);
11776         ESTAT_ADD(rx_fcs_errors);
11777         ESTAT_ADD(rx_align_errors);
11778         ESTAT_ADD(rx_xon_pause_rcvd);
11779         ESTAT_ADD(rx_xoff_pause_rcvd);
11780         ESTAT_ADD(rx_mac_ctrl_rcvd);
11781         ESTAT_ADD(rx_xoff_entered);
11782         ESTAT_ADD(rx_frame_too_long_errors);
11783         ESTAT_ADD(rx_jabbers);
11784         ESTAT_ADD(rx_undersize_packets);
11785         ESTAT_ADD(rx_in_length_errors);
11786         ESTAT_ADD(rx_out_length_errors);
11787         ESTAT_ADD(rx_64_or_less_octet_packets);
11788         ESTAT_ADD(rx_65_to_127_octet_packets);
11789         ESTAT_ADD(rx_128_to_255_octet_packets);
11790         ESTAT_ADD(rx_256_to_511_octet_packets);
11791         ESTAT_ADD(rx_512_to_1023_octet_packets);
11792         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11793         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11794         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11795         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11796         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11797
11798         ESTAT_ADD(tx_octets);
11799         ESTAT_ADD(tx_collisions);
11800         ESTAT_ADD(tx_xon_sent);
11801         ESTAT_ADD(tx_xoff_sent);
11802         ESTAT_ADD(tx_flow_control);
11803         ESTAT_ADD(tx_mac_errors);
11804         ESTAT_ADD(tx_single_collisions);
11805         ESTAT_ADD(tx_mult_collisions);
11806         ESTAT_ADD(tx_deferred);
11807         ESTAT_ADD(tx_excessive_collisions);
11808         ESTAT_ADD(tx_late_collisions);
11809         ESTAT_ADD(tx_collide_2times);
11810         ESTAT_ADD(tx_collide_3times);
11811         ESTAT_ADD(tx_collide_4times);
11812         ESTAT_ADD(tx_collide_5times);
11813         ESTAT_ADD(tx_collide_6times);
11814         ESTAT_ADD(tx_collide_7times);
11815         ESTAT_ADD(tx_collide_8times);
11816         ESTAT_ADD(tx_collide_9times);
11817         ESTAT_ADD(tx_collide_10times);
11818         ESTAT_ADD(tx_collide_11times);
11819         ESTAT_ADD(tx_collide_12times);
11820         ESTAT_ADD(tx_collide_13times);
11821         ESTAT_ADD(tx_collide_14times);
11822         ESTAT_ADD(tx_collide_15times);
11823         ESTAT_ADD(tx_ucast_packets);
11824         ESTAT_ADD(tx_mcast_packets);
11825         ESTAT_ADD(tx_bcast_packets);
11826         ESTAT_ADD(tx_carrier_sense_errors);
11827         ESTAT_ADD(tx_discards);
11828         ESTAT_ADD(tx_errors);
11829
11830         ESTAT_ADD(dma_writeq_full);
11831         ESTAT_ADD(dma_write_prioq_full);
11832         ESTAT_ADD(rxbds_empty);
11833         ESTAT_ADD(rx_discards);
11834         ESTAT_ADD(rx_errors);
11835         ESTAT_ADD(rx_threshold_hit);
11836
11837         ESTAT_ADD(dma_readq_full);
11838         ESTAT_ADD(dma_read_prioq_full);
11839         ESTAT_ADD(tx_comp_queue_full);
11840
11841         ESTAT_ADD(ring_set_send_prod_index);
11842         ESTAT_ADD(ring_status_update);
11843         ESTAT_ADD(nic_irqs);
11844         ESTAT_ADD(nic_avoided_irqs);
11845         ESTAT_ADD(nic_tx_threshold_hit);
11846
11847         ESTAT_ADD(mbuf_lwm_thresh_hit);
11848 }
11849
11850 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11851 {
11852         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11853         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11854
11855         stats->rx_packets = old_stats->rx_packets +
11856                 get_stat64(&hw_stats->rx_ucast_packets) +
11857                 get_stat64(&hw_stats->rx_mcast_packets) +
11858                 get_stat64(&hw_stats->rx_bcast_packets);
11859
11860         stats->tx_packets = old_stats->tx_packets +
11861                 get_stat64(&hw_stats->tx_ucast_packets) +
11862                 get_stat64(&hw_stats->tx_mcast_packets) +
11863                 get_stat64(&hw_stats->tx_bcast_packets);
11864
11865         stats->rx_bytes = old_stats->rx_bytes +
11866                 get_stat64(&hw_stats->rx_octets);
11867         stats->tx_bytes = old_stats->tx_bytes +
11868                 get_stat64(&hw_stats->tx_octets);
11869
11870         stats->rx_errors = old_stats->rx_errors +
11871                 get_stat64(&hw_stats->rx_errors);
11872         stats->tx_errors = old_stats->tx_errors +
11873                 get_stat64(&hw_stats->tx_errors) +
11874                 get_stat64(&hw_stats->tx_mac_errors) +
11875                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11876                 get_stat64(&hw_stats->tx_discards);
11877
11878         stats->multicast = old_stats->multicast +
11879                 get_stat64(&hw_stats->rx_mcast_packets);
11880         stats->collisions = old_stats->collisions +
11881                 get_stat64(&hw_stats->tx_collisions);
11882
11883         stats->rx_length_errors = old_stats->rx_length_errors +
11884                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11885                 get_stat64(&hw_stats->rx_undersize_packets);
11886
11887         stats->rx_frame_errors = old_stats->rx_frame_errors +
11888                 get_stat64(&hw_stats->rx_align_errors);
11889         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11890                 get_stat64(&hw_stats->tx_discards);
11891         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11892                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11893
11894         stats->rx_crc_errors = old_stats->rx_crc_errors +
11895                 tg3_calc_crc_errors(tp);
11896
11897         stats->rx_missed_errors = old_stats->rx_missed_errors +
11898                 get_stat64(&hw_stats->rx_discards);
11899
11900         stats->rx_dropped = tp->rx_dropped;
11901         stats->tx_dropped = tp->tx_dropped;
11902 }
11903
11904 static int tg3_get_regs_len(struct net_device *dev)
11905 {
11906         return TG3_REG_BLK_SIZE;
11907 }
11908
11909 static void tg3_get_regs(struct net_device *dev,
11910                 struct ethtool_regs *regs, void *_p)
11911 {
11912         struct tg3 *tp = netdev_priv(dev);
11913
11914         regs->version = 0;
11915
11916         memset(_p, 0, TG3_REG_BLK_SIZE);
11917
11918         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11919                 return;
11920
11921         tg3_full_lock(tp, 0);
11922
11923         tg3_dump_legacy_regs(tp, (u32 *)_p);
11924
11925         tg3_full_unlock(tp);
11926 }
11927
11928 static int tg3_get_eeprom_len(struct net_device *dev)
11929 {
11930         struct tg3 *tp = netdev_priv(dev);
11931
11932         return tp->nvram_size;
11933 }
11934
11935 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11936 {
11937         struct tg3 *tp = netdev_priv(dev);
11938         int ret, cpmu_restore = 0;
11939         u8  *pd;
11940         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11941         __be32 val;
11942
11943         if (tg3_flag(tp, NO_NVRAM))
11944                 return -EINVAL;
11945
11946         offset = eeprom->offset;
11947         len = eeprom->len;
11948         eeprom->len = 0;
11949
11950         eeprom->magic = TG3_EEPROM_MAGIC;
11951
11952         /* Override clock, link aware and link idle modes */
11953         if (tg3_flag(tp, CPMU_PRESENT)) {
11954                 cpmu_val = tr32(TG3_CPMU_CTRL);
11955                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11956                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11957                         tw32(TG3_CPMU_CTRL, cpmu_val &
11958                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11959                                              CPMU_CTRL_LINK_IDLE_MODE));
11960                         cpmu_restore = 1;
11961                 }
11962         }
11963         tg3_override_clk(tp);
11964
11965         if (offset & 3) {
11966                 /* adjustments to start on the required 4-byte boundary */
11967                 b_offset = offset & 3;
11968                 b_count = 4 - b_offset;
11969                 if (b_count > len) {
11970                         /* i.e. offset=1 len=2 */
11971                         b_count = len;
11972                 }
11973                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11974                 if (ret)
11975                         goto eeprom_done;
11976                 memcpy(data, ((char *)&val) + b_offset, b_count);
11977                 len -= b_count;
11978                 offset += b_count;
11979                 eeprom->len += b_count;
11980         }
11981
11982         /* read bytes up to the last 4-byte boundary */
11983         pd = &data[eeprom->len];
11984         for (i = 0; i < (len - (len & 3)); i += 4) {
11985                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11986                 if (ret) {
11987                         if (i)
11988                                 i -= 4;
11989                         eeprom->len += i;
11990                         goto eeprom_done;
11991                 }
11992                 memcpy(pd + i, &val, 4);
11993                 if (need_resched()) {
11994                         if (signal_pending(current)) {
11995                                 eeprom->len += i;
11996                                 ret = -EINTR;
11997                                 goto eeprom_done;
11998                         }
11999                         cond_resched();
12000                 }
12001         }
12002         eeprom->len += i;
12003
12004         if (len & 3) {
12005                 /* read last bytes not ending on 4-byte boundary */
12006                 pd = &data[eeprom->len];
12007                 b_count = len & 3;
12008                 b_offset = offset + len - b_count;
12009                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12010                 if (ret)
12011                         goto eeprom_done;
12012                 memcpy(pd, &val, b_count);
12013                 eeprom->len += b_count;
12014         }
12015         ret = 0;
12016
12017 eeprom_done:
12018         /* Restore clock, link aware and link idle modes */
12019         tg3_restore_clk(tp);
12020         if (cpmu_restore)
12021                 tw32(TG3_CPMU_CTRL, cpmu_val);
12022
12023         return ret;
12024 }
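
/*
 * Worked example of the alignment handling above: for offset = 5 and
 * len = 10, the head step reads the word at offset 4 and copies bytes
 * 5..7 (b_offset = 1, b_count = 3), the main loop reads the aligned
 * word at offset 8, and the tail step reads the word at offset 12 and
 * copies bytes 12..14 -- ten bytes total, all fetched with 4-byte
 * NVRAM reads.
 */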
12025
12026 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12027 {
12028         struct tg3 *tp = netdev_priv(dev);
12029         int ret;
12030         u32 offset, len, b_offset, odd_len;
12031         u8 *buf;
12032         __be32 start = 0, end;
12033
12034         if (tg3_flag(tp, NO_NVRAM) ||
12035             eeprom->magic != TG3_EEPROM_MAGIC)
12036                 return -EINVAL;
12037
12038         offset = eeprom->offset;
12039         len = eeprom->len;
12040
12041         if ((b_offset = (offset & 3))) {
12042                 /* adjustments to start on the required 4-byte boundary */
12043                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12044                 if (ret)
12045                         return ret;
12046                 len += b_offset;
12047                 offset &= ~3;
12048                 if (len < 4)
12049                         len = 4;
12050         }
12051
12052         odd_len = 0;
12053         if (len & 3) {
12054                 /* adjustments to end on the required 4-byte boundary */
12055                 odd_len = 1;
12056                 len = (len + 3) & ~3;
12057                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12058                 if (ret)
12059                         return ret;
12060         }
12061
12062         buf = data;
12063         if (b_offset || odd_len) {
12064                 buf = kmalloc(len, GFP_KERNEL);
12065                 if (!buf)
12066                         return -ENOMEM;
12067                 if (b_offset)
12068                         memcpy(buf, &start, 4);
12069                 if (odd_len)
12070                         memcpy(buf+len-4, &end, 4);
12071                 memcpy(buf + b_offset, data, eeprom->len);
12072         }
12073
12074         ret = tg3_nvram_write_block(tp, offset, len, buf);
12075
12076         if (buf != data)
12077                 kfree(buf);
12078
12079         return ret;
12080 }
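
/*
 * Worked example of the read-modify-write padding above: for offset = 5
 * and len = 6 (bytes 5..10), the start word at offset 4 and the end
 * word at offset 8 are read back, an 8-byte bounce buffer covering
 * bytes 4..11 is assembled, the caller's six bytes are copied in at
 * b_offset = 1, and the aligned 8-byte span is written out -- so bytes
 * 4 and 11 keep their previous contents.
 */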
12081
12082 static int tg3_get_link_ksettings(struct net_device *dev,
12083                                   struct ethtool_link_ksettings *cmd)
12084 {
12085         struct tg3 *tp = netdev_priv(dev);
12086         u32 supported, advertising;
12087
12088         if (tg3_flag(tp, USE_PHYLIB)) {
12089                 struct phy_device *phydev;
12090                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12091                         return -EAGAIN;
12092                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12093                 return phy_ethtool_ksettings_get(phydev, cmd);
12094         }
12095
12096         supported = (SUPPORTED_Autoneg);
12097
12098         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12099                 supported |= (SUPPORTED_1000baseT_Half |
12100                               SUPPORTED_1000baseT_Full);
12101
12102         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12103                 supported |= (SUPPORTED_100baseT_Half |
12104                               SUPPORTED_100baseT_Full |
12105                               SUPPORTED_10baseT_Half |
12106                               SUPPORTED_10baseT_Full |
12107                               SUPPORTED_TP);
12108                 cmd->base.port = PORT_TP;
12109         } else {
12110                 supported |= SUPPORTED_FIBRE;
12111                 cmd->base.port = PORT_FIBRE;
12112         }
12113         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12114                                                 supported);
12115
12116         advertising = tp->link_config.advertising;
12117         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12118                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12119                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12120                                 advertising |= ADVERTISED_Pause;
12121                         } else {
12122                                 advertising |= ADVERTISED_Pause |
12123                                         ADVERTISED_Asym_Pause;
12124                         }
12125                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12126                         advertising |= ADVERTISED_Asym_Pause;
12127                 }
12128         }
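        /*
         * This follows the IEEE 802.3 pause advertisement encoding:
         * RX+TX -> Pause, RX only -> Pause | Asym_Pause, and
         * TX only -> Asym_Pause.
         */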
12129         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12130                                                 advertising);
12131
12132         if (netif_running(dev) && tp->link_up) {
12133                 cmd->base.speed = tp->link_config.active_speed;
12134                 cmd->base.duplex = tp->link_config.active_duplex;
12135                 ethtool_convert_legacy_u32_to_link_mode(
12136                         cmd->link_modes.lp_advertising,
12137                         tp->link_config.rmt_adv);
12138
12139                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12140                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12141                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12142                         else
12143                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12144                 }
12145         } else {
12146                 cmd->base.speed = SPEED_UNKNOWN;
12147                 cmd->base.duplex = DUPLEX_UNKNOWN;
12148                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12149         }
12150         cmd->base.phy_address = tp->phy_addr;
12151         cmd->base.autoneg = tp->link_config.autoneg;
12152         return 0;
12153 }
12154
12155 static int tg3_set_link_ksettings(struct net_device *dev,
12156                                   const struct ethtool_link_ksettings *cmd)
12157 {
12158         struct tg3 *tp = netdev_priv(dev);
12159         u32 speed = cmd->base.speed;
12160         u32 advertising;
12161
12162         if (tg3_flag(tp, USE_PHYLIB)) {
12163                 struct phy_device *phydev;
12164                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12165                         return -EAGAIN;
12166                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12167                 return phy_ethtool_ksettings_set(phydev, cmd);
12168         }
12169
12170         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12171             cmd->base.autoneg != AUTONEG_DISABLE)
12172                 return -EINVAL;
12173
12174         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12175             cmd->base.duplex != DUPLEX_FULL &&
12176             cmd->base.duplex != DUPLEX_HALF)
12177                 return -EINVAL;
12178
12179         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12180                                                 cmd->link_modes.advertising);
12181
12182         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12183                 u32 mask = ADVERTISED_Autoneg |
12184                            ADVERTISED_Pause |
12185                            ADVERTISED_Asym_Pause;
12186
12187                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12188                         mask |= ADVERTISED_1000baseT_Half |
12189                                 ADVERTISED_1000baseT_Full;
12190
12191                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12192                         mask |= ADVERTISED_100baseT_Half |
12193                                 ADVERTISED_100baseT_Full |
12194                                 ADVERTISED_10baseT_Half |
12195                                 ADVERTISED_10baseT_Full |
12196                                 ADVERTISED_TP;
12197                 else
12198                         mask |= ADVERTISED_FIBRE;
12199
12200                 if (advertising & ~mask)
12201                         return -EINVAL;
12202
12203                 mask &= (ADVERTISED_1000baseT_Half |
12204                          ADVERTISED_1000baseT_Full |
12205                          ADVERTISED_100baseT_Half |
12206                          ADVERTISED_100baseT_Full |
12207                          ADVERTISED_10baseT_Half |
12208                          ADVERTISED_10baseT_Full);
12209
12210                 advertising &= mask;
12211         } else {
12212                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12213                         if (speed != SPEED_1000)
12214                                 return -EINVAL;
12215
12216                         if (cmd->base.duplex != DUPLEX_FULL)
12217                                 return -EINVAL;
12218                 } else {
12219                         if (speed != SPEED_100 &&
12220                             speed != SPEED_10)
12221                                 return -EINVAL;
12222                 }
12223         }
12224
12225         tg3_full_lock(tp, 0);
12226
12227         tp->link_config.autoneg = cmd->base.autoneg;
12228         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12229                 tp->link_config.advertising = (advertising |
12230                                               ADVERTISED_Autoneg);
12231                 tp->link_config.speed = SPEED_UNKNOWN;
12232                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12233         } else {
12234                 tp->link_config.advertising = 0;
12235                 tp->link_config.speed = speed;
12236                 tp->link_config.duplex = cmd->base.duplex;
12237         }
12238
12239         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12240
12241         tg3_warn_mgmt_link_flap(tp);
12242
12243         if (netif_running(dev))
12244                 tg3_setup_phy(tp, true);
12245
12246         tg3_full_unlock(tp);
12247
12248         return 0;
12249 }
12250
12251 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12252 {
12253         struct tg3 *tp = netdev_priv(dev);
12254
12255         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12256         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12257         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12258         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12259 }
12260
12261 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12262 {
12263         struct tg3 *tp = netdev_priv(dev);
12264
12265         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12266                 wol->supported = WAKE_MAGIC;
12267         else
12268                 wol->supported = 0;
12269         wol->wolopts = 0;
12270         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12271                 wol->wolopts = WAKE_MAGIC;
12272         memset(&wol->sopass, 0, sizeof(wol->sopass));
12273 }
12274
12275 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12276 {
12277         struct tg3 *tp = netdev_priv(dev);
12278         struct device *dp = &tp->pdev->dev;
12279
12280         if (wol->wolopts & ~WAKE_MAGIC)
12281                 return -EINVAL;
12282         if ((wol->wolopts & WAKE_MAGIC) &&
12283             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12284                 return -EINVAL;
12285
12286         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12287
12288         if (device_may_wakeup(dp))
12289                 tg3_flag_set(tp, WOL_ENABLE);
12290         else
12291                 tg3_flag_clear(tp, WOL_ENABLE);
12292
12293         return 0;
12294 }
12295
12296 static u32 tg3_get_msglevel(struct net_device *dev)
12297 {
12298         struct tg3 *tp = netdev_priv(dev);
12299         return tp->msg_enable;
12300 }
12301
12302 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12303 {
12304         struct tg3 *tp = netdev_priv(dev);
12305         tp->msg_enable = value;
12306 }
12307
12308 static int tg3_nway_reset(struct net_device *dev)
12309 {
12310         struct tg3 *tp = netdev_priv(dev);
12311         int r;
12312
12313         if (!netif_running(dev))
12314                 return -EAGAIN;
12315
12316         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12317                 return -EINVAL;
12318
12319         tg3_warn_mgmt_link_flap(tp);
12320
12321         if (tg3_flag(tp, USE_PHYLIB)) {
12322                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12323                         return -EAGAIN;
12324                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12325         } else {
12326                 u32 bmcr;
12327
12328                 spin_lock_bh(&tp->lock);
12329                 r = -EINVAL;
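                /* BMCR is read twice and the first result discarded --
                 * assumed here to be a flush of a stale value on PHYs
                 * that latch reads.
                 */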
12330                 tg3_readphy(tp, MII_BMCR, &bmcr);
12331                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12332                     ((bmcr & BMCR_ANENABLE) ||
12333                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12334                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12335                                                    BMCR_ANENABLE);
12336                         r = 0;
12337                 }
12338                 spin_unlock_bh(&tp->lock);
12339         }
12340
12341         return r;
12342 }
12343
12344 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12345 {
12346         struct tg3 *tp = netdev_priv(dev);
12347
12348         ering->rx_max_pending = tp->rx_std_ring_mask;
12349         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12350                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12351         else
12352                 ering->rx_jumbo_max_pending = 0;
12353
12354         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12355
12356         ering->rx_pending = tp->rx_pending;
12357         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12358                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12359         else
12360                 ering->rx_jumbo_pending = 0;
12361
12362         ering->tx_pending = tp->napi[0].tx_pending;
12363 }
12364
12365 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12366 {
12367         struct tg3 *tp = netdev_priv(dev);
12368         int i, irq_sync = 0, err = 0;
12369
12370         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12371             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12372             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12373             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12374             (tg3_flag(tp, TSO_BUG) &&
12375              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12376                 return -EINVAL;
12377
12378         if (netif_running(dev)) {
12379                 tg3_phy_stop(tp);
12380                 tg3_netif_stop(tp);
12381                 irq_sync = 1;
12382         }
12383
12384         tg3_full_lock(tp, irq_sync);
12385
12386         tp->rx_pending = ering->rx_pending;
12387
12388         if (tg3_flag(tp, MAX_RXPEND_64) &&
12389             tp->rx_pending > 63)
12390                 tp->rx_pending = 63;
12391
12392         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12393                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12394
12395         for (i = 0; i < tp->irq_max; i++)
12396                 tp->napi[i].tx_pending = ering->tx_pending;
12397
12398         if (netif_running(dev)) {
12399                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12400                 err = tg3_restart_hw(tp, false);
12401                 if (!err)
12402                         tg3_netif_start(tp);
12403         }
12404
12405         tg3_full_unlock(tp);
12406
12407         if (irq_sync && !err)
12408                 tg3_phy_start(tp);
12409
12410         return err;
12411 }
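
/*
 * Usage note: these are the limits a user hits via "ethtool -G", e.g.
 * "ethtool -G eth0 rx 511 tx 511" ("eth0" being a placeholder name).
 * TX requests at or below MAX_SKB_FRAGS are rejected above, presumably
 * because a single fully-fragmented skb could otherwise consume the
 * entire ring.
 */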
12412
12413 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12414 {
12415         struct tg3 *tp = netdev_priv(dev);
12416
12417         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12418
12419         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12420                 epause->rx_pause = 1;
12421         else
12422                 epause->rx_pause = 0;
12423
12424         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12425                 epause->tx_pause = 1;
12426         else
12427                 epause->tx_pause = 0;
12428 }
12429
12430 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12431 {
12432         struct tg3 *tp = netdev_priv(dev);
12433         int err = 0;
12434
12435         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12436                 tg3_warn_mgmt_link_flap(tp);
12437
12438         if (tg3_flag(tp, USE_PHYLIB)) {
12439                 u32 newadv;
12440                 struct phy_device *phydev;
12441
12442                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12443
12444                 if (!(phydev->supported & SUPPORTED_Pause) ||
12445                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12446                      (epause->rx_pause != epause->tx_pause)))
12447                         return -EINVAL;
12448
12449                 tp->link_config.flowctrl = 0;
12450                 if (epause->rx_pause) {
12451                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12452
12453                         if (epause->tx_pause) {
12454                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12455                                 newadv = ADVERTISED_Pause;
12456                         } else
12457                                 newadv = ADVERTISED_Pause |
12458                                          ADVERTISED_Asym_Pause;
12459                 } else if (epause->tx_pause) {
12460                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12461                         newadv = ADVERTISED_Asym_Pause;
12462                 } else
12463                         newadv = 0;
12464
12465                 if (epause->autoneg)
12466                         tg3_flag_set(tp, PAUSE_AUTONEG);
12467                 else
12468                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12469
12470                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12471                         u32 oldadv = phydev->advertising &
12472                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12473                         if (oldadv != newadv) {
12474                                 phydev->advertising &=
12475                                         ~(ADVERTISED_Pause |
12476                                           ADVERTISED_Asym_Pause);
12477                                 phydev->advertising |= newadv;
12478                                 if (phydev->autoneg) {
12479                                         /*
12480                                          * Always renegotiate the link to
12481                                          * inform our link partner of our
12482                                          * flow control settings, even if the
12483                                          * flow control is forced.  Let
12484                                          * tg3_adjust_link() do the final
12485                                          * flow control setup.
12486                                          */
12487                                         return phy_start_aneg(phydev);
12488                                 }
12489                         }
12490
12491                         if (!epause->autoneg)
12492                                 tg3_setup_flow_control(tp, 0, 0);
12493                 } else {
12494                         tp->link_config.advertising &=
12495                                         ~(ADVERTISED_Pause |
12496                                           ADVERTISED_Asym_Pause);
12497                         tp->link_config.advertising |= newadv;
12498                 }
12499         } else {
12500                 int irq_sync = 0;
12501
12502                 if (netif_running(dev)) {
12503                         tg3_netif_stop(tp);
12504                         irq_sync = 1;
12505                 }
12506
12507                 tg3_full_lock(tp, irq_sync);
12508
12509                 if (epause->autoneg)
12510                         tg3_flag_set(tp, PAUSE_AUTONEG);
12511                 else
12512                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12513                 if (epause->rx_pause)
12514                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12515                 else
12516                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12517                 if (epause->tx_pause)
12518                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12519                 else
12520                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12521
12522                 if (netif_running(dev)) {
12523                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12524                         err = tg3_restart_hw(tp, false);
12525                         if (!err)
12526                                 tg3_netif_start(tp);
12527                 }
12528
12529                 tg3_full_unlock(tp);
12530         }
12531
12532         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12533
12534         return err;
12535 }
12536
12537 static int tg3_get_sset_count(struct net_device *dev, int sset)
12538 {
12539         switch (sset) {
12540         case ETH_SS_TEST:
12541                 return TG3_NUM_TEST;
12542         case ETH_SS_STATS:
12543                 return TG3_NUM_STATS;
12544         default:
12545                 return -EOPNOTSUPP;
12546         }
12547 }
12548
12549 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12550                          u32 *rules __always_unused)
12551 {
12552         struct tg3 *tp = netdev_priv(dev);
12553
12554         if (!tg3_flag(tp, SUPPORT_MSIX))
12555                 return -EOPNOTSUPP;
12556
12557         switch (info->cmd) {
12558         case ETHTOOL_GRXRINGS:
12559                 if (netif_running(tp->dev))
12560                         info->data = tp->rxq_cnt;
12561                 else {
12562                         info->data = num_online_cpus();
12563                         if (info->data > TG3_RSS_MAX_NUM_QS)
12564                                 info->data = TG3_RSS_MAX_NUM_QS;
12565                 }
12566
12567                 return 0;
12568
12569         default:
12570                 return -EOPNOTSUPP;
12571         }
12572 }
12573
12574 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12575 {
12576         u32 size = 0;
12577         struct tg3 *tp = netdev_priv(dev);
12578
12579         if (tg3_flag(tp, SUPPORT_MSIX))
12580                 size = TG3_RSS_INDIR_TBL_SIZE;
12581
12582         return size;
12583 }
12584
12585 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12586 {
12587         struct tg3 *tp = netdev_priv(dev);
12588         int i;
12589
12590         if (hfunc)
12591                 *hfunc = ETH_RSS_HASH_TOP;
12592         if (!indir)
12593                 return 0;
12594
12595         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12596                 indir[i] = tp->rss_ind_tbl[i];
12597
12598         return 0;
12599 }
12600
12601 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12602                         const u8 hfunc)
12603 {
12604         struct tg3 *tp = netdev_priv(dev);
12605         size_t i;
12606
12607         /* We require at least one supported parameter to be changed and no
12608          * change in any of the unsupported parameters.
12609          */
12610         if (key ||
12611             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12612                 return -EOPNOTSUPP;
12613
12614         if (!indir)
12615                 return 0;
12616
12617         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12618                 tp->rss_ind_tbl[i] = indir[i];
12619
12620         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12621                 return 0;
12622
12623         /* It is legal to write the indirection
12624          * table while the device is running.
12625          */
12626         tg3_full_lock(tp, 0);
12627         tg3_rss_write_indir_tbl(tp);
12628         tg3_full_unlock(tp);
12629
12630         return 0;
12631 }
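
/*
 * Usage note: tp->rss_ind_tbl[i] names the RX queue that receives hash
 * bucket i.  From user space this path is exercised with, e.g.,
 * "ethtool -X eth0 equal 4" to spread the buckets evenly over four
 * queues, and "ethtool -x eth0" to read the table back ("eth0" is a
 * placeholder device name).
 */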
12632
12633 static void tg3_get_channels(struct net_device *dev,
12634                              struct ethtool_channels *channel)
12635 {
12636         struct tg3 *tp = netdev_priv(dev);
12637         u32 deflt_qs = netif_get_num_default_rss_queues();
12638
12639         channel->max_rx = tp->rxq_max;
12640         channel->max_tx = tp->txq_max;
12641
12642         if (netif_running(dev)) {
12643                 channel->rx_count = tp->rxq_cnt;
12644                 channel->tx_count = tp->txq_cnt;
12645         } else {
12646                 if (tp->rxq_req)
12647                         channel->rx_count = tp->rxq_req;
12648                 else
12649                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12650
12651                 if (tp->txq_req)
12652                         channel->tx_count = tp->txq_req;
12653                 else
12654                         channel->tx_count = min(deflt_qs, tp->txq_max);
12655         }
12656 }
12657
12658 static int tg3_set_channels(struct net_device *dev,
12659                             struct ethtool_channels *channel)
12660 {
12661         struct tg3 *tp = netdev_priv(dev);
12662
12663         if (!tg3_flag(tp, SUPPORT_MSIX))
12664                 return -EOPNOTSUPP;
12665
12666         if (channel->rx_count > tp->rxq_max ||
12667             channel->tx_count > tp->txq_max)
12668                 return -EINVAL;
12669
12670         tp->rxq_req = channel->rx_count;
12671         tp->txq_req = channel->tx_count;
12672
12673         if (!netif_running(dev))
12674                 return 0;
12675
12676         tg3_stop(tp);
12677
12678         tg3_carrier_off(tp);
12679
12680         tg3_start(tp, true, false, false);
12681
12682         return 0;
12683 }
12684
12685 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12686 {
12687         switch (stringset) {
12688         case ETH_SS_STATS:
12689                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12690                 break;
12691         case ETH_SS_TEST:
12692                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12693                 break;
12694         default:
12695                 WARN_ON(1);     /* we need a WARN() */
12696                 break;
12697         }
12698 }
12699
12700 static int tg3_set_phys_id(struct net_device *dev,
12701                             enum ethtool_phys_id_state state)
12702 {
12703         struct tg3 *tp = netdev_priv(dev);
12704
12705         if (!netif_running(tp->dev))
12706                 return -EAGAIN;
12707
12708         switch (state) {
12709         case ETHTOOL_ID_ACTIVE:
12710                 return 1;       /* cycle on/off once per second */
12711
12712         case ETHTOOL_ID_ON:
12713                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12714                      LED_CTRL_1000MBPS_ON |
12715                      LED_CTRL_100MBPS_ON |
12716                      LED_CTRL_10MBPS_ON |
12717                      LED_CTRL_TRAFFIC_OVERRIDE |
12718                      LED_CTRL_TRAFFIC_BLINK |
12719                      LED_CTRL_TRAFFIC_LED);
12720                 break;
12721
12722         case ETHTOOL_ID_OFF:
12723                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12724                      LED_CTRL_TRAFFIC_OVERRIDE);
12725                 break;
12726
12727         case ETHTOOL_ID_INACTIVE:
12728                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12729                 break;
12730         }
12731
12732         return 0;
12733 }
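
/*
 * Usage note: this is the handler behind "ethtool -p", e.g.
 * "ethtool -p eth0 5" blinks the port LED for five seconds ("eth0" is
 * a placeholder).  Returning 1 for ETHTOOL_ID_ACTIVE asks the ethtool
 * core to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second.
 */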
12734
12735 static void tg3_get_ethtool_stats(struct net_device *dev,
12736                                    struct ethtool_stats *estats, u64 *tmp_stats)
12737 {
12738         struct tg3 *tp = netdev_priv(dev);
12739
12740         if (tp->hw_stats)
12741                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12742         else
12743                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12744 }
12745
12746 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12747 {
12748         int i;
12749         __be32 *buf;
12750         u32 offset = 0, len = 0;
12751         u32 magic, val;
12752
12753         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12754                 return NULL;
12755
12756         if (magic == TG3_EEPROM_MAGIC) {
12757                 for (offset = TG3_NVM_DIR_START;
12758                      offset < TG3_NVM_DIR_END;
12759                      offset += TG3_NVM_DIRENT_SIZE) {
12760                         if (tg3_nvram_read(tp, offset, &val))
12761                                 return NULL;
12762
12763                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12764                             TG3_NVM_DIRTYPE_EXTVPD)
12765                                 break;
12766                 }
12767
12768                 if (offset != TG3_NVM_DIR_END) {
12769                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12770                         if (tg3_nvram_read(tp, offset + 4, &offset))
12771                                 return NULL;
12772
12773                         offset = tg3_nvram_logical_addr(tp, offset);
12774                 }
12775         }
12776
12777         if (!offset || !len) {
12778                 offset = TG3_NVM_VPD_OFF;
12779                 len = TG3_NVM_VPD_LEN;
12780         }
12781
12782         buf = kmalloc(len, GFP_KERNEL);
12783         if (buf == NULL)
12784                 return NULL;
12785
12786         if (magic == TG3_EEPROM_MAGIC) {
12787                 for (i = 0; i < len; i += 4) {
12788                         /* The data is in little-endian format in NVRAM.
12789                          * Use the big-endian read routines to preserve
12790                          * the byte order as it exists in NVRAM.
12791                          */
12792                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12793                                 goto error;
12794                 }
12795         } else {
12796                 u8 *ptr;
12797                 ssize_t cnt;
12798                 unsigned int pos = 0;
12799
12800                 ptr = (u8 *)&buf[0];
12801                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12802                         cnt = pci_read_vpd(tp->pdev, pos,
12803                                            len - pos, ptr);
12804                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12805                                 cnt = 0;
12806                         else if (cnt < 0)
12807                                 goto error;
12808                 }
12809                 if (pos != len)
12810                         goto error;
12811         }
12812
12813         *vpdlen = len;
12814
12815         return buf;
12816
12817 error:
12818         kfree(buf);
12819         return NULL;
12820 }
12821
12822 #define NVRAM_TEST_SIZE 0x100
12823 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12824 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12825 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12826 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12827 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12828 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12829 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12830 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12831
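/* ethtool self-test: validate the NVRAM contents.  The image size is
 * derived from the magic/format/revision words; selfboot images are
 * checked with an 8-bit checksum or a parity scheme, legacy images
 * with the bootstrap and manufacturing CRCs plus the VPD read-only
 * section checksum.
 */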
12832 static int tg3_test_nvram(struct tg3 *tp)
12833 {
12834         u32 csum, magic, len;
12835         __be32 *buf;
12836         int i, j, k, err = 0, size;
12837
12838         if (tg3_flag(tp, NO_NVRAM))
12839                 return 0;
12840
12841         if (tg3_nvram_read(tp, 0, &magic) != 0)
12842                 return -EIO;
12843
12844         if (magic == TG3_EEPROM_MAGIC)
12845                 size = NVRAM_TEST_SIZE;
12846         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12847                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12848                     TG3_EEPROM_SB_FORMAT_1) {
12849                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12850                         case TG3_EEPROM_SB_REVISION_0:
12851                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12852                                 break;
12853                         case TG3_EEPROM_SB_REVISION_2:
12854                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12855                                 break;
12856                         case TG3_EEPROM_SB_REVISION_3:
12857                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12858                                 break;
12859                         case TG3_EEPROM_SB_REVISION_4:
12860                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12861                                 break;
12862                         case TG3_EEPROM_SB_REVISION_5:
12863                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12864                                 break;
12865                         case TG3_EEPROM_SB_REVISION_6:
12866                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12867                                 break;
12868                         default:
12869                                 return -EIO;
12870                         }
12871                 } else
12872                         return 0;
12873         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12874                 size = NVRAM_SELFBOOT_HW_SIZE;
12875         else
12876                 return -EIO;
12877
12878         buf = kmalloc(size, GFP_KERNEL);
12879         if (buf == NULL)
12880                 return -ENOMEM;
12881
12882         err = -EIO;
12883         for (i = 0, j = 0; i < size; i += 4, j++) {
12884                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12885                 if (err)
12886                         break;
12887         }
12888         if (i < size)
12889                 goto out;
12890
12891         /* Selfboot format */
12892         magic = be32_to_cpu(buf[0]);
12893         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12894             TG3_EEPROM_MAGIC_FW) {
12895                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12896
12897                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12898                     TG3_EEPROM_SB_REVISION_2) {
12899                         /* For rev 2, the csum doesn't include the MBA. */
12900                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12901                                 csum8 += buf8[i];
12902                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12903                                 csum8 += buf8[i];
12904                 } else {
12905                         for (i = 0; i < size; i++)
12906                                 csum8 += buf8[i];
12907                 }
12908
12909                 if (csum8 == 0) {
12910                         err = 0;
12911                         goto out;
12912                 }
12913
12914                 err = -EIO;
12915                 goto out;
12916         }
12917
12918         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12919             TG3_EEPROM_MAGIC_HW) {
12920                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12921                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12922                 u8 *buf8 = (u8 *) buf;
12923
12924                 /* Separate the parity bits and the data bytes.  */
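                /* Bytes 0, 8, 16 and 17 of the 0x20-byte image hold 28
                 * packed parity bits (7 + 7 + 6 + 8) covering the 28
                 * data bytes; each data byte combined with its parity
                 * bit must have odd parity.
                 */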
12925                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12926                         if ((i == 0) || (i == 8)) {
12927                                 int l;
12928                                 u8 msk;
12929
12930                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12931                                         parity[k++] = buf8[i] & msk;
12932                                 i++;
12933                         } else if (i == 16) {
12934                                 int l;
12935                                 u8 msk;
12936
12937                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12938                                         parity[k++] = buf8[i] & msk;
12939                                 i++;
12940
12941                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12942                                         parity[k++] = buf8[i] & msk;
12943                                 i++;
12944                         }
12945                         data[j++] = buf8[i];
12946                 }
12947
12948                 err = -EIO;
12949                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12950                         u8 hw8 = hweight8(data[i]);
12951
12952                         if ((hw8 & 0x1) && parity[i])
12953                                 goto out;
12954                         else if (!(hw8 & 0x1) && !parity[i])
12955                                 goto out;
12956                 }
12957                 err = 0;
12958                 goto out;
12959         }
12960
12961         err = -EIO;
12962
12963         /* Bootstrap checksum at offset 0x10 */
12964         csum = calc_crc((unsigned char *) buf, 0x10);
12965         if (csum != le32_to_cpu(buf[0x10/4]))
12966                 goto out;
12967
12968         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12969         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12970         if (csum != le32_to_cpu(buf[0xfc/4]))
12971                 goto out;
12972
12973         kfree(buf);
12974
12975         buf = tg3_vpd_readblock(tp, &len);
12976         if (!buf)
12977                 return -ENOMEM;
12978
12979         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12980         if (i > 0) {
12981                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12982                 if (j < 0)
12983                         goto out;
12984
12985                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12986                         goto out;
12987
12988                 i += PCI_VPD_LRDT_TAG_SIZE;
12989                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12990                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12991                 if (j > 0) {
12992                         u8 csum8 = 0;
12993
12994                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12995
12996                         for (i = 0; i <= j; i++)
12997                                 csum8 += ((u8 *)buf)[i];
12998
12999                         if (csum8)
13000                                 goto out;
13001                 }
13002         }
13003
13004         err = 0;
13005
13006 out:
13007         kfree(buf);
13008         return err;
13009 }
13010
13011 #define TG3_SERDES_TIMEOUT_SEC  2
13012 #define TG3_COPPER_TIMEOUT_SEC  6
13013
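/* ethtool self-test: poll once a second for link-up, for at most 2
 * seconds on SerDes devices and 6 seconds on copper.
 */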
13014 static int tg3_test_link(struct tg3 *tp)
13015 {
13016         int i, max;
13017
13018         if (!netif_running(tp->dev))
13019                 return -ENODEV;
13020
13021         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13022                 max = TG3_SERDES_TIMEOUT_SEC;
13023         else
13024                 max = TG3_COPPER_TIMEOUT_SEC;
13025
13026         for (i = 0; i < max; i++) {
13027                 if (tp->link_up)
13028                         return 0;
13029
13030                 if (msleep_interruptible(1000))
13031                         break;
13032         }
13033
13034         return -EIO;
13035 }
13036
13037 /* Only test the commonly used registers */
13038 static int tg3_test_registers(struct tg3 *tp)
13039 {
13040         int i, is_5705, is_5750;
13041         u32 offset, read_mask, write_mask, val, save_val, read_val;
13042         static struct {
13043                 u16 offset;
13044                 u16 flags;
13045 #define TG3_FL_5705     0x1
13046 #define TG3_FL_NOT_5705 0x2
13047 #define TG3_FL_NOT_5788 0x4
13048 #define TG3_FL_NOT_5750 0x8
13049                 u32 read_mask;
13050                 u32 write_mask;
13051         } reg_tbl[] = {
13052                 /* MAC Control Registers */
13053                 { MAC_MODE, TG3_FL_NOT_5705,
13054                         0x00000000, 0x00ef6f8c },
13055                 { MAC_MODE, TG3_FL_5705,
13056                         0x00000000, 0x01ef6b8c },
13057                 { MAC_STATUS, TG3_FL_NOT_5705,
13058                         0x03800107, 0x00000000 },
13059                 { MAC_STATUS, TG3_FL_5705,
13060                         0x03800100, 0x00000000 },
13061                 { MAC_ADDR_0_HIGH, 0x0000,
13062                         0x00000000, 0x0000ffff },
13063                 { MAC_ADDR_0_LOW, 0x0000,
13064                         0x00000000, 0xffffffff },
13065                 { MAC_RX_MTU_SIZE, 0x0000,
13066                         0x00000000, 0x0000ffff },
13067                 { MAC_TX_MODE, 0x0000,
13068                         0x00000000, 0x00000070 },
13069                 { MAC_TX_LENGTHS, 0x0000,
13070                         0x00000000, 0x00003fff },
13071                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13072                         0x00000000, 0x000007fc },
13073                 { MAC_RX_MODE, TG3_FL_5705,
13074                         0x00000000, 0x000007dc },
13075                 { MAC_HASH_REG_0, 0x0000,
13076                         0x00000000, 0xffffffff },
13077                 { MAC_HASH_REG_1, 0x0000,
13078                         0x00000000, 0xffffffff },
13079                 { MAC_HASH_REG_2, 0x0000,
13080                         0x00000000, 0xffffffff },
13081                 { MAC_HASH_REG_3, 0x0000,
13082                         0x00000000, 0xffffffff },
13083
13084                 /* Receive Data and Receive BD Initiator Control Registers. */
13085                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13086                         0x00000000, 0xffffffff },
13087                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13088                         0x00000000, 0xffffffff },
13089                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13090                         0x00000000, 0x00000003 },
13091                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13092                         0x00000000, 0xffffffff },
13093                 { RCVDBDI_STD_BD+0, 0x0000,
13094                         0x00000000, 0xffffffff },
13095                 { RCVDBDI_STD_BD+4, 0x0000,
13096                         0x00000000, 0xffffffff },
13097                 { RCVDBDI_STD_BD+8, 0x0000,
13098                         0x00000000, 0xffff0002 },
13099                 { RCVDBDI_STD_BD+0xc, 0x0000,
13100                         0x00000000, 0xffffffff },
13101
13102                 /* Receive BD Initiator Control Registers. */
13103                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13104                         0x00000000, 0xffffffff },
13105                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13106                         0x00000000, 0x000003ff },
13107                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13108                         0x00000000, 0xffffffff },
13109
13110                 /* Host Coalescing Control Registers. */
13111                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13112                         0x00000000, 0x00000004 },
13113                 { HOSTCC_MODE, TG3_FL_5705,
13114                         0x00000000, 0x000000f6 },
13115                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13116                         0x00000000, 0xffffffff },
13117                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13118                         0x00000000, 0x000003ff },
13119                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13120                         0x00000000, 0xffffffff },
13121                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13122                         0x00000000, 0x000003ff },
13123                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13124                         0x00000000, 0xffffffff },
13125                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13126                         0x00000000, 0x000000ff },
13127                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13128                         0x00000000, 0xffffffff },
13129                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130                         0x00000000, 0x000000ff },
13131                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13132                         0x00000000, 0xffffffff },
13133                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134                         0x00000000, 0xffffffff },
13135                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13138                         0x00000000, 0x000000ff },
13139                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140                         0x00000000, 0xffffffff },
13141                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142                         0x00000000, 0x000000ff },
13143                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13146                         0x00000000, 0xffffffff },
13147                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13150                         0x00000000, 0xffffffff },
13151                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13152                         0x00000000, 0xffffffff },
13153                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13154                         0xffffffff, 0x00000000 },
13155                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13156                         0xffffffff, 0x00000000 },
13157
13158                 /* Buffer Manager Control Registers. */
13159                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13160                         0x00000000, 0x007fff80 },
13161                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13162                         0x00000000, 0x007fffff },
13163                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13164                         0x00000000, 0x0000003f },
13165                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13166                         0x00000000, 0x000001ff },
13167                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13168                         0x00000000, 0x000001ff },
13169                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13170                         0xffffffff, 0x00000000 },
13171                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13172                         0xffffffff, 0x00000000 },
13173
13174                 /* Mailbox Registers */
13175                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13176                         0x00000000, 0x000001ff },
13177                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13178                         0x00000000, 0x000001ff },
13179                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13180                         0x00000000, 0x000007ff },
13181                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13182                         0x00000000, 0x000001ff },
13183
13184                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13185         };
13186
13187         is_5705 = is_5750 = 0;
13188         if (tg3_flag(tp, 5705_PLUS)) {
13189                 is_5705 = 1;
13190                 if (tg3_flag(tp, 5750_PLUS))
13191                         is_5750 = 1;
13192         }
13193
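        /* For each applicable register: write all zeros, then all
         * writable ones, verifying after each write that the read-only
         * bits kept their saved value and the read/write bits took
         * exactly the written value.
         */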
13194         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13195                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13196                         continue;
13197
13198                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13199                         continue;
13200
13201                 if (tg3_flag(tp, IS_5788) &&
13202                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13203                         continue;
13204
13205                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13206                         continue;
13207
13208                 offset = (u32) reg_tbl[i].offset;
13209                 read_mask = reg_tbl[i].read_mask;
13210                 write_mask = reg_tbl[i].write_mask;
13211
13212                 /* Save the original register content */
13213                 save_val = tr32(offset);
13214
13215                 /* Determine the read-only value. */
13216                 read_val = save_val & read_mask;
13217
13218                 /* Write zero to the register, then make sure the read-only bits
13219                  * are not changed and the read/write bits are all zeros.
13220                  */
13221                 tw32(offset, 0);
13222
13223                 val = tr32(offset);
13224
13225                 /* Test the read-only and read/write bits. */
13226                 if (((val & read_mask) != read_val) || (val & write_mask))
13227                         goto out;
13228
13229                 /* Write ones to all the bits defined by read_mask and write_mask,
13230                  * then make sure the read-only bits are not changed and the
13231                  * read/write bits are all ones.
13232                  */
13233                 tw32(offset, read_mask | write_mask);
13234
13235                 val = tr32(offset);
13236
13237                 /* Test the read-only bits. */
13238                 if ((val & read_mask) != read_val)
13239                         goto out;
13240
13241                 /* Test the read/write bits. */
13242                 if ((val & write_mask) != write_mask)
13243                         goto out;
13244
13245                 tw32(offset, save_val);
13246         }
13247
13248         return 0;
13249
13250 out:
13251         if (netif_msg_hw(tp))
13252                 netdev_err(tp->dev,
13253                            "Register test failed at offset %x\n", offset);
13254         tw32(offset, save_val);
13255         return -EIO;
13256 }
13257
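/* Write each test pattern (all zeros, all ones, 0xaa55a55a) across the
 * given internal memory window, 32 bits at a time, and read it back.
 */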
13258 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13259 {
13260         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13261         int i;
13262         u32 j;
13263
13264         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13265                 for (j = 0; j < len; j += 4) {
13266                         u32 val;
13267
13268                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13269                         tg3_read_mem(tp, offset + j, &val);
13270                         if (val != test_pattern[i])
13271                                 return -EIO;
13272                 }
13273         }
13274         return 0;
13275 }
13276
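/* ethtool self-test: pattern-test the chip's internal SRAM.  The table
 * of testable offset/length windows differs per ASIC generation.
 */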
13277 static int tg3_test_memory(struct tg3 *tp)
13278 {
13279         static struct mem_entry {
13280                 u32 offset;
13281                 u32 len;
13282         } mem_tbl_570x[] = {
13283                 { 0x00000000, 0x00b50},
13284                 { 0x00002000, 0x1c000},
13285                 { 0xffffffff, 0x00000}
13286         }, mem_tbl_5705[] = {
13287                 { 0x00000100, 0x0000c},
13288                 { 0x00000200, 0x00008},
13289                 { 0x00004000, 0x00800},
13290                 { 0x00006000, 0x01000},
13291                 { 0x00008000, 0x02000},
13292                 { 0x00010000, 0x0e000},
13293                 { 0xffffffff, 0x00000}
13294         }, mem_tbl_5755[] = {
13295                 { 0x00000200, 0x00008},
13296                 { 0x00004000, 0x00800},
13297                 { 0x00006000, 0x00800},
13298                 { 0x00008000, 0x02000},
13299                 { 0x00010000, 0x0c000},
13300                 { 0xffffffff, 0x00000}
13301         }, mem_tbl_5906[] = {
13302                 { 0x00000200, 0x00008},
13303                 { 0x00004000, 0x00400},
13304                 { 0x00006000, 0x00400},
13305                 { 0x00008000, 0x01000},
13306                 { 0x00010000, 0x01000},
13307                 { 0xffffffff, 0x00000}
13308         }, mem_tbl_5717[] = {
13309                 { 0x00000200, 0x00008},
13310                 { 0x00010000, 0x0a000},
13311                 { 0x00020000, 0x13c00},
13312                 { 0xffffffff, 0x00000}
13313         }, mem_tbl_57765[] = {
13314                 { 0x00000200, 0x00008},
13315                 { 0x00004000, 0x00800},
13316                 { 0x00006000, 0x09800},
13317                 { 0x00010000, 0x0a000},
13318                 { 0xffffffff, 0x00000}
13319         };
13320         struct mem_entry *mem_tbl;
13321         int err = 0;
13322         int i;
13323
13324         if (tg3_flag(tp, 5717_PLUS))
13325                 mem_tbl = mem_tbl_5717;
13326         else if (tg3_flag(tp, 57765_CLASS) ||
13327                  tg3_asic_rev(tp) == ASIC_REV_5762)
13328                 mem_tbl = mem_tbl_57765;
13329         else if (tg3_flag(tp, 5755_PLUS))
13330                 mem_tbl = mem_tbl_5755;
13331         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13332                 mem_tbl = mem_tbl_5906;
13333         else if (tg3_flag(tp, 5705_PLUS))
13334                 mem_tbl = mem_tbl_5705;
13335         else
13336                 mem_tbl = mem_tbl_570x;
13337
13338         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13339                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13340                 if (err)
13341                         break;
13342         }
13343
13344         return err;
13345 }
13346
13347 #define TG3_TSO_MSS             500
13348
13349 #define TG3_TSO_IP_HDR_LEN      20
13350 #define TG3_TSO_TCP_HDR_LEN     20
13351 #define TG3_TSO_TCP_OPT_LEN     12
13352
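/* Canned 54-byte loopback frame header: EtherType 0x0800, a 20-byte
 * IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP) and a 32-byte TCP
 * header carrying 12 bytes of NOP/timestamp options.
 */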
13353 static const u8 tg3_tso_header[] = {
13354 0x08, 0x00,
13355 0x45, 0x00, 0x00, 0x00,
13356 0x00, 0x00, 0x40, 0x00,
13357 0x40, 0x06, 0x00, 0x00,
13358 0x0a, 0x00, 0x00, 0x01,
13359 0x0a, 0x00, 0x00, 0x02,
13360 0x0d, 0x00, 0xe0, 0x00,
13361 0x00, 0x00, 0x01, 0x00,
13362 0x00, 0x00, 0x02, 0x00,
13363 0x80, 0x10, 0x10, 0x00,
13364 0x14, 0x09, 0x00, 0x00,
13365 0x01, 0x01, 0x08, 0x0a,
13366 0x11, 0x11, 0x11, 0x11,
13367 0x11, 0x11, 0x11, 0x11,
13368 };
13369
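/* Send one (optionally TSO) test frame through the currently
 * configured loopback path, then verify that every payload byte
 * arrives intact on the expected RX ring.  Returns 0 on success,
 * -ENOMEM/-EIO on failure.
 */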
13370 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13371 {
13372         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13373         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13374         u32 budget;
13375         struct sk_buff *skb;
13376         u8 *tx_data, *rx_data;
13377         dma_addr_t map;
13378         int num_pkts, tx_len, rx_len, i, err;
13379         struct tg3_rx_buffer_desc *desc;
13380         struct tg3_napi *tnapi, *rnapi;
13381         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13382
13383         tnapi = &tp->napi[0];
13384         rnapi = &tp->napi[0];
13385         if (tp->irq_cnt > 1) {
13386                 if (tg3_flag(tp, ENABLE_RSS))
13387                         rnapi = &tp->napi[1];
13388                 if (tg3_flag(tp, ENABLE_TSS))
13389                         tnapi = &tp->napi[1];
13390         }
13391         coal_now = tnapi->coal_now | rnapi->coal_now;
13392
13393         err = -EIO;
13394
13395         tx_len = pktsz;
13396         skb = netdev_alloc_skb(tp->dev, tx_len);
13397         if (!skb)
13398                 return -ENOMEM;
13399
13400         tx_data = skb_put(skb, tx_len);
13401         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13402         memset(tx_data + ETH_ALEN, 0x0, 8);
13403
13404         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13405
13406         if (tso_loopback) {
13407                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13408
13409                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13410                               TG3_TSO_TCP_OPT_LEN;
13411
13412                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13413                        sizeof(tg3_tso_header));
13414                 mss = TG3_TSO_MSS;
13415
13416                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13417                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13418
13419                 /* Set the total length field in the IP header */
13420                 iph->tot_len = htons((u16)(mss + hdr_len));
13421
13422                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13423                               TXD_FLAG_CPU_POST_DMA);
13424
13425                 if (tg3_flag(tp, HW_TSO_1) ||
13426                     tg3_flag(tp, HW_TSO_2) ||
13427                     tg3_flag(tp, HW_TSO_3)) {
13428                         struct tcphdr *th;
13429                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13430                         th = (struct tcphdr *)&tx_data[val];
13431                         th->check = 0;
13432                 } else
13433                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13434
13435                 if (tg3_flag(tp, HW_TSO_3)) {
13436                         mss |= (hdr_len & 0xc) << 12;
13437                         if (hdr_len & 0x10)
13438                                 base_flags |= 0x00000010;
13439                         base_flags |= (hdr_len & 0x3e0) << 5;
13440                 } else if (tg3_flag(tp, HW_TSO_2))
13441                         mss |= hdr_len << 9;
13442                 else if (tg3_flag(tp, HW_TSO_1) ||
13443                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13444                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13445                 } else {
13446                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13447                 }
13448
13449                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13450         } else {
13451                 num_pkts = 1;
13452                 data_off = ETH_HLEN;
13453
13454                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13455                     tx_len > VLAN_ETH_FRAME_LEN)
13456                         base_flags |= TXD_FLAG_JMB_PKT;
13457         }
13458
13459         for (i = data_off; i < tx_len; i++)
13460                 tx_data[i] = (u8) (i & 0xff);
13461
13462         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13463         if (pci_dma_mapping_error(tp->pdev, map)) {
13464                 dev_kfree_skb(skb);
13465                 return -EIO;
13466         }
13467
13468         val = tnapi->tx_prod;
13469         tnapi->tx_buffers[val].skb = skb;
13470         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13471
13472         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13473                rnapi->coal_now);
13474
13475         udelay(10);
13476
13477         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13478
13479         budget = tg3_tx_avail(tnapi);
13480         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13481                             base_flags | TXD_FLAG_END, mss, 0)) {
13482                 tnapi->tx_buffers[val].skb = NULL;
13483                 dev_kfree_skb(skb);
13484                 return -EIO;
13485         }
13486
13487         tnapi->tx_prod++;
13488
13489         /* Sync BD data before updating mailbox */
13490         wmb();
13491
13492         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13493         tr32_mailbox(tnapi->prodmbox);
13494
13495         udelay(10);
13496
13497         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13498         for (i = 0; i < 35; i++) {
13499                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13500                        coal_now);
13501
13502                 udelay(10);
13503
13504                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13505                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13506                 if ((tx_idx == tnapi->tx_prod) &&
13507                     (rx_idx == (rx_start_idx + num_pkts)))
13508                         break;
13509         }
13510
13511         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13512         dev_kfree_skb(skb);
13513
13514         if (tx_idx != tnapi->tx_prod)
13515                 goto out;
13516
13517         if (rx_idx != rx_start_idx + num_pkts)
13518                 goto out;
13519
13520         val = data_off;
13521         while (rx_idx != rx_start_idx) {
13522                 desc = &rnapi->rx_rcb[rx_start_idx++];
13523                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13524                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13525
13526                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13527                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13528                         goto out;
13529
13530                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13531                          - ETH_FCS_LEN;
13532
13533                 if (!tso_loopback) {
13534                         if (rx_len != tx_len)
13535                                 goto out;
13536
13537                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13538                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13539                                         goto out;
13540                         } else {
13541                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13542                                         goto out;
13543                         }
13544                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13545                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13546                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13547                         goto out;
13548                 }
13549
13550                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13551                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13552                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13553                                              mapping);
13554                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13555                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13556                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13557                                              mapping);
13558                 } else
13559                         goto out;
13560
13561                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13562                                             PCI_DMA_FROMDEVICE);
13563
13564                 rx_data += TG3_RX_OFFSET(tp);
13565                 for (i = data_off; i < rx_len; i++, val++) {
13566                         if (*(rx_data + i) != (u8) (val & 0xff))
13567                                 goto out;
13568                 }
13569         }
13570
13571         err = 0;
13572
13573         /* tg3_free_rings will unmap and free the rx_data */
13574 out:
13575         return err;
13576 }
13577
13578 #define TG3_STD_LOOPBACK_FAILED         1
13579 #define TG3_JMB_LOOPBACK_FAILED         2
13580 #define TG3_TSO_LOOPBACK_FAILED         4
13581 #define TG3_LOOPBACK_FAILED \
13582         (TG3_STD_LOOPBACK_FAILED | \
13583          TG3_JMB_LOOPBACK_FAILED | \
13584          TG3_TSO_LOOPBACK_FAILED)
13585
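/* Run the standard/jumbo/TSO loopback permutations in MAC, internal
 * PHY and (optionally) external PHY loopback modes, accumulating the
 * per-mode failure bits in data[].  The EEE capability flag is masked
 * off for the duration of the test and restored afterwards.
 */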
13586 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13587 {
13588         int err = -EIO;
13589         u32 eee_cap;
13590         u32 jmb_pkt_sz = 9000;
13591
13592         if (tp->dma_limit)
13593                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13594
13595         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13596         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13597
13598         if (!netif_running(tp->dev)) {
13599                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13600                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13601                 if (do_extlpbk)
13602                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603                 goto done;
13604         }
13605
13606         err = tg3_reset_hw(tp, true);
13607         if (err) {
13608                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13609                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13610                 if (do_extlpbk)
13611                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612                 goto done;
13613         }
13614
13615         if (tg3_flag(tp, ENABLE_RSS)) {
13616                 int i;
13617
13618                 /* Reroute all rx packets to the 1st queue */
13619                 for (i = MAC_RSS_INDIR_TBL_0;
13620                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13621                         tw32(i, 0x0);
13622         }
13623
13624         /* HW erratum: MAC loopback fails in some cases on the 5780.
13625          * Normal traffic and PHY loopback are not affected by this
13626          * erratum.  Also, the MAC loopback test is deprecated for
13627          * all newer ASIC revisions.
13628          */
13629         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13630             !tg3_flag(tp, CPMU_PRESENT)) {
13631                 tg3_mac_loopback(tp, true);
13632
13633                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13634                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13635
13636                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13637                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13638                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13639
13640                 tg3_mac_loopback(tp, false);
13641         }
13642
13643         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13644             !tg3_flag(tp, USE_PHYLIB)) {
13645                 int i;
13646
13647                 tg3_phy_lpbk_set(tp, 0, false);
13648
13649                 /* Wait for link */
13650                 for (i = 0; i < 100; i++) {
13651                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13652                                 break;
13653                         mdelay(1);
13654                 }
13655
13656                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13657                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13658                 if (tg3_flag(tp, TSO_CAPABLE) &&
13659                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13660                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13661                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13662                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13663                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13664
13665                 if (do_extlpbk) {
13666                         tg3_phy_lpbk_set(tp, 0, true);
13667
13668                         /* All link indications report up, but the hardware
13669                          * isn't really ready for about 20 msec.  Double it
13670                          * to be sure.
13671                          */
13672                         mdelay(40);
13673
13674                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13675                                 data[TG3_EXT_LOOPB_TEST] |=
13676                                                         TG3_STD_LOOPBACK_FAILED;
13677                         if (tg3_flag(tp, TSO_CAPABLE) &&
13678                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13679                                 data[TG3_EXT_LOOPB_TEST] |=
13680                                                         TG3_TSO_LOOPBACK_FAILED;
13681                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683                                 data[TG3_EXT_LOOPB_TEST] |=
13684                                                         TG3_JMB_LOOPBACK_FAILED;
13685                 }
13686
13687                 /* Re-enable gphy autopowerdown. */
13688                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13689                         tg3_phy_toggle_apd(tp, true);
13690         }
13691
13692         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13693                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13694
13695 done:
13696         tp->phy_flags |= eee_cap;
13697
13698         return err;
13699 }
13700
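/* ethtool -t entry point.  The NVRAM and link tests run online; an
 * offline test additionally halts the chip to run the register,
 * memory, loopback and interrupt tests before restarting the hardware.
 */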
13701 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13702                           u64 *data)
13703 {
13704         struct tg3 *tp = netdev_priv(dev);
13705         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13706
13707         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13708                 if (tg3_power_up(tp)) {
13709                         etest->flags |= ETH_TEST_FL_FAILED;
13710                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13711                         return;
13712                 }
13713                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13714         }
13715
13716         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13717
13718         if (tg3_test_nvram(tp) != 0) {
13719                 etest->flags |= ETH_TEST_FL_FAILED;
13720                 data[TG3_NVRAM_TEST] = 1;
13721         }
13722         if (!doextlpbk && tg3_test_link(tp)) {
13723                 etest->flags |= ETH_TEST_FL_FAILED;
13724                 data[TG3_LINK_TEST] = 1;
13725         }
13726         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13727                 int err, err2 = 0, irq_sync = 0;
13728
13729                 if (netif_running(dev)) {
13730                         tg3_phy_stop(tp);
13731                         tg3_netif_stop(tp);
13732                         irq_sync = 1;
13733                 }
13734
13735                 tg3_full_lock(tp, irq_sync);
13736                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13737                 err = tg3_nvram_lock(tp);
13738                 tg3_halt_cpu(tp, RX_CPU_BASE);
13739                 if (!tg3_flag(tp, 5705_PLUS))
13740                         tg3_halt_cpu(tp, TX_CPU_BASE);
13741                 if (!err)
13742                         tg3_nvram_unlock(tp);
13743
13744                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13745                         tg3_phy_reset(tp);
13746
13747                 if (tg3_test_registers(tp) != 0) {
13748                         etest->flags |= ETH_TEST_FL_FAILED;
13749                         data[TG3_REGISTER_TEST] = 1;
13750                 }
13751
13752                 if (tg3_test_memory(tp) != 0) {
13753                         etest->flags |= ETH_TEST_FL_FAILED;
13754                         data[TG3_MEMORY_TEST] = 1;
13755                 }
13756
13757                 if (doextlpbk)
13758                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13759
13760                 if (tg3_test_loopback(tp, data, doextlpbk))
13761                         etest->flags |= ETH_TEST_FL_FAILED;
13762
13763                 tg3_full_unlock(tp);
13764
13765                 if (tg3_test_interrupt(tp) != 0) {
13766                         etest->flags |= ETH_TEST_FL_FAILED;
13767                         data[TG3_INTERRUPT_TEST] = 1;
13768                 }
13769
13770                 tg3_full_lock(tp, 0);
13771
13772                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13773                 if (netif_running(dev)) {
13774                         tg3_flag_set(tp, INIT_COMPLETE);
13775                         err2 = tg3_restart_hw(tp, true);
13776                         if (!err2)
13777                                 tg3_netif_start(tp);
13778                 }
13779
13780                 tg3_full_unlock(tp);
13781
13782                 if (irq_sync && !err2)
13783                         tg3_phy_start(tp);
13784         }
13785         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13786                 tg3_power_down_prepare(tp);
13787
13788 }
13789
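/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config RX
 * filter into the chip's RX PTP control bits and enable or disable TX
 * timestamping.  Only PTP-capable devices support this.
 */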
13790 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13791 {
13792         struct tg3 *tp = netdev_priv(dev);
13793         struct hwtstamp_config stmpconf;
13794
13795         if (!tg3_flag(tp, PTP_CAPABLE))
13796                 return -EOPNOTSUPP;
13797
13798         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13799                 return -EFAULT;
13800
13801         if (stmpconf.flags)
13802                 return -EINVAL;
13803
13804         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13805             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13806                 return -ERANGE;
13807
13808         switch (stmpconf.rx_filter) {
13809         case HWTSTAMP_FILTER_NONE:
13810                 tp->rxptpctl = 0;
13811                 break;
13812         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13813                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13814                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13815                 break;
13816         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13817                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13818                                TG3_RX_PTP_CTL_SYNC_EVNT;
13819                 break;
13820         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13821                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13822                                TG3_RX_PTP_CTL_DELAY_REQ;
13823                 break;
13824         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13825                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13826                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13827                 break;
13828         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13829                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13830                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13831                 break;
13832         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13833                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13834                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13835                 break;
13836         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13837                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13838                                TG3_RX_PTP_CTL_SYNC_EVNT;
13839                 break;
13840         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13841                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13842                                TG3_RX_PTP_CTL_SYNC_EVNT;
13843                 break;
13844         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13845                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13846                                TG3_RX_PTP_CTL_SYNC_EVNT;
13847                 break;
13848         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13849                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13850                                TG3_RX_PTP_CTL_DELAY_REQ;
13851                 break;
13852         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13854                                TG3_RX_PTP_CTL_DELAY_REQ;
13855                 break;
13856         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13858                                TG3_RX_PTP_CTL_DELAY_REQ;
13859                 break;
13860         default:
13861                 return -ERANGE;
13862         }
13863
13864         if (netif_running(dev) && tp->rxptpctl)
13865                 tw32(TG3_RX_PTP_CTL,
13866                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13867
13868         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13869                 tg3_flag_set(tp, TX_TSTAMP_EN);
13870         else
13871                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13872
13873         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13874                 -EFAULT : 0;
13875 }
13876
13877 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13878 {
13879         struct tg3 *tp = netdev_priv(dev);
13880         struct hwtstamp_config stmpconf;
13881
13882         if (!tg3_flag(tp, PTP_CAPABLE))
13883                 return -EOPNOTSUPP;
13884
13885         stmpconf.flags = 0;
13886         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13887                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13888
13889         switch (tp->rxptpctl) {
13890         case 0:
13891                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13892                 break;
13893         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13894                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13895                 break;
13896         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13897                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13898                 break;
13899         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13900                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13901                 break;
13902         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13903                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13904                 break;
13905         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13906                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13907                 break;
13908         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13909                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13910                 break;
13911         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13912                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13913                 break;
13914         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13915                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13916                 break;
13917         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13918                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13919                 break;
13920         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13921                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13922                 break;
13923         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13924                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13925                 break;
13926         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13927                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13928                 break;
13929         default:
13930                 WARN_ON_ONCE(1);
13931                 return -ERANGE;
13932         }
13933
13934         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13935                 -EFAULT : 0;
13936 }
13937
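/* ndo_do_ioctl: MII register access (delegated to phylib when that is
 * in use) plus the hardware timestamping get/set ioctls.
 */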
13938 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13939 {
13940         struct mii_ioctl_data *data = if_mii(ifr);
13941         struct tg3 *tp = netdev_priv(dev);
13942         int err;
13943
13944         if (tg3_flag(tp, USE_PHYLIB)) {
13945                 struct phy_device *phydev;
13946                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13947                         return -EAGAIN;
13948                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13949                 return phy_mii_ioctl(phydev, ifr, cmd);
13950         }
13951
13952         switch (cmd) {
13953         case SIOCGMIIPHY:
13954                 data->phy_id = tp->phy_addr;
13955
13956                 /* fallthru */
13957         case SIOCGMIIREG: {
13958                 u32 mii_regval;
13959
13960                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13961                         break;                  /* We have no PHY */
13962
13963                 if (!netif_running(dev))
13964                         return -EAGAIN;
13965
13966                 spin_lock_bh(&tp->lock);
13967                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13968                                     data->reg_num & 0x1f, &mii_regval);
13969                 spin_unlock_bh(&tp->lock);
13970
13971                 data->val_out = mii_regval;
13972
13973                 return err;
13974         }
13975
13976         case SIOCSMIIREG:
13977                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13978                         break;                  /* We have no PHY */
13979
13980                 if (!netif_running(dev))
13981                         return -EAGAIN;
13982
13983                 spin_lock_bh(&tp->lock);
13984                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13985                                      data->reg_num & 0x1f, data->val_in);
13986                 spin_unlock_bh(&tp->lock);
13987
13988                 return err;
13989
13990         case SIOCSHWTSTAMP:
13991                 return tg3_hwtstamp_set(dev, ifr);
13992
13993         case SIOCGHWTSTAMP:
13994                 return tg3_hwtstamp_get(dev, ifr);
13995
13996         default:
13997                 /* do nothing */
13998                 break;
13999         }
14000         return -EOPNOTSUPP;
14001 }
14002
14003 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14004 {
14005         struct tg3 *tp = netdev_priv(dev);
14006
14007         memcpy(ec, &tp->coal, sizeof(*ec));
14008         return 0;
14009 }
14010
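/* ethtool -C handler.  On 5705-plus parts the IRQ-coalescing tick and
 * stats-block limits stay zero, so nonzero values for those parameters
 * are rejected there.
 */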
14011 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14012 {
14013         struct tg3 *tp = netdev_priv(dev);
14014         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14015         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14016
14017         if (!tg3_flag(tp, 5705_PLUS)) {
14018                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14019                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14020                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14021                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14022         }
14023
14024         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14025             (!ec->rx_coalesce_usecs) ||
14026             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14027             (!ec->tx_coalesce_usecs) ||
14028             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14029             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14030             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14031             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14032             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14033             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14034             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14035             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14036                 return -EINVAL;
14037
14038         /* Only copy relevant parameters, ignore all others. */
14039         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14040         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14041         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14042         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14043         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14044         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14045         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14046         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14047         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14048
14049         if (netif_running(dev)) {
14050                 tg3_full_lock(tp, 0);
14051                 __tg3_set_coalesce(tp, &tp->coal);
14052                 tg3_full_unlock(tp);
14053         }
14054         return 0;
14055 }
14056
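/* ethtool --set-eee handler: the advertised mask cannot be changed
 * directly, only the enable flags and the Tx LPI timer; when the
 * interface is up the new settings are applied via a PHY reset.
 */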
14057 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14058 {
14059         struct tg3 *tp = netdev_priv(dev);
14060
14061         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14062                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14063                 return -EOPNOTSUPP;
14064         }
14065
14066         if (edata->advertised != tp->eee.advertised) {
14067                 netdev_warn(tp->dev,
14068                             "Direct manipulation of EEE advertisement is not supported\n");
14069                 return -EINVAL;
14070         }
14071
14072         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14073                 netdev_warn(tp->dev,
14074                             "Maximum supported Tx LPI timer is %#x(u)\n",
14075                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14076                 return -EINVAL;
14077         }
14078
14079         tp->eee = *edata;
14080
14081         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14082         tg3_warn_mgmt_link_flap(tp);
14083
14084         if (netif_running(tp->dev)) {
14085                 tg3_full_lock(tp, 0);
14086                 tg3_setup_eee(tp);
14087                 tg3_phy_reset(tp);
14088                 tg3_full_unlock(tp);
14089         }
14090
14091         return 0;
14092 }
14093
14094 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14095 {
14096         struct tg3 *tp = netdev_priv(dev);
14097
14098         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14099                 netdev_warn(tp->dev,
14100                             "Board does not support EEE!\n");
14101                 return -EOPNOTSUPP;
14102         }
14103
14104         *edata = tp->eee;
14105         return 0;
14106 }
14107
14108 static const struct ethtool_ops tg3_ethtool_ops = {
14109         .get_drvinfo            = tg3_get_drvinfo,
14110         .get_regs_len           = tg3_get_regs_len,
14111         .get_regs               = tg3_get_regs,
14112         .get_wol                = tg3_get_wol,
14113         .set_wol                = tg3_set_wol,
14114         .get_msglevel           = tg3_get_msglevel,
14115         .set_msglevel           = tg3_set_msglevel,
14116         .nway_reset             = tg3_nway_reset,
14117         .get_link               = ethtool_op_get_link,
14118         .get_eeprom_len         = tg3_get_eeprom_len,
14119         .get_eeprom             = tg3_get_eeprom,
14120         .set_eeprom             = tg3_set_eeprom,
14121         .get_ringparam          = tg3_get_ringparam,
14122         .set_ringparam          = tg3_set_ringparam,
14123         .get_pauseparam         = tg3_get_pauseparam,
14124         .set_pauseparam         = tg3_set_pauseparam,
14125         .self_test              = tg3_self_test,
14126         .get_strings            = tg3_get_strings,
14127         .set_phys_id            = tg3_set_phys_id,
14128         .get_ethtool_stats      = tg3_get_ethtool_stats,
14129         .get_coalesce           = tg3_get_coalesce,
14130         .set_coalesce           = tg3_set_coalesce,
14131         .get_sset_count         = tg3_get_sset_count,
14132         .get_rxnfc              = tg3_get_rxnfc,
14133         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14134         .get_rxfh               = tg3_get_rxfh,
14135         .set_rxfh               = tg3_set_rxfh,
14136         .get_channels           = tg3_get_channels,
14137         .set_channels           = tg3_set_channels,
14138         .get_ts_info            = tg3_get_ts_info,
14139         .get_eee                = tg3_get_eee,
14140         .set_eee                = tg3_set_eee,
14141         .get_link_ksettings     = tg3_get_link_ksettings,
14142         .set_link_ksettings     = tg3_set_link_ksettings,
14143 };
14144
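/* Return 64-bit device statistics, falling back to the counters saved
 * in net_stats_prev once the hardware stats block has been freed
 * (e.g. after the device has been closed).
 */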
14145 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14146                                                 struct rtnl_link_stats64 *stats)
14147 {
14148         struct tg3 *tp = netdev_priv(dev);
14149
14150         spin_lock_bh(&tp->lock);
14151         if (!tp->hw_stats) {
14152                 *stats = tp->net_stats_prev;
14153                 spin_unlock_bh(&tp->lock);
14154                 return stats;
14155         }
14156
14157         tg3_get_nstats(tp, stats);
14158         spin_unlock_bh(&tp->lock);
14159
14160         return stats;
14161 }
14162
14163 static void tg3_set_rx_mode(struct net_device *dev)
14164 {
14165         struct tg3 *tp = netdev_priv(dev);
14166
14167         if (!netif_running(dev))
14168                 return;
14169
14170         tg3_full_lock(tp, 0);
14171         __tg3_set_rx_mode(dev);
14172         tg3_full_unlock(tp);
14173 }
14174
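/* Record the new MTU and adjust the jumbo frame flags.  On the 5780
 * class, jumbo support appears to be mutually exclusive with TSO, so
 * the TSO_CAPABLE flag is toggled instead of the jumbo ring enable.
 */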
14175 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14176                                int new_mtu)
14177 {
14178         dev->mtu = new_mtu;
14179
14180         if (new_mtu > ETH_DATA_LEN) {
14181                 if (tg3_flag(tp, 5780_CLASS)) {
14182                         netdev_update_features(dev);
14183                         tg3_flag_clear(tp, TSO_CAPABLE);
14184                 } else {
14185                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14186                 }
14187         } else {
14188                 if (tg3_flag(tp, 5780_CLASS)) {
14189                         tg3_flag_set(tp, TSO_CAPABLE);
14190                         netdev_update_features(dev);
14191                 }
14192                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14193         }
14194 }
14195
14196 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14197 {
14198         struct tg3 *tp = netdev_priv(dev);
14199         int err;
14200         bool reset_phy = false;
14201
14202         if (!netif_running(dev)) {
14203         /* We'll just catch it later when the
14204          * device is brought up.
14205          */
14206                 tg3_set_mtu(dev, tp, new_mtu);
14207                 return 0;
14208         }
14209
14210         tg3_phy_stop(tp);
14211
14212         tg3_netif_stop(tp);
14213
14214         tg3_set_mtu(dev, tp, new_mtu);
14215
14216         tg3_full_lock(tp, 1);
14217
14218         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14219
14220         /* Reset the PHY, otherwise the read DMA engine will be left in a
14221          * mode that breaks all requests up into 256-byte chunks.
14222          */
14223         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14224                 reset_phy = true;
14225
14226         err = tg3_restart_hw(tp, reset_phy);
14227
14228         if (!err)
14229                 tg3_netif_start(tp);
14230
14231         tg3_full_unlock(tp);
14232
14233         if (!err)
14234                 tg3_phy_start(tp);
14235
14236         return err;
14237 }
14238
14239 static const struct net_device_ops tg3_netdev_ops = {
14240         .ndo_open               = tg3_open,
14241         .ndo_stop               = tg3_close,
14242         .ndo_start_xmit         = tg3_start_xmit,
14243         .ndo_get_stats64        = tg3_get_stats64,
14244         .ndo_validate_addr      = eth_validate_addr,
14245         .ndo_set_rx_mode        = tg3_set_rx_mode,
14246         .ndo_set_mac_address    = tg3_set_mac_addr,
14247         .ndo_do_ioctl           = tg3_ioctl,
14248         .ndo_tx_timeout         = tg3_tx_timeout,
14249         .ndo_change_mtu         = tg3_change_mtu,
14250         .ndo_fix_features       = tg3_fix_features,
14251         .ndo_set_features       = tg3_set_features,
14252 #ifdef CONFIG_NET_POLL_CONTROLLER
14253         .ndo_poll_controller    = tg3_poll_controller,
14254 #endif
14255 };
14256
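/* Probe the size of an EEPROM-style part.  The default of
 * EEPROM_CHIP_SIZE is kept if the magic signature cannot be read.
 */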
14257 static void tg3_get_eeprom_size(struct tg3 *tp)
14258 {
14259         u32 cursize, val, magic;
14260
14261         tp->nvram_size = EEPROM_CHIP_SIZE;
14262
14263         if (tg3_nvram_read(tp, 0, &magic) != 0)
14264                 return;
14265
14266         if ((magic != TG3_EEPROM_MAGIC) &&
14267             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14268             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14269                 return;
14270
14271         /*
14272          * Size the chip by reading offsets at increasing powers of two.
14273          * When we encounter our validation signature, we know the addressing
14274          * has wrapped around, and thus have our chip size.
14275          */
14276         cursize = 0x10;
14277
14278         while (cursize < tp->nvram_size) {
14279                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14280                         return;
14281
14282                 if (val == magic)
14283                         break;
14284
14285                 cursize <<= 1;
14286         }
14287
14288         tp->nvram_size = cursize;
14289 }
14290
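/* Determine the NVRAM size.  Selfboot images lack the standard magic
 * and are sized by probing; otherwise the size recorded at offset 0xf0
 * is used, with 512KB assumed when that field reads as zero.
 */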
14291 static void tg3_get_nvram_size(struct tg3 *tp)
14292 {
14293         u32 val;
14294
14295         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14296                 return;
14297
14298         /* Selfboot format */
14299         if (val != TG3_EEPROM_MAGIC) {
14300                 tg3_get_eeprom_size(tp);
14301                 return;
14302         }
14303
14304         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14305                 if (val != 0) {
14306                         /* This is confusing.  We want to operate on the
14307                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14308                          * call will read from NVRAM and byteswap the data
14309                          * according to the byteswapping settings for all
14310                          * other register accesses.  This ensures the data we
14311                          * want will always reside in the lower 16-bits.
14312                          * However, the data in NVRAM is in LE format, which
14313                          * means the data from the NVRAM read will always be
14314                          * opposite the endianness of the CPU.  The 16-bit
14315                          * byteswap then brings the data to CPU endianness.
14316                          */
14317                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14318                         return;
14319                 }
14320         }
14321         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14322 }
14323
14324 static void tg3_get_nvram_info(struct tg3 *tp)
14325 {
14326         u32 nvcfg1;
14327
14328         nvcfg1 = tr32(NVRAM_CFG1);
14329         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14330                 tg3_flag_set(tp, FLASH);
14331         } else {
14332                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14333                 tw32(NVRAM_CFG1, nvcfg1);
14334         }
14335
14336         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14337             tg3_flag(tp, 5780_CLASS)) {
14338                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14339                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14340                         tp->nvram_jedecnum = JEDEC_ATMEL;
14341                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14342                         tg3_flag_set(tp, NVRAM_BUFFERED);
14343                         break;
14344                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14345                         tp->nvram_jedecnum = JEDEC_ATMEL;
14346                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14347                         break;
14348                 case FLASH_VENDOR_ATMEL_EEPROM:
14349                         tp->nvram_jedecnum = JEDEC_ATMEL;
14350                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14351                         tg3_flag_set(tp, NVRAM_BUFFERED);
14352                         break;
14353                 case FLASH_VENDOR_ST:
14354                         tp->nvram_jedecnum = JEDEC_ST;
14355                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14356                         tg3_flag_set(tp, NVRAM_BUFFERED);
14357                         break;
14358                 case FLASH_VENDOR_SAIFUN:
14359                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14360                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14361                         break;
14362                 case FLASH_VENDOR_SST_SMALL:
14363                 case FLASH_VENDOR_SST_LARGE:
14364                         tp->nvram_jedecnum = JEDEC_SST;
14365                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14366                         break;
14367                 }
14368         } else {
14369                 tp->nvram_jedecnum = JEDEC_ATMEL;
14370                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14371                 tg3_flag_set(tp, NVRAM_BUFFERED);
14372         }
14373 }
14374
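/* Translate the 5752-style page size field of NVRAM_CFG1 into a page
 * size in bytes.
 */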
14375 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14376 {
14377         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14378         case FLASH_5752PAGE_SIZE_256:
14379                 tp->nvram_pagesize = 256;
14380                 break;
14381         case FLASH_5752PAGE_SIZE_512:
14382                 tp->nvram_pagesize = 512;
14383                 break;
14384         case FLASH_5752PAGE_SIZE_1K:
14385                 tp->nvram_pagesize = 1024;
14386                 break;
14387         case FLASH_5752PAGE_SIZE_2K:
14388                 tp->nvram_pagesize = 2048;
14389                 break;
14390         case FLASH_5752PAGE_SIZE_4K:
14391                 tp->nvram_pagesize = 4096;
14392                 break;
14393         case FLASH_5752PAGE_SIZE_264:
14394                 tp->nvram_pagesize = 264;
14395                 break;
14396         case FLASH_5752PAGE_SIZE_528:
14397                 tp->nvram_pagesize = 528;
14398                 break;
14399         }
14400 }
14401
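/* The tg3_get_*_nvram_info() routines below each handle one chip
 * family, decoding the vendor/device strapping in NVRAM_CFG1 (where
 * present) into the JEDEC id, page size, total size and flags.
 */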
14402 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14403 {
14404         u32 nvcfg1;
14405
14406         nvcfg1 = tr32(NVRAM_CFG1);
14407
14408         /* NVRAM protection for TPM */
14409         if (nvcfg1 & (1 << 27))
14410                 tg3_flag_set(tp, PROTECTED_NVRAM);
14411
14412         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14413         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14414         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14415                 tp->nvram_jedecnum = JEDEC_ATMEL;
14416                 tg3_flag_set(tp, NVRAM_BUFFERED);
14417                 break;
14418         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14419                 tp->nvram_jedecnum = JEDEC_ATMEL;
14420                 tg3_flag_set(tp, NVRAM_BUFFERED);
14421                 tg3_flag_set(tp, FLASH);
14422                 break;
14423         case FLASH_5752VENDOR_ST_M45PE10:
14424         case FLASH_5752VENDOR_ST_M45PE20:
14425         case FLASH_5752VENDOR_ST_M45PE40:
14426                 tp->nvram_jedecnum = JEDEC_ST;
14427                 tg3_flag_set(tp, NVRAM_BUFFERED);
14428                 tg3_flag_set(tp, FLASH);
14429                 break;
14430         }
14431
14432         if (tg3_flag(tp, FLASH)) {
14433                 tg3_nvram_get_pagesize(tp, nvcfg1);
14434         } else {
14435                 /* For EEPROM, set pagesize to the maximum EEPROM size. */
14436                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14437
14438                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14439                 tw32(NVRAM_CFG1, nvcfg1);
14440         }
14441 }
14442
14443 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14444 {
14445         u32 nvcfg1, protect = 0;
14446
14447         nvcfg1 = tr32(NVRAM_CFG1);
14448
14449         /* NVRAM protection for TPM */
14450         if (nvcfg1 & (1 << 27)) {
14451                 tg3_flag_set(tp, PROTECTED_NVRAM);
14452                 protect = 1;
14453         }
14454
14455         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14456         switch (nvcfg1) {
14457         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14458         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14459         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14460         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14461                 tp->nvram_jedecnum = JEDEC_ATMEL;
14462                 tg3_flag_set(tp, NVRAM_BUFFERED);
14463                 tg3_flag_set(tp, FLASH);
14464                 tp->nvram_pagesize = 264;
14465                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14466                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14467                         tp->nvram_size = (protect ? 0x3e200 :
14468                                           TG3_NVRAM_SIZE_512KB);
14469                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14470                         tp->nvram_size = (protect ? 0x1f200 :
14471                                           TG3_NVRAM_SIZE_256KB);
14472                 else
14473                         tp->nvram_size = (protect ? 0x1f200 :
14474                                           TG3_NVRAM_SIZE_128KB);
14475                 break;
14476         case FLASH_5752VENDOR_ST_M45PE10:
14477         case FLASH_5752VENDOR_ST_M45PE20:
14478         case FLASH_5752VENDOR_ST_M45PE40:
14479                 tp->nvram_jedecnum = JEDEC_ST;
14480                 tg3_flag_set(tp, NVRAM_BUFFERED);
14481                 tg3_flag_set(tp, FLASH);
14482                 tp->nvram_pagesize = 256;
14483                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14484                         tp->nvram_size = (protect ?
14485                                           TG3_NVRAM_SIZE_64KB :
14486                                           TG3_NVRAM_SIZE_128KB);
14487                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14488                         tp->nvram_size = (protect ?
14489                                           TG3_NVRAM_SIZE_64KB :
14490                                           TG3_NVRAM_SIZE_256KB);
14491                 else
14492                         tp->nvram_size = (protect ?
14493                                           TG3_NVRAM_SIZE_128KB :
14494                                           TG3_NVRAM_SIZE_512KB);
14495                 break;
14496         }
14497 }
14498
14499 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14500 {
14501         u32 nvcfg1;
14502
14503         nvcfg1 = tr32(NVRAM_CFG1);
14504
14505         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14506         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14507         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14508         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14509         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14510                 tp->nvram_jedecnum = JEDEC_ATMEL;
14511                 tg3_flag_set(tp, NVRAM_BUFFERED);
14512                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14513
14514                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14515                 tw32(NVRAM_CFG1, nvcfg1);
14516                 break;
14517         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14518         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14519         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14520         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14521                 tp->nvram_jedecnum = JEDEC_ATMEL;
14522                 tg3_flag_set(tp, NVRAM_BUFFERED);
14523                 tg3_flag_set(tp, FLASH);
14524                 tp->nvram_pagesize = 264;
14525                 break;
14526         case FLASH_5752VENDOR_ST_M45PE10:
14527         case FLASH_5752VENDOR_ST_M45PE20:
14528         case FLASH_5752VENDOR_ST_M45PE40:
14529                 tp->nvram_jedecnum = JEDEC_ST;
14530                 tg3_flag_set(tp, NVRAM_BUFFERED);
14531                 tg3_flag_set(tp, FLASH);
14532                 tp->nvram_pagesize = 256;
14533                 break;
14534         }
14535 }
14536
14537 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14538 {
14539         u32 nvcfg1, protect = 0;
14540
14541         nvcfg1 = tr32(NVRAM_CFG1);
14542
14543         /* NVRAM protection for TPM */
14544         if (nvcfg1 & (1 << 27)) {
14545                 tg3_flag_set(tp, PROTECTED_NVRAM);
14546                 protect = 1;
14547         }
14548
14549         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14550         switch (nvcfg1) {
14551         case FLASH_5761VENDOR_ATMEL_ADB021D:
14552         case FLASH_5761VENDOR_ATMEL_ADB041D:
14553         case FLASH_5761VENDOR_ATMEL_ADB081D:
14554         case FLASH_5761VENDOR_ATMEL_ADB161D:
14555         case FLASH_5761VENDOR_ATMEL_MDB021D:
14556         case FLASH_5761VENDOR_ATMEL_MDB041D:
14557         case FLASH_5761VENDOR_ATMEL_MDB081D:
14558         case FLASH_5761VENDOR_ATMEL_MDB161D:
14559                 tp->nvram_jedecnum = JEDEC_ATMEL;
14560                 tg3_flag_set(tp, NVRAM_BUFFERED);
14561                 tg3_flag_set(tp, FLASH);
14562                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14563                 tp->nvram_pagesize = 256;
14564                 break;
14565         case FLASH_5761VENDOR_ST_A_M45PE20:
14566         case FLASH_5761VENDOR_ST_A_M45PE40:
14567         case FLASH_5761VENDOR_ST_A_M45PE80:
14568         case FLASH_5761VENDOR_ST_A_M45PE16:
14569         case FLASH_5761VENDOR_ST_M_M45PE20:
14570         case FLASH_5761VENDOR_ST_M_M45PE40:
14571         case FLASH_5761VENDOR_ST_M_M45PE80:
14572         case FLASH_5761VENDOR_ST_M_M45PE16:
14573                 tp->nvram_jedecnum = JEDEC_ST;
14574                 tg3_flag_set(tp, NVRAM_BUFFERED);
14575                 tg3_flag_set(tp, FLASH);
14576                 tp->nvram_pagesize = 256;
14577                 break;
14578         }
14579
14580         if (protect) {
14581                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14582         } else {
14583                 switch (nvcfg1) {
14584                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14585                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14586                 case FLASH_5761VENDOR_ST_A_M45PE16:
14587                 case FLASH_5761VENDOR_ST_M_M45PE16:
14588                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14589                         break;
14590                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14591                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14592                 case FLASH_5761VENDOR_ST_A_M45PE80:
14593                 case FLASH_5761VENDOR_ST_M_M45PE80:
14594                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14595                         break;
14596                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14597                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14598                 case FLASH_5761VENDOR_ST_A_M45PE40:
14599                 case FLASH_5761VENDOR_ST_M_M45PE40:
14600                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14601                         break;
14602                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14603                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14604                 case FLASH_5761VENDOR_ST_A_M45PE20:
14605                 case FLASH_5761VENDOR_ST_M_M45PE20:
14606                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14607                         break;
14608                 }
14609         }
14610 }
14611
14612 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14613 {
14614         tp->nvram_jedecnum = JEDEC_ATMEL;
14615         tg3_flag_set(tp, NVRAM_BUFFERED);
14616         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14617 }
14618
14619 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14620 {
14621         u32 nvcfg1;
14622
14623         nvcfg1 = tr32(NVRAM_CFG1);
14624
14625         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14626         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14627         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14628                 tp->nvram_jedecnum = JEDEC_ATMEL;
14629                 tg3_flag_set(tp, NVRAM_BUFFERED);
14630                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14631
14632                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14633                 tw32(NVRAM_CFG1, nvcfg1);
14634                 return;
14635         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14636         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14637         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14638         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14639         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14640         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14641         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14642                 tp->nvram_jedecnum = JEDEC_ATMEL;
14643                 tg3_flag_set(tp, NVRAM_BUFFERED);
14644                 tg3_flag_set(tp, FLASH);
14645
14646                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14647                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14648                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14649                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14650                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14651                         break;
14652                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14653                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14654                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14655                         break;
14656                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14657                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14658                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14659                         break;
14660                 }
14661                 break;
14662         case FLASH_5752VENDOR_ST_M45PE10:
14663         case FLASH_5752VENDOR_ST_M45PE20:
14664         case FLASH_5752VENDOR_ST_M45PE40:
14665                 tp->nvram_jedecnum = JEDEC_ST;
14666                 tg3_flag_set(tp, NVRAM_BUFFERED);
14667                 tg3_flag_set(tp, FLASH);
14668
14669                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14670                 case FLASH_5752VENDOR_ST_M45PE10:
14671                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14672                         break;
14673                 case FLASH_5752VENDOR_ST_M45PE20:
14674                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14675                         break;
14676                 case FLASH_5752VENDOR_ST_M45PE40:
14677                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14678                         break;
14679                 }
14680                 break;
14681         default:
14682                 tg3_flag_set(tp, NO_NVRAM);
14683                 return;
14684         }
14685
14686         tg3_nvram_get_pagesize(tp, nvcfg1);
14687         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14688                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14689 }
14690
14692 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14693 {
14694         u32 nvcfg1;
14695
14696         nvcfg1 = tr32(NVRAM_CFG1);
14697
14698         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14699         case FLASH_5717VENDOR_ATMEL_EEPROM:
14700         case FLASH_5717VENDOR_MICRO_EEPROM:
14701                 tp->nvram_jedecnum = JEDEC_ATMEL;
14702                 tg3_flag_set(tp, NVRAM_BUFFERED);
14703                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14704
14705                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14706                 tw32(NVRAM_CFG1, nvcfg1);
14707                 return;
14708         case FLASH_5717VENDOR_ATMEL_MDB011D:
14709         case FLASH_5717VENDOR_ATMEL_ADB011B:
14710         case FLASH_5717VENDOR_ATMEL_ADB011D:
14711         case FLASH_5717VENDOR_ATMEL_MDB021D:
14712         case FLASH_5717VENDOR_ATMEL_ADB021B:
14713         case FLASH_5717VENDOR_ATMEL_ADB021D:
14714         case FLASH_5717VENDOR_ATMEL_45USPT:
14715                 tp->nvram_jedecnum = JEDEC_ATMEL;
14716                 tg3_flag_set(tp, NVRAM_BUFFERED);
14717                 tg3_flag_set(tp, FLASH);
14718
14719                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14720                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14721                         /* Detect size with tg3_nvram_get_size() */
14722                         break;
14723                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14724                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14725                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14726                         break;
14727                 default:
14728                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14729                         break;
14730                 }
14731                 break;
14732         case FLASH_5717VENDOR_ST_M_M25PE10:
14733         case FLASH_5717VENDOR_ST_A_M25PE10:
14734         case FLASH_5717VENDOR_ST_M_M45PE10:
14735         case FLASH_5717VENDOR_ST_A_M45PE10:
14736         case FLASH_5717VENDOR_ST_M_M25PE20:
14737         case FLASH_5717VENDOR_ST_A_M25PE20:
14738         case FLASH_5717VENDOR_ST_M_M45PE20:
14739         case FLASH_5717VENDOR_ST_A_M45PE20:
14740         case FLASH_5717VENDOR_ST_25USPT:
14741         case FLASH_5717VENDOR_ST_45USPT:
14742                 tp->nvram_jedecnum = JEDEC_ST;
14743                 tg3_flag_set(tp, NVRAM_BUFFERED);
14744                 tg3_flag_set(tp, FLASH);
14745
14746                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14747                 case FLASH_5717VENDOR_ST_M_M25PE20:
14748                 case FLASH_5717VENDOR_ST_M_M45PE20:
14749                         /* Detect size with tg3_nvram_get_size() */
14750                         break;
14751                 case FLASH_5717VENDOR_ST_A_M25PE20:
14752                 case FLASH_5717VENDOR_ST_A_M45PE20:
14753                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14754                         break;
14755                 default:
14756                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14757                         break;
14758                 }
14759                 break;
14760         default:
14761                 tg3_flag_set(tp, NO_NVRAM);
14762                 return;
14763         }
14764
14765         tg3_nvram_get_pagesize(tp, nvcfg1);
14766         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14767                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14768 }
14769
14770 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14771 {
14772         u32 nvcfg1, nvmpinstrp;
14773
14774         nvcfg1 = tr32(NVRAM_CFG1);
14775         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14776
14777         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14778                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14779                         tg3_flag_set(tp, NO_NVRAM);
14780                         return;
14781                 }
14782
14783                 switch (nvmpinstrp) {
14784                 case FLASH_5762_EEPROM_HD:
14785                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14786                         break;
14787                 case FLASH_5762_EEPROM_LD:
14788                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14789                         break;
14790                 case FLASH_5720VENDOR_M_ST_M45PE20:
14791                         /* This pinstrap supports multiple sizes, so force it
14792                          * to read the actual size from location 0xf0.
14793                          */
14794                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14795                         break;
14796                 }
14797         }
14798
14799         switch (nvmpinstrp) {
14800         case FLASH_5720_EEPROM_HD:
14801         case FLASH_5720_EEPROM_LD:
14802                 tp->nvram_jedecnum = JEDEC_ATMEL;
14803                 tg3_flag_set(tp, NVRAM_BUFFERED);
14804
14805                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14806                 tw32(NVRAM_CFG1, nvcfg1);
14807                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14808                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14809                 else
14810                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14811                 return;
14812         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14813         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14814         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14815         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14816         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14817         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14818         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14819         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14820         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14821         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14822         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14823         case FLASH_5720VENDOR_ATMEL_45USPT:
14824                 tp->nvram_jedecnum = JEDEC_ATMEL;
14825                 tg3_flag_set(tp, NVRAM_BUFFERED);
14826                 tg3_flag_set(tp, FLASH);
14827
14828                 switch (nvmpinstrp) {
14829                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14830                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14831                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14832                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14833                         break;
14834                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14835                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14836                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14837                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14838                         break;
14839                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14840                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14841                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14842                         break;
14843                 default:
14844                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14845                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14846                         break;
14847                 }
14848                 break;
14849         case FLASH_5720VENDOR_M_ST_M25PE10:
14850         case FLASH_5720VENDOR_M_ST_M45PE10:
14851         case FLASH_5720VENDOR_A_ST_M25PE10:
14852         case FLASH_5720VENDOR_A_ST_M45PE10:
14853         case FLASH_5720VENDOR_M_ST_M25PE20:
14854         case FLASH_5720VENDOR_M_ST_M45PE20:
14855         case FLASH_5720VENDOR_A_ST_M25PE20:
14856         case FLASH_5720VENDOR_A_ST_M45PE20:
14857         case FLASH_5720VENDOR_M_ST_M25PE40:
14858         case FLASH_5720VENDOR_M_ST_M45PE40:
14859         case FLASH_5720VENDOR_A_ST_M25PE40:
14860         case FLASH_5720VENDOR_A_ST_M45PE40:
14861         case FLASH_5720VENDOR_M_ST_M25PE80:
14862         case FLASH_5720VENDOR_M_ST_M45PE80:
14863         case FLASH_5720VENDOR_A_ST_M25PE80:
14864         case FLASH_5720VENDOR_A_ST_M45PE80:
14865         case FLASH_5720VENDOR_ST_25USPT:
14866         case FLASH_5720VENDOR_ST_45USPT:
14867                 tp->nvram_jedecnum = JEDEC_ST;
14868                 tg3_flag_set(tp, NVRAM_BUFFERED);
14869                 tg3_flag_set(tp, FLASH);
14870
14871                 switch (nvmpinstrp) {
14872                 case FLASH_5720VENDOR_M_ST_M25PE20:
14873                 case FLASH_5720VENDOR_M_ST_M45PE20:
14874                 case FLASH_5720VENDOR_A_ST_M25PE20:
14875                 case FLASH_5720VENDOR_A_ST_M45PE20:
14876                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14877                         break;
14878                 case FLASH_5720VENDOR_M_ST_M25PE40:
14879                 case FLASH_5720VENDOR_M_ST_M45PE40:
14880                 case FLASH_5720VENDOR_A_ST_M25PE40:
14881                 case FLASH_5720VENDOR_A_ST_M45PE40:
14882                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14883                         break;
14884                 case FLASH_5720VENDOR_M_ST_M25PE80:
14885                 case FLASH_5720VENDOR_M_ST_M45PE80:
14886                 case FLASH_5720VENDOR_A_ST_M25PE80:
14887                 case FLASH_5720VENDOR_A_ST_M45PE80:
14888                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14889                         break;
14890                 default:
14891                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14892                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14893                         break;
14894                 }
14895                 break;
14896         default:
14897                 tg3_flag_set(tp, NO_NVRAM);
14898                 return;
14899         }
14900
14901         tg3_nvram_get_pagesize(tp, nvcfg1);
14902         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14903                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14904
14905         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14906                 u32 val;
14907
14908                 if (tg3_nvram_read(tp, 0, &val))
14909                         return;
14910
14911                 if (val != TG3_EEPROM_MAGIC &&
14912                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14913                         tg3_flag_set(tp, NO_NVRAM);
14914         }
14915 }
14916
14917 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14918 static void tg3_nvram_init(struct tg3 *tp)
14919 {
14920         if (tg3_flag(tp, IS_SSB_CORE)) {
14921                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14922                 tg3_flag_clear(tp, NVRAM);
14923                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14924                 tg3_flag_set(tp, NO_NVRAM);
14925                 return;
14926         }
14927
14928         tw32_f(GRC_EEPROM_ADDR,
14929              (EEPROM_ADDR_FSM_RESET |
14930               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14931                EEPROM_ADDR_CLKPERD_SHIFT)));
14932
14933         msleep(1);
14934
14935         /* Enable serial EEPROM accesses. */
14936         tw32_f(GRC_LOCAL_CTRL,
14937              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14938         udelay(100);
14939
14940         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14941             tg3_asic_rev(tp) != ASIC_REV_5701) {
14942                 tg3_flag_set(tp, NVRAM);
14943
14944                 if (tg3_nvram_lock(tp)) {
14945                         netdev_warn(tp->dev,
14946                                     "Cannot get nvram lock, %s failed\n",
14947                                     __func__);
14948                         return;
14949                 }
14950                 tg3_enable_nvram_access(tp);
14951
14952                 tp->nvram_size = 0;
14953
14954                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14955                         tg3_get_5752_nvram_info(tp);
14956                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14957                         tg3_get_5755_nvram_info(tp);
14958                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14959                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14960                          tg3_asic_rev(tp) == ASIC_REV_5785)
14961                         tg3_get_5787_nvram_info(tp);
14962                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14963                         tg3_get_5761_nvram_info(tp);
14964                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14965                         tg3_get_5906_nvram_info(tp);
14966                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14967                          tg3_flag(tp, 57765_CLASS))
14968                         tg3_get_57780_nvram_info(tp);
14969                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14970                          tg3_asic_rev(tp) == ASIC_REV_5719)
14971                         tg3_get_5717_nvram_info(tp);
14972                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14973                          tg3_asic_rev(tp) == ASIC_REV_5762)
14974                         tg3_get_5720_nvram_info(tp);
14975                 else
14976                         tg3_get_nvram_info(tp);
14977
14978                 if (tp->nvram_size == 0)
14979                         tg3_get_nvram_size(tp);
14980
14981                 tg3_disable_nvram_access(tp);
14982                 tg3_nvram_unlock(tp);
14983
14984         } else {
14985                 tg3_flag_clear(tp, NVRAM);
14986                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14987
14988                 tg3_get_eeprom_size(tp);
14989         }
14990 }
14991
14992 struct subsys_tbl_ent {
14993         u16 subsys_vendor, subsys_devid;
14994         u32 phy_id;
14995 };
14996
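/* Mapping from PCI subsystem IDs to the PHY ID expected on that board,
 * used as a fallback when the PHY cannot be probed directly.
 */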
14997 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14998         /* Broadcom boards. */
14999         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15000           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15001         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15002           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15003         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15004           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15005         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15006           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15007         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15008           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15009         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15010           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15011         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15012           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15013         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15014           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15015         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15016           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15017         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15018           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15019         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15020           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15021
15022         /* 3com boards. */
15023         { TG3PCI_SUBVENDOR_ID_3COM,
15024           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15025         { TG3PCI_SUBVENDOR_ID_3COM,
15026           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15027         { TG3PCI_SUBVENDOR_ID_3COM,
15028           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15029         { TG3PCI_SUBVENDOR_ID_3COM,
15030           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15031         { TG3PCI_SUBVENDOR_ID_3COM,
15032           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15033
15034         /* DELL boards. */
15035         { TG3PCI_SUBVENDOR_ID_DELL,
15036           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15037         { TG3PCI_SUBVENDOR_ID_DELL,
15038           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15039         { TG3PCI_SUBVENDOR_ID_DELL,
15040           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15041         { TG3PCI_SUBVENDOR_ID_DELL,
15042           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15043
15044         /* Compaq boards. */
15045         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15046           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15047         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15048           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15049         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15050           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15051         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15052           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15053         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15054           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15055
15056         /* IBM boards. */
15057         { TG3PCI_SUBVENDOR_ID_IBM,
15058           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15059 };
15060
15061 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15062 {
15063         int i;
15064
15065         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15066                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15067                      tp->pdev->subsystem_vendor) &&
15068                     (subsys_id_to_phy_id[i].subsys_devid ==
15069                      tp->pdev->subsystem_device))
15070                         return &subsys_id_to_phy_id[i];
15071         }
15072         return NULL;
15073 }
15074
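/* Read the initial hardware configuration (PHY id, LED mode, WOL and
 * ASF/APE enables, etc.) from the NIC SRAM copy of the EEPROM, or from
 * the VCPU shadow register on the 5906.
 */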
15075 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15076 {
15077         u32 val;
15078
15079         tp->phy_id = TG3_PHY_ID_INVALID;
15080         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15081
15082         /* Assume an onboard, WOL-capable device by default. */
15083         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15084         tg3_flag_set(tp, WOL_CAP);
15085
15086         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15087                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15088                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15089                         tg3_flag_set(tp, IS_NIC);
15090                 }
15091                 val = tr32(VCPU_CFGSHDW);
15092                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15093                         tg3_flag_set(tp, ASPM_WORKAROUND);
15094                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15095                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15096                         tg3_flag_set(tp, WOL_ENABLE);
15097                         device_set_wakeup_enable(&tp->pdev->dev, true);
15098                 }
15099                 goto done;
15100         }
15101
15102         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15103         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15104                 u32 nic_cfg, led_cfg;
15105                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15106                 u32 nic_phy_id, ver, eeprom_phy_id;
15107                 int eeprom_phy_serdes = 0;
15108
15109                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15110                 tp->nic_sram_data_cfg = nic_cfg;
15111
15112                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15113                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15114                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15115                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15116                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15117                     (ver > 0) && (ver < 0x100))
15118                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15119
15120                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15121                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15122
15123                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15124                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15125                     tg3_asic_rev(tp) == ASIC_REV_5720)
15126                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15127
15128                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15129                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15130                         eeprom_phy_serdes = 1;
15131
15132                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15133                 if (nic_phy_id != 0) {
15134                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15135                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15136
15137                         eeprom_phy_id  = (id1 >> 16) << 10;
15138                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15139                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15140                 } else
15141                         eeprom_phy_id = 0;
15142
15143                 tp->phy_id = eeprom_phy_id;
15144                 if (eeprom_phy_serdes) {
15145                         if (!tg3_flag(tp, 5705_PLUS))
15146                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15147                         else
15148                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15149                 }
15150
15151                 if (tg3_flag(tp, 5750_PLUS))
15152                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15153                                     SHASTA_EXT_LED_MODE_MASK);
15154                 else
15155                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15156
15157                 switch (led_cfg) {
15158                 default:
15159                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15160                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15161                         break;
15162
15163                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15164                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15165                         break;
15166
15167                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15168                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15169
15170                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15171                          * as happens with some older 5700/5701 bootcode.
15172                          */
15173                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15174                             tg3_asic_rev(tp) == ASIC_REV_5701)
15175                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15176
15177                         break;
15178
15179                 case SHASTA_EXT_LED_SHARED:
15180                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15181                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15182                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15183                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15184                                                  LED_CTRL_MODE_PHY_2);
15185
15186                         if (tg3_flag(tp, 5717_PLUS) ||
15187                             tg3_asic_rev(tp) == ASIC_REV_5762)
15188                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15189                                                 LED_CTRL_BLINK_RATE_MASK;
15190
15191                         break;
15192
15193                 case SHASTA_EXT_LED_MAC:
15194                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15195                         break;
15196
15197                 case SHASTA_EXT_LED_COMBO:
15198                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15199                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15200                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15201                                                  LED_CTRL_MODE_PHY_2);
15202                         break;
15203
15204                 }
15205
15206                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15207                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15208                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15209                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15210
15211                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15212                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15213
15214                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15215                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15216                         if ((tp->pdev->subsystem_vendor ==
15217                              PCI_VENDOR_ID_ARIMA) &&
15218                             (tp->pdev->subsystem_device == 0x205a ||
15219                              tp->pdev->subsystem_device == 0x2063))
15220                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15221                 } else {
15222                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15223                         tg3_flag_set(tp, IS_NIC);
15224                 }
15225
15226                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15227                         tg3_flag_set(tp, ENABLE_ASF);
15228                         if (tg3_flag(tp, 5750_PLUS))
15229                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15230                 }
15231
15232                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15233                     tg3_flag(tp, 5750_PLUS))
15234                         tg3_flag_set(tp, ENABLE_APE);
15235
15236                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15237                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15238                         tg3_flag_clear(tp, WOL_CAP);
15239
15240                 if (tg3_flag(tp, WOL_CAP) &&
15241                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15242                         tg3_flag_set(tp, WOL_ENABLE);
15243                         device_set_wakeup_enable(&tp->pdev->dev, true);
15244                 }
15245
15246                 if (cfg2 & (1 << 17))
15247                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15248
15249                 /* SerDes signal pre-emphasis in register 0x590 is set by
15250                  * the bootcode if bit 18 is set. */
15251                 if (cfg2 & (1 << 18))
15252                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15253
15254                 if ((tg3_flag(tp, 57765_PLUS) ||
15255                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15256                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15257                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15258                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15259
15260                 if (tg3_flag(tp, PCI_EXPRESS)) {
15261                         u32 cfg3;
15262
15263                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15264                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15265                             !tg3_flag(tp, 57765_PLUS) &&
15266                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15267                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15268                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15269                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15270                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15271                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15272                 }
15273
15274                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15275                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15276                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15277                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15278                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15279                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15280
15281                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15282                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15283         }
15284 done:
15285         if (tg3_flag(tp, WOL_CAP))
15286                 device_set_wakeup_enable(&tp->pdev->dev,
15287                                          tg3_flag(tp, WOL_ENABLE));
15288         else
15289                 device_set_wakeup_capable(&tp->pdev->dev, false);
15290 }
15291
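/* Read one word from the chip's OTP region via the APE registers,
 * polling up to ~1ms for command completion.
 */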
15292 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15293 {
15294         int i, err;
15295         u32 val2, off = offset * 8;
15296
15297         err = tg3_nvram_lock(tp);
15298         if (err)
15299                 return err;
15300
15301         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15302         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15303                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15304         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15305         udelay(10);
15306
15307         for (i = 0; i < 100; i++) {
15308                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15309                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15310                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15311                         break;
15312                 }
15313                 udelay(10);
15314         }
15315
15316         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15317
15318         tg3_nvram_unlock(tp);
15319         if (val2 & APE_OTP_STATUS_CMD_DONE)
15320                 return 0;
15321
15322         return -EBUSY;
15323 }
15324
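/* Issue an OTP command through the GRC-mapped OTP registers and poll
 * for completion.
 */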
15325 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15326 {
15327         int i;
15328         u32 val;
15329
15330         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15331         tw32(OTP_CTRL, cmd);
15332
15333         /* Wait for up to 1 ms for command to execute. */
15334         for (i = 0; i < 100; i++) {
15335                 val = tr32(OTP_STATUS);
15336                 if (val & OTP_STATUS_CMD_DONE)
15337                         break;
15338                 udelay(10);
15339         }
15340
15341         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15342 }
15343
15344 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15345  * configuration is a 32-bit value that straddles the alignment boundary.
15346  * We do two 32-bit reads and then shift and merge the results.
15347  */
15348 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15349 {
15350         u32 bhalf_otp, thalf_otp;
15351
15352         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15353
15354         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15355                 return 0;
15356
15357         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15358
15359         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15360                 return 0;
15361
15362         thalf_otp = tr32(OTP_READ_DATA);
15363
15364         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15365
15366         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15367                 return 0;
15368
15369         bhalf_otp = tr32(OTP_READ_DATA);
15370
15371         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15372 }
15373
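/* Initialize link_config to advertise everything the PHY type
 * supports, with autonegotiation enabled and the link state unknown.
 */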
15374 static void tg3_phy_init_link_config(struct tg3 *tp)
15375 {
15376         u32 adv = ADVERTISED_Autoneg;
15377
15378         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15379                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15380                         adv |= ADVERTISED_1000baseT_Half;
15381                 adv |= ADVERTISED_1000baseT_Full;
15382         }
15383
15384         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15385                 adv |= ADVERTISED_100baseT_Half |
15386                        ADVERTISED_100baseT_Full |
15387                        ADVERTISED_10baseT_Half |
15388                        ADVERTISED_10baseT_Full |
15389                        ADVERTISED_TP;
15390         else
15391                 adv |= ADVERTISED_FIBRE;
15392
15393         tp->link_config.advertising = adv;
15394         tp->link_config.speed = SPEED_UNKNOWN;
15395         tp->link_config.duplex = DUPLEX_UNKNOWN;
15396         tp->link_config.autoneg = AUTONEG_ENABLE;
15397         tp->link_config.active_speed = SPEED_UNKNOWN;
15398         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15399
15400         tp->old_link = -1;
15401 }
15402
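/* Identify the attached PHY.  When ASF/APE firmware owns the PHY,
 * the MII ID registers are not read; the ID from the eeprom or the
 * hardcoded subsystem table is used instead.
 */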
15403 static int tg3_phy_probe(struct tg3 *tp)
15404 {
15405         u32 hw_phy_id_1, hw_phy_id_2;
15406         u32 hw_phy_id, hw_phy_id_masked;
15407         int err;
15408
15409         /* flow control autonegotiation is default behavior */
15410         tg3_flag_set(tp, PAUSE_AUTONEG);
15411         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15412
15413         if (tg3_flag(tp, ENABLE_APE)) {
15414                 switch (tp->pci_fn) {
15415                 case 0:
15416                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15417                         break;
15418                 case 1:
15419                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15420                         break;
15421                 case 2:
15422                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15423                         break;
15424                 case 3:
15425                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15426                         break;
15427                 }
15428         }
15429
15430         if (!tg3_flag(tp, ENABLE_ASF) &&
15431             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15432             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15433                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15434                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15435
15436         if (tg3_flag(tp, USE_PHYLIB))
15437                 return tg3_phy_init(tp);
15438
15439         /* Reading the PHY ID register can conflict with ASF
15440          * firmware access to the PHY hardware.
15441          */
15442         err = 0;
15443         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15444                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15445         } else {
15446                 /* Now read the physical PHY_ID from the chip and verify
15447                  * that it is sane.  If it doesn't look good, fall back
15448                  * first to the hard-coded subsystem-table PHY_ID and,
15449                  * failing that, to the value found in the eeprom area.
15450                  */
15451                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15452                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15453
15454                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15455                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15456                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15457
15458                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15459         }
15460
15461         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15462                 tp->phy_id = hw_phy_id;
15463                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15464                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15465                 else
15466                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15467         } else {
15468                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15469                         /* Do nothing, phy ID already set up in
15470                          * tg3_get_eeprom_hw_cfg().
15471                          */
15472                 } else {
15473                         struct subsys_tbl_ent *p;
15474
15475                         /* No eeprom signature?  Try the hardcoded
15476                          * subsys device table.
15477                          */
15478                         p = tg3_lookup_by_subsys(tp);
15479                         if (p) {
15480                                 tp->phy_id = p->phy_id;
15481                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15482                                 /* So far we have seen the IDs 0xbc050cd0,
15483                                  * 0xbc050f80 and 0xbc050c30 on devices
15484                                  * connected to a BCM4785, and there are
15485                                  * probably more.  For now, just assume the
15486                                  * phy is supported when it is connected to
15487                                  * an SSB core.
15488                                  */
15489                                 return -ENODEV;
15490                         }
15491
15492                         if (!tp->phy_id ||
15493                             tp->phy_id == TG3_PHY_ID_BCM8002)
15494                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15495                 }
15496         }
15497
15498         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15499             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15500              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15501              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15502              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15503              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15504               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15505              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15506               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15507                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15508
15509                 tp->eee.supported = SUPPORTED_100baseT_Full |
15510                                     SUPPORTED_1000baseT_Full;
15511                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15512                                      ADVERTISED_1000baseT_Full;
15513                 tp->eee.eee_enabled = 1;
15514                 tp->eee.tx_lpi_enabled = 1;
15515                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15516         }
15517
15518         tg3_phy_init_link_config(tp);
15519
15520         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15521             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15522             !tg3_flag(tp, ENABLE_APE) &&
15523             !tg3_flag(tp, ENABLE_ASF)) {
15524                 u32 bmsr, dummy;
15525
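                /* BMSR link status is latched, so read it twice to
                 * get the current state.
                 */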
15526                 tg3_readphy(tp, MII_BMSR, &bmsr);
15527                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15528                     (bmsr & BMSR_LSTATUS))
15529                         goto skip_phy_reset;
15530
15531                 err = tg3_phy_reset(tp);
15532                 if (err)
15533                         return err;
15534
15535                 tg3_phy_set_wirespeed(tp);
15536
15537                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15538                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15539                                             tp->link_config.flowctrl);
15540
15541                         tg3_writephy(tp, MII_BMCR,
15542                                      BMCR_ANENABLE | BMCR_ANRESTART);
15543                 }
15544         }
15545
15546 skip_phy_reset:
15547         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15548                 err = tg3_init_5401phy_dsp(tp);
15549                 if (err)
15550                         return err;
15551
15552                 err = tg3_init_5401phy_dsp(tp);
15553         }
15554
15555         return err;
15556 }
15557
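/* Extract the board part number, and on boards whose VPD MFR_ID
 * field reads "1028" (Dell) a vendor firmware string, from the PCI
 * VPD read-only section.  Falls back to device-ID based names when
 * no VPD is present.
 */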
15558 static void tg3_read_vpd(struct tg3 *tp)
15559 {
15560         u8 *vpd_data;
15561         unsigned int block_end, rosize, len;
15562         u32 vpdlen;
15563         int j, i = 0;
15564
15565         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15566         if (!vpd_data)
15567                 goto out_no_vpd;
15568
15569         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15570         if (i < 0)
15571                 goto out_not_found;
15572
15573         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15574         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15575         i += PCI_VPD_LRDT_TAG_SIZE;
15576
15577         if (block_end > vpdlen)
15578                 goto out_not_found;
15579
15580         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15581                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15582         if (j > 0) {
15583                 len = pci_vpd_info_field_size(&vpd_data[j]);
15584
15585                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15586                 if (j + len > block_end || len != 4 ||
15587                     memcmp(&vpd_data[j], "1028", 4))
15588                         goto partno;
15589
15590                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15591                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15592                 if (j < 0)
15593                         goto partno;
15594
15595                 len = pci_vpd_info_field_size(&vpd_data[j]);
15596
15597                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15598                 if (j + len > block_end)
15599                         goto partno;
15600
15601                 if (len >= sizeof(tp->fw_ver))
15602                         len = sizeof(tp->fw_ver) - 1;
15603                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15604                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15605                          &vpd_data[j]);
15606         }
15607
15608 partno:
15609         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15610                                       PCI_VPD_RO_KEYWORD_PARTNO);
15611         if (i < 0)
15612                 goto out_not_found;
15613
15614         len = pci_vpd_info_field_size(&vpd_data[i]);
15615
15616         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15617         if (len > TG3_BPN_SIZE ||
15618             (len + i) > vpdlen)
15619                 goto out_not_found;
15620
15621         memcpy(tp->board_part_number, &vpd_data[i], len);
15622
15623 out_not_found:
15624         kfree(vpd_data);
15625         if (tp->board_part_number[0])
15626                 return;
15627
15628 out_no_vpd:
15629         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15630                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15631                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15632                         strcpy(tp->board_part_number, "BCM5717");
15633                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15634                         strcpy(tp->board_part_number, "BCM5718");
15635                 else
15636                         goto nomatch;
15637         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15638                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15639                         strcpy(tp->board_part_number, "BCM57780");
15640                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15641                         strcpy(tp->board_part_number, "BCM57760");
15642                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15643                         strcpy(tp->board_part_number, "BCM57790");
15644                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15645                         strcpy(tp->board_part_number, "BCM57788");
15646                 else
15647                         goto nomatch;
15648         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15649                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15650                         strcpy(tp->board_part_number, "BCM57761");
15651                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15652                         strcpy(tp->board_part_number, "BCM57765");
15653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15654                         strcpy(tp->board_part_number, "BCM57781");
15655                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15656                         strcpy(tp->board_part_number, "BCM57785");
15657                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15658                         strcpy(tp->board_part_number, "BCM57791");
15659                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15660                         strcpy(tp->board_part_number, "BCM57795");
15661                 else
15662                         goto nomatch;
15663         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15664                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15665                         strcpy(tp->board_part_number, "BCM57762");
15666                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15667                         strcpy(tp->board_part_number, "BCM57766");
15668                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15669                         strcpy(tp->board_part_number, "BCM57782");
15670                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15671                         strcpy(tp->board_part_number, "BCM57786");
15672                 else
15673                         goto nomatch;
15674         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15675                 strcpy(tp->board_part_number, "BCM95906");
15676         } else {
15677 nomatch:
15678                 strcpy(tp->board_part_number, "none");
15679         }
15680 }
15681
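/* Sanity check a firmware image header: the first word must carry
 * the 0x0c000000 signature in its top bits and the second word must
 * be zero.
 */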
15682 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15683 {
15684         u32 val;
15685
15686         if (tg3_nvram_read(tp, offset, &val) ||
15687             (val & 0xfc000000) != 0x0c000000 ||
15688             tg3_nvram_read(tp, offset + 4, &val) ||
15689             val != 0)
15690                 return 0;
15691
15692         return 1;
15693 }
15694
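/* Extract the bootcode version from NVRAM.  Newer images embed a
 * 16-byte version string reached through a pointer in the header;
 * older ones store major/minor fields at TG3_NVM_PTREV_BCVER.
 */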
15695 static void tg3_read_bc_ver(struct tg3 *tp)
15696 {
15697         u32 val, offset, start, ver_offset;
15698         int i, dst_off;
15699         bool newver = false;
15700
15701         if (tg3_nvram_read(tp, 0xc, &offset) ||
15702             tg3_nvram_read(tp, 0x4, &start))
15703                 return;
15704
15705         offset = tg3_nvram_logical_addr(tp, offset);
15706
15707         if (tg3_nvram_read(tp, offset, &val))
15708                 return;
15709
15710         if ((val & 0xfc000000) == 0x0c000000) {
15711                 if (tg3_nvram_read(tp, offset + 4, &val))
15712                         return;
15713
15714                 if (val == 0)
15715                         newver = true;
15716         }
15717
15718         dst_off = strlen(tp->fw_ver);
15719
15720         if (newver) {
15721                 if (TG3_VER_SIZE - dst_off < 16 ||
15722                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15723                         return;
15724
15725                 offset = offset + ver_offset - start;
15726                 for (i = 0; i < 16; i += 4) {
15727                         __be32 v;
15728                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15729                                 return;
15730
15731                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15732                 }
15733         } else {
15734                 u32 major, minor;
15735
15736                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15737                         return;
15738
15739                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15740                         TG3_NVM_BCVER_MAJSFT;
15741                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15742                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15743                          "v%d.%02d", major, minor);
15744         }
15745 }
15746
15747 static void tg3_read_hwsb_ver(struct tg3 *tp)
15748 {
15749         u32 val, major, minor;
15750
15751         /* Use native endian representation */
15752         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15753                 return;
15754
15755         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15756                 TG3_NVM_HWSB_CFG1_MAJSFT;
15757         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15758                 TG3_NVM_HWSB_CFG1_MINSFT;
15759
15760         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15761 }
15762
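/* Decode the selfboot format-1 version (major/minor plus an
 * optional build letter) from the revision-specific EDH offset.
 */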
15763 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15764 {
15765         u32 offset, major, minor, build;
15766
15767         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15768
15769         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15770                 return;
15771
15772         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15773         case TG3_EEPROM_SB_REVISION_0:
15774                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15775                 break;
15776         case TG3_EEPROM_SB_REVISION_2:
15777                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15778                 break;
15779         case TG3_EEPROM_SB_REVISION_3:
15780                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15781                 break;
15782         case TG3_EEPROM_SB_REVISION_4:
15783                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15784                 break;
15785         case TG3_EEPROM_SB_REVISION_5:
15786                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15787                 break;
15788         case TG3_EEPROM_SB_REVISION_6:
15789                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15790                 break;
15791         default:
15792                 return;
15793         }
15794
15795         if (tg3_nvram_read(tp, offset, &val))
15796                 return;
15797
15798         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15799                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15800         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15801                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15802         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15803
15804         if (minor > 99 || build > 26)
15805                 return;
15806
15807         offset = strlen(tp->fw_ver);
15808         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15809                  " v%d.%02d", major, minor);
15810
15811         if (build > 0) {
15812                 offset = strlen(tp->fw_ver);
15813                 if (offset < TG3_VER_SIZE - 1)
15814                         tp->fw_ver[offset] = 'a' + build - 1;
15815         }
15816 }
15817
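/* Find the ASF initialization directory entry in NVRAM, validate
 * the firmware image it points to, and append that image's version
 * string to tp->fw_ver.
 */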
15818 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15819 {
15820         u32 val, offset, start;
15821         int i, vlen;
15822
15823         for (offset = TG3_NVM_DIR_START;
15824              offset < TG3_NVM_DIR_END;
15825              offset += TG3_NVM_DIRENT_SIZE) {
15826                 if (tg3_nvram_read(tp, offset, &val))
15827                         return;
15828
15829                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15830                         break;
15831         }
15832
15833         if (offset == TG3_NVM_DIR_END)
15834                 return;
15835
15836         if (!tg3_flag(tp, 5705_PLUS))
15837                 start = 0x08000000;
15838         else if (tg3_nvram_read(tp, offset - 4, &start))
15839                 return;
15840
15841         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15842             !tg3_fw_img_is_valid(tp, offset) ||
15843             tg3_nvram_read(tp, offset + 8, &val))
15844                 return;
15845
15846         offset += val - start;
15847
15848         vlen = strlen(tp->fw_ver);
15849
15850         tp->fw_ver[vlen++] = ',';
15851         tp->fw_ver[vlen++] = ' ';
15852
15853         for (i = 0; i < 4; i++) {
15854                 __be32 v;
15855                 if (tg3_nvram_read_be32(tp, offset, &v))
15856                         return;
15857
15858                 offset += sizeof(v);
15859
15860                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15861                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15862                         break;
15863                 }
15864
15865                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15866                 vlen += sizeof(v);
15867         }
15868 }
15869
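/* Detect NC-SI capable APE firmware by checking the shared memory
 * signature, the firmware-ready status, and the NCSI feature bit.
 */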
15870 static void tg3_probe_ncsi(struct tg3 *tp)
15871 {
15872         u32 apedata;
15873
15874         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15875         if (apedata != APE_SEG_SIG_MAGIC)
15876                 return;
15877
15878         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15879         if (!(apedata & APE_FW_STATUS_READY))
15880                 return;
15881
15882         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15883                 tg3_flag_set(tp, APE_HAS_NCSI);
15884 }
15885
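/* Append the APE firmware version to tp->fw_ver, labelled NCSI,
 * SMASH, or DASH according to the firmware flavor.
 */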
15886 static void tg3_read_dash_ver(struct tg3 *tp)
15887 {
15888         int vlen;
15889         u32 apedata;
15890         char *fwtype;
15891
15892         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15893
15894         if (tg3_flag(tp, APE_HAS_NCSI))
15895                 fwtype = "NCSI";
15896         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15897                 fwtype = "SMASH";
15898         else
15899                 fwtype = "DASH";
15900
15901         vlen = strlen(tp->fw_ver);
15902
15903         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15904                  fwtype,
15905                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15906                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15907                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15908                  (apedata & APE_FW_VERSION_BLDMSK));
15909 }
15910
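/* On 5762 parts, read two OTP words through the APE and, if the
 * magic signature is valid, append a two-digit version suffix to
 * tp->fw_ver.
 */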
15911 static void tg3_read_otp_ver(struct tg3 *tp)
15912 {
15913         u32 val, val2;
15914
15915         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15916                 return;
15917
15918         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15919             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15920             TG3_OTP_MAGIC0_VALID(val)) {
15921                 u64 val64 = (u64) val << 32 | val2;
15922                 u32 ver = 0;
15923                 int i, vlen;
15924
15925                 for (i = 0; i < 7; i++) {
15926                         if ((val64 & 0xff) == 0)
15927                                 break;
15928                         ver = val64 & 0xff;
15929                         val64 >>= 8;
15930                 }
15931                 vlen = strlen(tp->fw_ver);
15932                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15933         }
15934 }
15935
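/* Assemble tp->fw_ver by dispatching on the NVRAM magic value
 * (bootcode, selfboot, or hardware selfboot image) and then
 * appending any ASF/APE management firmware version.
 */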
15936 static void tg3_read_fw_ver(struct tg3 *tp)
15937 {
15938         u32 val;
15939         bool vpd_vers = false;
15940
15941         if (tp->fw_ver[0] != 0)
15942                 vpd_vers = true;
15943
15944         if (tg3_flag(tp, NO_NVRAM)) {
15945                 strcat(tp->fw_ver, "sb");
15946                 tg3_read_otp_ver(tp);
15947                 return;
15948         }
15949
15950         if (tg3_nvram_read(tp, 0, &val))
15951                 return;
15952
15953         if (val == TG3_EEPROM_MAGIC)
15954                 tg3_read_bc_ver(tp);
15955         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15956                 tg3_read_sb_ver(tp, val);
15957         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15958                 tg3_read_hwsb_ver(tp);
15959
15960         if (tg3_flag(tp, ENABLE_ASF)) {
15961                 if (tg3_flag(tp, ENABLE_APE)) {
15962                         tg3_probe_ncsi(tp);
15963                         if (!vpd_vers)
15964                                 tg3_read_dash_ver(tp);
15965                 } else if (!vpd_vers) {
15966                         tg3_read_mgmtfw_ver(tp);
15967                 }
15968         }
15969
15970         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15971 }
15972
15973 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15974 {
15975         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15976                 return TG3_RX_RET_MAX_SIZE_5717;
15977         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15978                 return TG3_RX_RET_MAX_SIZE_5700;
15979         else
15980                 return TG3_RX_RET_MAX_SIZE_5705;
15981 }
15982
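/* Host bridges known to reorder posted writes; devices behind them
 * need the MBOX_WRITE_REORDER workaround set up below in
 * tg3_get_invariants().
 */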
15983 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15984         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15985         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15986         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15987         { },
15988 };
15989
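/* Find the other PCI function of a dual-port (5704/5714) device.
 * Returns tp->pdev itself when the device is configured in
 * single-port mode.
 */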
15990 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15991 {
15992         struct pci_dev *peer;
15993         unsigned int func, devnr = tp->pdev->devfn & ~7;
15994
15995         for (func = 0; func < 8; func++) {
15996                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15997                 if (peer && peer != tp->pdev)
15998                         break;
15999                 pci_dev_put(peer);
16000         }
16001         /* 5704 can be configured in single-port mode; set peer to
16002          * tp->pdev in that case.
16003          */
16004         if (!peer) {
16005                 peer = tp->pdev;
16006                 return peer;
16007         }
16008
16009         /*
16010          * We don't need to keep the refcount elevated; there's no way
16011          * to remove one half of this device without removing the other.
16012          */
16013         pci_dev_put(peer);
16014
16015         return peer;
16016 }
16017
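/* Derive the chip revision ID.  Newer devices report a placeholder
 * in MISC_HOST_CTRL and expose the real ASIC rev through a
 * product-ID config register.  Also sets the chip-family flags
 * (5717_PLUS, 5755_PLUS, 5750_PLUS, ...) used throughout the
 * driver.
 */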
16018 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16019 {
16020         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16021         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16022                 u32 reg;
16023
16024                 /* All devices that use the alternate
16025                  * ASIC REV location have a CPMU.
16026                  */
16027                 tg3_flag_set(tp, CPMU_PRESENT);
16028
16029                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16030                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16031                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16040                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16041                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16042                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16051                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16052                 else
16053                         reg = TG3PCI_PRODID_ASICREV;
16054
16055                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16056         }
16057
16058         /* Wrong chip ID in 5752 A0. This code can be removed later
16059          * as A0 is not in production.
16060          */
16061         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16062                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16063
16064         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16065                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16066
16067         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16068             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16069             tg3_asic_rev(tp) == ASIC_REV_5720)
16070                 tg3_flag_set(tp, 5717_PLUS);
16071
16072         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16073             tg3_asic_rev(tp) == ASIC_REV_57766)
16074                 tg3_flag_set(tp, 57765_CLASS);
16075
16076         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16077              tg3_asic_rev(tp) == ASIC_REV_5762)
16078                 tg3_flag_set(tp, 57765_PLUS);
16079
16080         /* Intentionally exclude ASIC_REV_5906 */
16081         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16082             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16083             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16084             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16085             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16086             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16087             tg3_flag(tp, 57765_PLUS))
16088                 tg3_flag_set(tp, 5755_PLUS);
16089
16090         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16091             tg3_asic_rev(tp) == ASIC_REV_5714)
16092                 tg3_flag_set(tp, 5780_CLASS);
16093
16094         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16095             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16096             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16097             tg3_flag(tp, 5755_PLUS) ||
16098             tg3_flag(tp, 5780_CLASS))
16099                 tg3_flag_set(tp, 5750_PLUS);
16100
16101         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16102             tg3_flag(tp, 5750_PLUS))
16103                 tg3_flag_set(tp, 5705_PLUS);
16104 }
16105
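/* Report whether this device is limited to 10/100 operation, either
 * by board ID, by a FET PHY, or by the 10_100_ONLY driver_data
 * flag.
 */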
16106 static bool tg3_10_100_only_device(struct tg3 *tp,
16107                                    const struct pci_device_id *ent)
16108 {
16109         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16110
16111         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16112              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16113             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16114                 return true;
16115
16116         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16117                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16118                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16119                                 return true;
16120                 } else {
16121                         return true;
16122                 }
16123         }
16124
16125         return false;
16126 }
16127
16128 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16129 {
16130         u32 misc_ctrl_reg;
16131         u32 pci_state_reg, grc_misc_cfg;
16132         u32 val;
16133         u16 pci_cmd;
16134         int err;
16135
16136         /* Force memory write invalidate off.  If we leave it on,
16137          * then on 5700_BX chips we have to enable a workaround.
16138          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16139          * to match the cacheline size.  The Broadcom driver has this
16140          * workaround but turns MWI off all the time and so never uses
16141          * it.  This seems to suggest that the workaround is insufficient.
16142          */
16143         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16144         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16145         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16146
16147         /* Important! -- Make sure register accesses are byteswapped
16148          * correctly.  Also, for those chips that require it, make
16149          * sure that indirect register accesses are enabled before
16150          * the first operation.
16151          */
16152         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16153                               &misc_ctrl_reg);
16154         tp->misc_host_ctrl |= (misc_ctrl_reg &
16155                                MISC_HOST_CTRL_CHIPREV);
16156         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16157                                tp->misc_host_ctrl);
16158
16159         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16160
16161         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16162          * we need to disable memory and use config. cycles
16163          * only to access all registers. The 5702/03 chips
16164          * can mistakenly decode the special cycles from the
16165          * ICH chipsets as memory write cycles, causing corruption
16166          * of register and memory space. Only certain ICH bridges
16167          * will drive special cycles with non-zero data during the
16168          * address phase which can fall within the 5703's address
16169          * range. This is not an ICH bug as the PCI spec allows
16170          * non-zero address during special cycles. However, only
16171          * these ICH bridges are known to drive non-zero addresses
16172          * during special cycles.
16173          *
16174          * Since special cycles do not cross PCI bridges, we only
16175          * enable this workaround if the 5703 is on the secondary
16176          * bus of these ICH bridges.
16177          */
16178         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16179             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16180                 static struct tg3_dev_id {
16181                         u32     vendor;
16182                         u32     device;
16183                         u32     rev;
16184                 } ich_chipsets[] = {
16185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16186                           PCI_ANY_ID },
16187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16188                           PCI_ANY_ID },
16189                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16190                           0xa },
16191                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16192                           PCI_ANY_ID },
16193                         { },
16194                 };
16195                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16196                 struct pci_dev *bridge = NULL;
16197
16198                 while (pci_id->vendor != 0) {
16199                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16200                                                 bridge);
16201                         if (!bridge) {
16202                                 pci_id++;
16203                                 continue;
16204                         }
16205                         if (pci_id->rev != PCI_ANY_ID) {
16206                                 if (bridge->revision > pci_id->rev)
16207                                         continue;
16208                         }
16209                         if (bridge->subordinate &&
16210                             (bridge->subordinate->number ==
16211                              tp->pdev->bus->number)) {
16212                                 tg3_flag_set(tp, ICH_WORKAROUND);
16213                                 pci_dev_put(bridge);
16214                                 break;
16215                         }
16216                 }
16217         }
16218
16219         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16220                 static struct tg3_dev_id {
16221                         u32     vendor;
16222                         u32     device;
16223                 } bridge_chipsets[] = {
16224                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16225                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16226                         { },
16227                 };
16228                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16229                 struct pci_dev *bridge = NULL;
16230
16231                 while (pci_id->vendor != 0) {
16232                         bridge = pci_get_device(pci_id->vendor,
16233                                                 pci_id->device,
16234                                                 bridge);
16235                         if (!bridge) {
16236                                 pci_id++;
16237                                 continue;
16238                         }
16239                         if (bridge->subordinate &&
16240                             (bridge->subordinate->number <=
16241                              tp->pdev->bus->number) &&
16242                             (bridge->subordinate->busn_res.end >=
16243                              tp->pdev->bus->number)) {
16244                                 tg3_flag_set(tp, 5701_DMA_BUG);
16245                                 pci_dev_put(bridge);
16246                                 break;
16247                         }
16248                 }
16249         }
16250
16251         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16252          * DMA addresses > 40-bit. This bridge may have other additional
16253          * 57xx devices behind it in some 4-port NIC designs for example.
16254          * Any tg3 device found behind the bridge will also need the 40-bit
16255          * DMA workaround.
16256          */
16257         if (tg3_flag(tp, 5780_CLASS)) {
16258                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16259                 tp->msi_cap = tp->pdev->msi_cap;
16260         } else {
16261                 struct pci_dev *bridge = NULL;
16262
16263                 do {
16264                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16265                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16266                                                 bridge);
16267                         if (bridge && bridge->subordinate &&
16268                             (bridge->subordinate->number <=
16269                              tp->pdev->bus->number) &&
16270                             (bridge->subordinate->busn_res.end >=
16271                              tp->pdev->bus->number)) {
16272                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16273                                 pci_dev_put(bridge);
16274                                 break;
16275                         }
16276                 } while (bridge);
16277         }
16278
16279         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16280             tg3_asic_rev(tp) == ASIC_REV_5714)
16281                 tp->pdev_peer = tg3_find_peer(tp);
16282
16283         /* Determine TSO capabilities */
16284         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16285                 ; /* Do nothing. HW bug. */
16286         else if (tg3_flag(tp, 57765_PLUS))
16287                 tg3_flag_set(tp, HW_TSO_3);
16288         else if (tg3_flag(tp, 5755_PLUS) ||
16289                  tg3_asic_rev(tp) == ASIC_REV_5906)
16290                 tg3_flag_set(tp, HW_TSO_2);
16291         else if (tg3_flag(tp, 5750_PLUS)) {
16292                 tg3_flag_set(tp, HW_TSO_1);
16293                 tg3_flag_set(tp, TSO_BUG);
16294                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16295                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16296                         tg3_flag_clear(tp, TSO_BUG);
16297         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16298                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16299                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16300                 tg3_flag_set(tp, FW_TSO);
16301                 tg3_flag_set(tp, TSO_BUG);
16302                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16303                         tp->fw_needed = FIRMWARE_TG3TSO5;
16304                 else
16305                         tp->fw_needed = FIRMWARE_TG3TSO;
16306         }
16307
16308         /* Selectively allow TSO based on operating conditions */
16309         if (tg3_flag(tp, HW_TSO_1) ||
16310             tg3_flag(tp, HW_TSO_2) ||
16311             tg3_flag(tp, HW_TSO_3) ||
16312             tg3_flag(tp, FW_TSO)) {
16313                 /* For firmware TSO, assume ASF is disabled.
16314                  * We'll disable TSO later if we discover ASF
16315                  * is enabled in tg3_get_eeprom_hw_cfg().
16316                  */
16317                 tg3_flag_set(tp, TSO_CAPABLE);
16318         } else {
16319                 tg3_flag_clear(tp, TSO_CAPABLE);
16320                 tg3_flag_clear(tp, TSO_BUG);
16321                 tp->fw_needed = NULL;
16322         }
16323
16324         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16325                 tp->fw_needed = FIRMWARE_TG3;
16326
16327         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16328                 tp->fw_needed = FIRMWARE_TG357766;
16329
16330         tp->irq_max = 1;
16331
16332         if (tg3_flag(tp, 5750_PLUS)) {
16333                 tg3_flag_set(tp, SUPPORT_MSI);
16334                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16335                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16336                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16337                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16338                      tp->pdev_peer == tp->pdev))
16339                         tg3_flag_clear(tp, SUPPORT_MSI);
16340
16341                 if (tg3_flag(tp, 5755_PLUS) ||
16342                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16343                         tg3_flag_set(tp, 1SHOT_MSI);
16344                 }
16345
16346                 if (tg3_flag(tp, 57765_PLUS)) {
16347                         tg3_flag_set(tp, SUPPORT_MSIX);
16348                         tp->irq_max = TG3_IRQ_MAX_VECS;
16349                 }
16350         }
16351
16352         tp->txq_max = 1;
16353         tp->rxq_max = 1;
16354         if (tp->irq_max > 1) {
16355                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16356                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16357
16358                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16359                     tg3_asic_rev(tp) == ASIC_REV_5720)
16360                         tp->txq_max = tp->irq_max - 1;
16361         }
16362
16363         if (tg3_flag(tp, 5755_PLUS) ||
16364             tg3_asic_rev(tp) == ASIC_REV_5906)
16365                 tg3_flag_set(tp, SHORT_DMA_BUG);
16366
16367         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16368                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16369
16370         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16371             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16372             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16373             tg3_asic_rev(tp) == ASIC_REV_5762)
16374                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16375
16376         if (tg3_flag(tp, 57765_PLUS) &&
16377             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16378                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16379
16380         if (!tg3_flag(tp, 5705_PLUS) ||
16381             tg3_flag(tp, 5780_CLASS) ||
16382             tg3_flag(tp, USE_JUMBO_BDFLAG))
16383                 tg3_flag_set(tp, JUMBO_CAPABLE);
16384
16385         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16386                               &pci_state_reg);
16387
16388         if (pci_is_pcie(tp->pdev)) {
16389                 u16 lnkctl;
16390
16391                 tg3_flag_set(tp, PCI_EXPRESS);
16392
16393                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16394                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16395                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16396                                 tg3_flag_clear(tp, HW_TSO_2);
16397                                 tg3_flag_clear(tp, TSO_CAPABLE);
16398                         }
16399                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16400                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16401                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16402                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16403                                 tg3_flag_set(tp, CLKREQ_BUG);
16404                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16405                         tg3_flag_set(tp, L1PLLPD_EN);
16406                 }
16407         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16408                 /* BCM5785 devices are effectively PCIe devices, and should
16409                  * follow PCIe codepaths, but do not have a PCIe capabilities
16410                  * section.
16411                  */
16412                 tg3_flag_set(tp, PCI_EXPRESS);
16413         } else if (!tg3_flag(tp, 5705_PLUS) ||
16414                    tg3_flag(tp, 5780_CLASS)) {
16415                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16416                 if (!tp->pcix_cap) {
16417                         dev_err(&tp->pdev->dev,
16418                                 "Cannot find PCI-X capability, aborting\n");
16419                         return -EIO;
16420                 }
16421
16422                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16423                         tg3_flag_set(tp, PCIX_MODE);
16424         }
16425
16426         /* If we have an AMD 762 or VIA K8T800 chipset, write
16427          * reordering of mailbox register writes by the host
16428          * controller can cause serious problems.  We read back from
16429          * every mailbox register write to force the writes to be
16430          * posted to the chip in order.
16431          */
16432         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16433             !tg3_flag(tp, PCI_EXPRESS))
16434                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16435
16436         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16437                              &tp->pci_cacheline_sz);
16438         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16439                              &tp->pci_lat_timer);
16440         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16441             tp->pci_lat_timer < 64) {
16442                 tp->pci_lat_timer = 64;
16443                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16444                                       tp->pci_lat_timer);
16445         }
16446
16447         /* Important! -- It is critical that the PCI-X hw workaround
16448          * situation is decided before the first MMIO register access.
16449          */
16450         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16451                 /* 5700 BX chips need to have their TX producer index
16452                  * mailboxes written twice to work around a bug.
16453                  */
16454                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16455
16456                 /* If we are in PCI-X mode, enable register write workaround.
16457                  *
16458                  * The workaround is to use indirect register accesses
16459                  * for all chip writes not to mailbox registers.
16460                  */
16461                 if (tg3_flag(tp, PCIX_MODE)) {
16462                         u32 pm_reg;
16463
16464                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16465
16466                         /* The chip can have its power management PCI config
16467                          * space registers clobbered due to this bug.
16468                          * So explicitly force the chip into D0 here.
16469                          */
16470                         pci_read_config_dword(tp->pdev,
16471                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16472                                               &pm_reg);
16473                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16474                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16475                         pci_write_config_dword(tp->pdev,
16476                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16477                                                pm_reg);
16478
16479                         /* Also, force SERR#/PERR# in PCI command. */
16480                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16481                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16482                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16483                 }
16484         }
16485
16486         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16487                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16488         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16489                 tg3_flag_set(tp, PCI_32BIT);
16490
16491         /* Chip-specific fixup from Broadcom driver */
16492         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16493             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16494                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16495                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16496         }
16497
16498         /* Default fast path register access methods */
16499         tp->read32 = tg3_read32;
16500         tp->write32 = tg3_write32;
16501         tp->read32_mbox = tg3_read32;
16502         tp->write32_mbox = tg3_write32;
16503         tp->write32_tx_mbox = tg3_write32;
16504         tp->write32_rx_mbox = tg3_write32;
16505
16506         /* Various workaround register access methods */
16507         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16508                 tp->write32 = tg3_write_indirect_reg32;
16509         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16510                  (tg3_flag(tp, PCI_EXPRESS) &&
16511                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16512                 /*
16513                  * Back-to-back register writes can cause problems on these
16514                  * chips; the workaround is to read back all reg writes
16515                  * except those to mailbox regs.
16516                  *
16517                  * See tg3_write_indirect_reg32().
16518                  */
16519                 tp->write32 = tg3_write_flush_reg32;
16520         }
16521
16522         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16523                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16524                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16525                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16526         }
16527
16528         if (tg3_flag(tp, ICH_WORKAROUND)) {
16529                 tp->read32 = tg3_read_indirect_reg32;
16530                 tp->write32 = tg3_write_indirect_reg32;
16531                 tp->read32_mbox = tg3_read_indirect_mbox;
16532                 tp->write32_mbox = tg3_write_indirect_mbox;
16533                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16534                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16535
16536                 iounmap(tp->regs);
16537                 tp->regs = NULL;
16538
16539                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16540                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16541                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16542         }
16543         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16544                 tp->read32_mbox = tg3_read32_mbox_5906;
16545                 tp->write32_mbox = tg3_write32_mbox_5906;
16546                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16547                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16548         }
16549
16550         if (tp->write32 == tg3_write_indirect_reg32 ||
16551             (tg3_flag(tp, PCIX_MODE) &&
16552              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16553               tg3_asic_rev(tp) == ASIC_REV_5701)))
16554                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16555
16556         /* The memory arbiter has to be enabled in order for SRAM accesses
16557          * to succeed.  Normally on powerup the tg3 chip firmware will make
16558          * sure it is enabled, but other entities such as system netboot
16559          * code might disable it.
16560          */
16561         val = tr32(MEMARB_MODE);
16562         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16563
16564         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16565         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16566             tg3_flag(tp, 5780_CLASS)) {
16567                 if (tg3_flag(tp, PCIX_MODE)) {
16568                         pci_read_config_dword(tp->pdev,
16569                                               tp->pcix_cap + PCI_X_STATUS,
16570                                               &val);
16571                         tp->pci_fn = val & 0x7;
16572                 }
16573         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16574                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16575                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16576                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16577                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16578                         val = tr32(TG3_CPMU_STATUS);
16579
16580                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16581                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16582                 else
16583                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16584                                      TG3_CPMU_STATUS_FSHFT_5719;
16585         }
16586
16587         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16588                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16589                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16590         }
16591
16592         /* Get eeprom hw config before calling tg3_set_power_state().
16593          * In particular, the TG3_FLAG_IS_NIC flag must be
16594          * determined before calling tg3_set_power_state() so that
16595          * we know whether or not to switch out of Vaux power.
16596          * When the flag is set, it means that GPIO1 is used for eeprom
16597          * write protect and also implies that it is a LOM where GPIOs
16598          * are not used to switch power.
16599          */
16600         tg3_get_eeprom_hw_cfg(tp);
16601
16602         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16603                 tg3_flag_clear(tp, TSO_CAPABLE);
16604                 tg3_flag_clear(tp, TSO_BUG);
16605                 tp->fw_needed = NULL;
16606         }
16607
16608         if (tg3_flag(tp, ENABLE_APE)) {
16609                 /* Allow reads and writes to the
16610                  * APE register and memory space.
16611                  */
16612                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16613                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16614                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16615                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16616                                        pci_state_reg);
16617
16618                 tg3_ape_lock_init(tp);
16619         }
16620
16621         /* Set up tp->grc_local_ctrl before calling
16622          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16623          * will bring 5700's external PHY out of reset.
16624          * It is also used as eeprom write protect on LOMs.
16625          */
16626         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16627         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16628             tg3_flag(tp, EEPROM_WRITE_PROT))
16629                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16630                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16631         /* Unused GPIO3 must be driven as output on 5752 because there
16632          * are no pull-up resistors on unused GPIO pins.
16633          */
16634         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16635                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16636
16637         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16638             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16639             tg3_flag(tp, 57765_CLASS))
16640                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16641
16642         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16643             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16644                 /* Turn off the debug UART. */
16645                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646                 if (tg3_flag(tp, IS_NIC))
16647                         /* Keep VMain power. */
16648                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16649                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16650         }
16651
16652         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16653                 tp->grc_local_ctrl |=
16654                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16655
16656         /* Switch out of Vaux if it is a NIC */
16657         tg3_pwrsrc_switch_to_vmain(tp);
16658
16659         /* Derive initial jumbo mode from MTU assigned in
16660          * ether_setup() via the alloc_etherdev() call
16661          */
16662         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16663                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16664
16665         /* Determine WakeOnLan speed to use. */
16666         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16667             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16668             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16669             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16670                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16671         } else {
16672                 tg3_flag_set(tp, WOL_SPEED_100MB);
16673         }
16674
16675         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16676                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16677
16678         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16679         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16680             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16681              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16682              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16683             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16684             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16685                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16686
16687         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16688             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16689                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16690         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16691                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16692
16693         if (tg3_flag(tp, 5705_PLUS) &&
16694             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16695             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16696             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16697             !tg3_flag(tp, 57765_PLUS)) {
16698                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16699                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16700                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16701                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16702                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16703                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16704                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16705                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16706                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16707                 } else
16708                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16709         }
16710
16711         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16712             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16713                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16714                 if (tp->phy_otp == 0)
16715                         tp->phy_otp = TG3_OTP_DEFAULT;
16716         }
16717
16718         if (tg3_flag(tp, CPMU_PRESENT))
16719                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16720         else
16721                 tp->mi_mode = MAC_MI_MODE_BASE;
16722
16723         tp->coalesce_mode = 0;
16724         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16725             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16726                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16727
16728         /* Set these bits to enable the statistics workaround. */
16729         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16730             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16731             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16732             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16733                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16734                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16735         }
16736
16737         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16738             tg3_asic_rev(tp) == ASIC_REV_57780)
16739                 tg3_flag_set(tp, USE_PHYLIB);
16740
16741         err = tg3_mdio_init(tp);
16742         if (err)
16743                 return err;
16744
16745         /* Initialize data/descriptor byte/word swapping. */
16746         val = tr32(GRC_MODE);
16747         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16748             tg3_asic_rev(tp) == ASIC_REV_5762)
16749                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16750                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16751                         GRC_MODE_B2HRX_ENABLE |
16752                         GRC_MODE_HTX2B_ENABLE |
16753                         GRC_MODE_HOST_STACKUP);
16754         else
16755                 val &= GRC_MODE_HOST_STACKUP;
16756
16757         tw32(GRC_MODE, val | tp->grc_mode);
16758
16759         tg3_switch_clocks(tp);
16760
16761         /* Clear this out for sanity. */
16762         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16763
16764         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16765         tw32(TG3PCI_REG_BASE_ADDR, 0);
16766
16767         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16768                               &pci_state_reg);
16769         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16770             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16771                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16772                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16773                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16774                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16775                         void __iomem *sram_base;
16776
16777                         /* Write some dummy words into the SRAM status block
16778                          * area, see if it reads back correctly.  If the return
16779                          * value is bad, force enable the PCIX workaround.
16780                          */
16781                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16782
16783                         writel(0x00000000, sram_base);
16784                         writel(0x00000000, sram_base + 4);
16785                         writel(0xffffffff, sram_base + 4);
16786                         if (readl(sram_base) != 0x00000000)
16787                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16788                 }
16789         }
16790
16791         udelay(50);
16792         tg3_nvram_init(tp);
16793
16794         /* If the device has NVRAM, there is no need to load patch firmware */
16795         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16796             !tg3_flag(tp, NO_NVRAM))
16797                 tp->fw_needed = NULL;
16798
16799         grc_misc_cfg = tr32(GRC_MISC_CFG);
16800         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16801
16802         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16803             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16804              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16805                 tg3_flag_set(tp, IS_5788);
16806
16807         if (!tg3_flag(tp, IS_5788) &&
16808             tg3_asic_rev(tp) != ASIC_REV_5700)
16809                 tg3_flag_set(tp, TAGGED_STATUS);
16810         if (tg3_flag(tp, TAGGED_STATUS)) {
16811                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16812                                       HOSTCC_MODE_CLRTICK_TXBD);
16813
16814                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16815                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16816                                        tp->misc_host_ctrl);
16817         }
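        /* Hedged summary, inferred from the flag and register names: in
         * tagged status mode the chip stamps each status block update with
         * a tag the driver echoes back when acking the interrupt, making
         * missed updates detectable, while the CLRTICK bits restart the
         * coalescing tick counters on each update.
         */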
16818
16819         /* Preserve the APE MAC_MODE bits */
16820         if (tg3_flag(tp, ENABLE_APE))
16821                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16822         else
16823                 tp->mac_mode = 0;
16824
16825         if (tg3_10_100_only_device(tp, ent))
16826                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16827
16828         err = tg3_phy_probe(tp);
16829         if (err) {
16830                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16831                 /* ... but do not return immediately ... */
16832                 tg3_mdio_fini(tp);
16833         }
16834
16835         tg3_read_vpd(tp);
16836         tg3_read_fw_ver(tp);
16837
16838         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16839                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16840         } else {
16841                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16842                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16843                 else
16844                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845         }
16846
16847         /* 5700 {AX,BX} chips have a broken status block link
16848          * change bit implementation, so we must use the
16849          * status register in those cases.
16850          */
16851         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16852                 tg3_flag_set(tp, USE_LINKCHG_REG);
16853         else
16854                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16855
16856         /* The led_ctrl is set during tg3_phy_probe(); here we might
16857          * have to force the link status polling mechanism based
16858          * upon subsystem IDs.
16859          */
16860         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16861             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16862             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16863                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16864                 tg3_flag_set(tp, USE_LINKCHG_REG);
16865         }
16866
16867         /* For all SERDES we poll the MAC status register. */
16868         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16869                 tg3_flag_set(tp, POLL_SERDES);
16870         else
16871                 tg3_flag_clear(tp, POLL_SERDES);
16872
16873         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16874                 tg3_flag_set(tp, POLL_CPMU_LINK);
16875
16876         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16877         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16878         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16879             tg3_flag(tp, PCIX_MODE)) {
16880                 tp->rx_offset = NET_SKB_PAD;
16881 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16882                 tp->rx_copy_thresh = ~(u16)0;
16883 #endif
16884         }
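        /* Net effect for the 5701 in PCI-X mode: the NET_IP_ALIGN pad is
         * dropped so receive DMA stays aligned, and on architectures
         * without efficient unaligned access rx_copy_thresh is maxed out,
         * forcing every received packet through the copy path so that the
         * protocol headers end up aligned again.
         */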
16885
16886         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16887         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16888         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16889
16890         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16891
16892         /* Increment the rx prod index on the rx std ring by at most
16893          * 8 on these chips to work around hw errata.
16894          */
16895         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16896             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16897             tg3_asic_rev(tp) == ASIC_REV_5755)
16898                 tp->rx_std_max_post = 8;
16899
16900         if (tg3_flag(tp, ASPM_WORKAROUND))
16901                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16902                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16903
16904         return err;
16905 }
16906
16907 #ifdef CONFIG_SPARC
16908 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16909 {
16910         struct net_device *dev = tp->dev;
16911         struct pci_dev *pdev = tp->pdev;
16912         struct device_node *dp = pci_device_to_OF_node(pdev);
16913         const unsigned char *addr;
16914         int len;
16915
16916         addr = of_get_property(dp, "local-mac-address", &len);
16917         if (addr && len == ETH_ALEN) {
16918                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16919                 return 0;
16920         }
16921         return -ENODEV;
16922 }
16923
16924 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16925 {
16926         struct net_device *dev = tp->dev;
16927
16928         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16929         return 0;
16930 }
16931 #endif
16932
16933 static int tg3_get_device_address(struct tg3 *tp)
16934 {
16935         struct net_device *dev = tp->dev;
16936         u32 hi, lo, mac_offset;
16937         int addr_ok = 0;
16938         int err;
16939
16940 #ifdef CONFIG_SPARC
16941         if (!tg3_get_macaddr_sparc(tp))
16942                 return 0;
16943 #endif
16944
16945         if (tg3_flag(tp, IS_SSB_CORE)) {
16946                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16947                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16948                         return 0;
16949         }
16950
16951         mac_offset = 0x7c;
16952         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16953             tg3_flag(tp, 5780_CLASS)) {
16954                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16955                         mac_offset = 0xcc;
16956                 if (tg3_nvram_lock(tp))
16957                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16958                 else
16959                         tg3_nvram_unlock(tp);
16960         } else if (tg3_flag(tp, 5717_PLUS)) {
16961                 if (tp->pci_fn & 1)
16962                         mac_offset = 0xcc;
16963                 if (tp->pci_fn > 1)
16964                         mac_offset += 0x18c;
16965         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16966                 mac_offset = 0x10;
16967
16968         /* First try to get it from MAC address mailbox. */
16969         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16970         if ((hi >> 16) == 0x484b) {
16971                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16972                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16973
16974                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16975                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16976                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16977                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16978                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16979
16980                 /* Some old bootcode may report a 0 MAC address in SRAM */
16981                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16982         }
16983         if (!addr_ok) {
16984                 /* Next, try NVRAM. */
16985                 if (!tg3_flag(tp, NO_NVRAM) &&
16986                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16987                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16988                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16989                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16990                 }
16991                 /* Finally just fetch it out of the MAC control regs. */
16992                 else {
16993                         hi = tr32(MAC_ADDR_0_HIGH);
16994                         lo = tr32(MAC_ADDR_0_LOW);
16995
16996                         dev->dev_addr[5] = lo & 0xff;
16997                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16998                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16999                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17000                         dev->dev_addr[1] = hi & 0xff;
17001                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17002                 }
17003         }
17004
17005         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17006 #ifdef CONFIG_SPARC
17007                 if (!tg3_get_default_macaddr_sparc(tp))
17008                         return 0;
17009 #endif
17010                 return -EINVAL;
17011         }
17012         return 0;
17013 }
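#if 0   /* Illustrative only, not compiled: how the mailbox words read by
         * tg3_get_device_address() map onto a MAC address.  The 0x484b
         * signature is ASCII "HK"; with hi = 0x484b0102 and
         * lo = 0x03040506 the decoded address is 01:02:03:04:05:06.
         */
static void tg3_example_unpack_mac(u32 hi, u32 lo, u8 *mac)
{
        mac[0] = (hi >>  8) & 0xff;
        mac[1] = (hi >>  0) & 0xff;
        mac[2] = (lo >> 24) & 0xff;
        mac[3] = (lo >> 16) & 0xff;
        mac[4] = (lo >>  8) & 0xff;
        mac[5] = (lo >>  0) & 0xff;
}
#endif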
17014
17015 #define BOUNDARY_SINGLE_CACHELINE       1
17016 #define BOUNDARY_MULTI_CACHELINE        2
17017
17018 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17019 {
17020         int cacheline_size;
17021         u8 byte;
17022         int goal;
17023
17024         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17025         if (byte == 0)
17026                 cacheline_size = 1024;
17027         else
17028                 cacheline_size = (int) byte * 4;
17029
17030         /* On 5703 and later chips, the boundary bits have no
17031          * effect; PCI Express devices are handled separately below.
17032          */
17033         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17034             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17035             !tg3_flag(tp, PCI_EXPRESS))
17036                 goto out;
17037
17038 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17039         goal = BOUNDARY_MULTI_CACHELINE;
17040 #else
17041 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17042         goal = BOUNDARY_SINGLE_CACHELINE;
17043 #else
17044         goal = 0;
17045 #endif
17046 #endif
17047
17048         if (tg3_flag(tp, 57765_PLUS)) {
17049                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17050                 goto out;
17051         }
17052
17053         if (!goal)
17054                 goto out;
17055
17056         /* PCI controllers on most RISC systems tend to disconnect
17057          * when a device tries to burst across a cache-line boundary.
17058          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17059          *
17060          * Unfortunately, for PCI-E there are only limited
17061          * write-side controls for this, and thus for reads
17062          * we will still get the disconnects.  We'll also waste
17063          * these PCI cycles for both read and write for chips
17064          * other than 5700 and 5701 which do not implement the
17065          * boundary bits.
17066          */
17067         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17068                 switch (cacheline_size) {
17069                 case 16:
17070                 case 32:
17071                 case 64:
17072                 case 128:
17073                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17074                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17075                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17076                         } else {
17077                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17078                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17079                         }
17080                         break;
17081
17082                 case 256:
17083                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17084                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17085                         break;
17086
17087                 default:
17088                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17089                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17090                         break;
17091                 }
17092         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17093                 switch (cacheline_size) {
17094                 case 16:
17095                 case 32:
17096                 case 64:
17097                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17098                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17099                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17100                                 break;
17101                         }
17102                         /* fallthrough */
17103                 case 128:
17104                 default:
17105                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17106                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17107                         break;
17108                 }
17109         } else {
17110                 switch (cacheline_size) {
17111                 case 16:
17112                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17113                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17114                                         DMA_RWCTRL_WRITE_BNDRY_16);
17115                                 break;
17116                         }
17117                         /* fallthrough */
17118                 case 32:
17119                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17120                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17121                                         DMA_RWCTRL_WRITE_BNDRY_32);
17122                                 break;
17123                         }
17124                         /* fallthrough */
17125                 case 64:
17126                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17127                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17128                                         DMA_RWCTRL_WRITE_BNDRY_64);
17129                                 break;
17130                         }
17131                         /* fallthrough */
17132                 case 128:
17133                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17134                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17135                                         DMA_RWCTRL_WRITE_BNDRY_128);
17136                                 break;
17137                         }
17138                         /* fallthrough */
17139                 case 256:
17140                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17141                                 DMA_RWCTRL_WRITE_BNDRY_256);
17142                         break;
17143                 case 512:
17144                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17145                                 DMA_RWCTRL_WRITE_BNDRY_512);
17146                         break;
17147                 case 1024:
17148                 default:
17149                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17150                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17151                         break;
17152                 }
17153         }
17154
17155 out:
17156         return val;
17157 }
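#if 0   /* Illustrative only, not compiled: PCI_CACHE_LINE_SIZE is encoded
         * in 32-bit words, so tg3_calc_dma_bndry() decodes it as raw * 4
         * bytes and pessimistically treats 0 (left unprogrammed by
         * firmware) as a 1024-byte cache line.  E.g. raw 0x10 -> 64 bytes.
         */
static int tg3_example_cacheline_bytes(u8 raw)
{
        return raw ? raw * 4 : 1024;
}
#endif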
17158
17159 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17160                            int size, bool to_device)
17161 {
17162         struct tg3_internal_buffer_desc test_desc;
17163         u32 sram_dma_descs;
17164         int i, ret;
17165
17166         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17167
17168         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17169         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17170         tw32(RDMAC_STATUS, 0);
17171         tw32(WDMAC_STATUS, 0);
17172
17173         tw32(BUFMGR_MODE, 0);
17174         tw32(FTQ_RESET, 0);
17175
17176         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17177         test_desc.addr_lo = buf_dma & 0xffffffff;
17178         test_desc.nic_mbuf = 0x00002100;
17179         test_desc.len = size;
17180
17181         /*
17182          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17183          * the *second* time the tg3 driver was getting loaded after an
17184          * initial scan.
17185          *
17186          * Broadcom tells me:
17187          *   ...the DMA engine is connected to the GRC block and a DMA
17188          *   reset may affect the GRC block in some unpredictable way...
17189          *   The behavior of resets to individual blocks has not been tested.
17190          *
17191          * Broadcom noted the GRC reset will also reset all sub-components.
17192          */
17193         if (to_device) {
17194                 test_desc.cqid_sqid = (13 << 8) | 2;
17195
17196                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17197                 udelay(40);
17198         } else {
17199                 test_desc.cqid_sqid = (16 << 8) | 7;
17200
17201                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17202                 udelay(40);
17203         }
17204         test_desc.flags = 0x00000005;
17205
17206         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17207                 u32 val;
17208
17209                 val = *(((u32 *)&test_desc) + i);
17210                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17211                                        sram_dma_descs + (i * sizeof(u32)));
17212                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17213         }
17214         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17215
17216         if (to_device)
17217                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17218         else
17219                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17220
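        /* Poll the matching completion FIFO for the descriptor index to
         * come back; 40 iterations of udelay(100) bound the wait at
         * roughly 4 ms before the test is declared a failure.
         */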
17221         ret = -ENODEV;
17222         for (i = 0; i < 40; i++) {
17223                 u32 val;
17224
17225                 if (to_device)
17226                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17227                 else
17228                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17229                 if ((val & 0xffff) == sram_dma_descs) {
17230                         ret = 0;
17231                         break;
17232                 }
17233
17234                 udelay(100);
17235         }
17236
17237         return ret;
17238 }
17239
17240 #define TEST_BUFFER_SIZE        0x2000
17241
17242 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17243         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17244         { },
17245 };
17246
17247 static int tg3_test_dma(struct tg3 *tp)
17248 {
17249         dma_addr_t buf_dma;
17250         u32 *buf, saved_dma_rwctrl;
17251         int ret = 0;
17252
17253         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17254                                  &buf_dma, GFP_KERNEL);
17255         if (!buf) {
17256                 ret = -ENOMEM;
17257                 goto out_nofree;
17258         }
17259
17260         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17261                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17262
17263         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17264
17265         if (tg3_flag(tp, 57765_PLUS))
17266                 goto out;
17267
17268         if (tg3_flag(tp, PCI_EXPRESS)) {
17269                 /* DMA read watermark not used on PCIE */
17270                 tp->dma_rwctrl |= 0x00180000;
17271         } else if (!tg3_flag(tp, PCIX_MODE)) {
17272                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17273                     tg3_asic_rev(tp) == ASIC_REV_5750)
17274                         tp->dma_rwctrl |= 0x003f0000;
17275                 else
17276                         tp->dma_rwctrl |= 0x003f000f;
17277         } else {
17278                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17279                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17280                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17281                         u32 read_water = 0x7;
17282
17283                         /* If the 5704 is behind the EPB bridge, we can
17284                          * do the less restrictive ONE_DMA workaround for
17285                          * better performance.
17286                          */
17287                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17288                             tg3_asic_rev(tp) == ASIC_REV_5704)
17289                                 tp->dma_rwctrl |= 0x8000;
17290                         else if (ccval == 0x6 || ccval == 0x7)
17291                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17292
17293                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17294                                 read_water = 4;
17295                         /* Set bit 23 to enable PCIX hw bug fix */
17296                         tp->dma_rwctrl |=
17297                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17298                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17299                                 (1 << 23);
17300                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17301                         /* 5780 always in PCIX mode */
17302                         tp->dma_rwctrl |= 0x00144000;
17303                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17304                         /* 5714 always in PCIX mode */
17305                         tp->dma_rwctrl |= 0x00148000;
17306                 } else {
17307                         tp->dma_rwctrl |= 0x001b000f;
17308                 }
17309         }
17310         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17311                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17312
17313         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17314             tg3_asic_rev(tp) == ASIC_REV_5704)
17315                 tp->dma_rwctrl &= 0xfffffff0;
17316
17317         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17318             tg3_asic_rev(tp) == ASIC_REV_5701) {
17319                 /* Remove this if it causes problems for some boards. */
17320                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17321
17322                 /* On 5700/5701 chips, we need to set this bit.
17323                  * Otherwise the chip will issue cacheline transactions
17324                  * to streamable DMA memory with not all the byte
17325                  * enables turned on.  This is an error on several
17326                  * RISC PCI controllers, in particular sparc64.
17327                  *
17328                  * On 5703/5704 chips, this bit has been reassigned
17329                  * a different meaning.  In particular, it is used
17330                  * on those chips to enable a PCI-X workaround.
17331                  */
17332                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17333         }
17334
17335         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17336
17337
17338         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17339             tg3_asic_rev(tp) != ASIC_REV_5701)
17340                 goto out;
17341
17342         /* It is best to perform DMA test with maximum write burst size
17343          * to expose the 5700/5701 write DMA bug.
17344          */
17345         saved_dma_rwctrl = tp->dma_rwctrl;
17346         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17347         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17348
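        /* Write a counting pattern, DMA it to the chip and back, then
         * verify it.  On corruption, drop to a 16-byte write boundary and
         * retry; only corruption that persists at that boundary fails the
         * test.
         */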
17349         while (1) {
17350                 u32 *p = buf, i;
17351
17352                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17353                         p[i] = i;
17354
17355                 /* Send the buffer to the chip. */
17356                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17357                 if (ret) {
17358                         dev_err(&tp->pdev->dev,
17359                                 "%s: Buffer write failed. err = %d\n",
17360                                 __func__, ret);
17361                         break;
17362                 }
17363
17364                 /* Now read it back. */
17365                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17366                 if (ret) {
17367                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17368                                 "err = %d\n", __func__, ret);
17369                         break;
17370                 }
17371
17372                 /* Verify it. */
17373                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17374                         if (p[i] == i)
17375                                 continue;
17376
17377                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17378                             DMA_RWCTRL_WRITE_BNDRY_16) {
17379                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17380                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17381                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17382                                 break;
17383                         } else {
17384                                 dev_err(&tp->pdev->dev,
17385                                         "%s: Buffer corrupted on read back! "
17386                                         "(%d != %d)\n", __func__, p[i], i);
17387                                 ret = -ENODEV;
17388                                 goto out;
17389                         }
17390                 }
17391
17392                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17393                         /* Success. */
17394                         ret = 0;
17395                         break;
17396                 }
17397         }
17398         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17399             DMA_RWCTRL_WRITE_BNDRY_16) {
17400                 /* DMA test passed without adjusting DMA boundary;
17401                  * now look for chipsets that are known to expose the
17402                  * DMA bug without failing the test.
17403                  */
17404                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17405                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17406                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17407                 } else {
17408                         /* Safe to use the calculated DMA boundary. */
17409                         tp->dma_rwctrl = saved_dma_rwctrl;
17410                 }
17411
17412                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17413         }
17414
17415 out:
17416         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17417 out_nofree:
17418         return ret;
17419 }
17420
17421 static void tg3_init_bufmgr_config(struct tg3 *tp)
17422 {
17423         if (tg3_flag(tp, 57765_PLUS)) {
17424                 tp->bufmgr_config.mbuf_read_dma_low_water =
17425                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17426                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17427                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17428                 tp->bufmgr_config.mbuf_high_water =
17429                         DEFAULT_MB_HIGH_WATER_57765;
17430
17431                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17432                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17433                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17434                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17435                 tp->bufmgr_config.mbuf_high_water_jumbo =
17436                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17437         } else if (tg3_flag(tp, 5705_PLUS)) {
17438                 tp->bufmgr_config.mbuf_read_dma_low_water =
17439                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17440                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17441                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17442                 tp->bufmgr_config.mbuf_high_water =
17443                         DEFAULT_MB_HIGH_WATER_5705;
17444                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17445                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17446                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17447                         tp->bufmgr_config.mbuf_high_water =
17448                                 DEFAULT_MB_HIGH_WATER_5906;
17449                 }
17450
17451                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17452                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17453                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17454                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17455                 tp->bufmgr_config.mbuf_high_water_jumbo =
17456                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17457         } else {
17458                 tp->bufmgr_config.mbuf_read_dma_low_water =
17459                         DEFAULT_MB_RDMA_LOW_WATER;
17460                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17461                         DEFAULT_MB_MACRX_LOW_WATER;
17462                 tp->bufmgr_config.mbuf_high_water =
17463                         DEFAULT_MB_HIGH_WATER;
17464
17465                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17466                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17467                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17468                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17469                 tp->bufmgr_config.mbuf_high_water_jumbo =
17470                         DEFAULT_MB_HIGH_WATER_JUMBO;
17471         }
17472
17473         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17474         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17475 }
17476
17477 static char *tg3_phy_string(struct tg3 *tp)
17478 {
17479         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17480         case TG3_PHY_ID_BCM5400:        return "5400";
17481         case TG3_PHY_ID_BCM5401:        return "5401";
17482         case TG3_PHY_ID_BCM5411:        return "5411";
17483         case TG3_PHY_ID_BCM5701:        return "5701";
17484         case TG3_PHY_ID_BCM5703:        return "5703";
17485         case TG3_PHY_ID_BCM5704:        return "5704";
17486         case TG3_PHY_ID_BCM5705:        return "5705";
17487         case TG3_PHY_ID_BCM5750:        return "5750";
17488         case TG3_PHY_ID_BCM5752:        return "5752";
17489         case TG3_PHY_ID_BCM5714:        return "5714";
17490         case TG3_PHY_ID_BCM5780:        return "5780";
17491         case TG3_PHY_ID_BCM5755:        return "5755";
17492         case TG3_PHY_ID_BCM5787:        return "5787";
17493         case TG3_PHY_ID_BCM5784:        return "5784";
17494         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17495         case TG3_PHY_ID_BCM5906:        return "5906";
17496         case TG3_PHY_ID_BCM5761:        return "5761";
17497         case TG3_PHY_ID_BCM5718C:       return "5718C";
17498         case TG3_PHY_ID_BCM5718S:       return "5718S";
17499         case TG3_PHY_ID_BCM57765:       return "57765";
17500         case TG3_PHY_ID_BCM5719C:       return "5719C";
17501         case TG3_PHY_ID_BCM5720C:       return "5720C";
17502         case TG3_PHY_ID_BCM5762:        return "5762C";
17503         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17504         case 0:                 return "serdes";
17505         default:                return "unknown";
17506         }
17507 }
17508
17509 static char *tg3_bus_string(struct tg3 *tp, char *str)
17510 {
17511         if (tg3_flag(tp, PCI_EXPRESS)) {
17512                 strcpy(str, "PCI Express");
17513                 return str;
17514         } else if (tg3_flag(tp, PCIX_MODE)) {
17515                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17516
17517                 strcpy(str, "PCIX:");
17518
17519                 if ((clock_ctrl == 7) ||
17520                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17521                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17522                         strcat(str, "133MHz");
17523                 else if (clock_ctrl == 0)
17524                         strcat(str, "33MHz");
17525                 else if (clock_ctrl == 2)
17526                         strcat(str, "50MHz");
17527                 else if (clock_ctrl == 4)
17528                         strcat(str, "66MHz");
17529                 else if (clock_ctrl == 6)
17530                         strcat(str, "100MHz");
17531         } else {
17532                 strcpy(str, "PCI:");
17533                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17534                         strcat(str, "66MHz");
17535                 else
17536                         strcat(str, "33MHz");
17537         }
17538         if (tg3_flag(tp, PCI_32BIT))
17539                 strcat(str, ":32-bit");
17540         else
17541                 strcat(str, ":64-bit");
17542         return str;
17543 }
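/* Example output (assuming a 64-bit PCI-X slot clocked at 133MHz): the
 * string built above reads "PCIX:133MHz:64-bit".  PCI Express devices
 * return early with just "PCI Express".
 */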
17544
17545 static void tg3_init_coal(struct tg3 *tp)
17546 {
17547         struct ethtool_coalesce *ec = &tp->coal;
17548
17549         memset(ec, 0, sizeof(*ec));
17550         ec->cmd = ETHTOOL_GCOALESCE;
17551         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17552         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17553         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17554         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17555         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17556         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17557         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17558         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17559         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17560
17561         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17562                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17563                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17564                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17565                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17566                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17567         }
17568
17569         if (tg3_flag(tp, 5705_PLUS)) {
17570                 ec->rx_coalesce_usecs_irq = 0;
17571                 ec->tx_coalesce_usecs_irq = 0;
17572                 ec->stats_block_coalesce_usecs = 0;
17573         }
17574 }
17575
17576 static int tg3_init_one(struct pci_dev *pdev,
17577                                   const struct pci_device_id *ent)
17578 {
17579         struct net_device *dev;
17580         struct tg3 *tp;
17581         int i, err;
17582         u32 sndmbx, rcvmbx, intmbx;
17583         char str[40];
17584         u64 dma_mask, persist_dma_mask;
17585         netdev_features_t features = 0;
17586
17587         printk_once(KERN_INFO "%s\n", version);
17588
17589         err = pci_enable_device(pdev);
17590         if (err) {
17591                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17592                 return err;
17593         }
17594
17595         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17596         if (err) {
17597                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17598                 goto err_out_disable_pdev;
17599         }
17600
17601         pci_set_master(pdev);
17602
17603         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17604         if (!dev) {
17605                 err = -ENOMEM;
17606                 goto err_out_free_res;
17607         }
17608
17609         SET_NETDEV_DEV(dev, &pdev->dev);
17610
17611         tp = netdev_priv(dev);
17612         tp->pdev = pdev;
17613         tp->dev = dev;
17614         tp->rx_mode = TG3_DEF_RX_MODE;
17615         tp->tx_mode = TG3_DEF_TX_MODE;
17616         tp->irq_sync = 1;
17617         tp->pcierr_recovery = false;
17618
17619         if (tg3_debug > 0)
17620                 tp->msg_enable = tg3_debug;
17621         else
17622                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17623
17624         if (pdev_is_ssb_gige_core(pdev)) {
17625                 tg3_flag_set(tp, IS_SSB_CORE);
17626                 if (ssb_gige_must_flush_posted_writes(pdev))
17627                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17628                 if (ssb_gige_one_dma_at_once(pdev))
17629                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17630                 if (ssb_gige_have_roboswitch(pdev)) {
17631                         tg3_flag_set(tp, USE_PHYLIB);
17632                         tg3_flag_set(tp, ROBOSWITCH);
17633                 }
17634                 if (ssb_gige_is_rgmii(pdev))
17635                         tg3_flag_set(tp, RGMII_MODE);
17636         }
17637
17638         /* The word/byte swap controls here affect register access byte
17639          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17640          * setting below.
17641          */
17642         tp->misc_host_ctrl =
17643                 MISC_HOST_CTRL_MASK_PCI_INT |
17644                 MISC_HOST_CTRL_WORD_SWAP |
17645                 MISC_HOST_CTRL_INDIR_ACCESS |
17646                 MISC_HOST_CTRL_PCISTATE_RW;
17647
17648         /* The NONFRM (non-frame) byte/word swap controls take effect
17649          * on descriptor entries, anything which isn't packet data.
17650          *
17651          * The StrongARM chips on the board (one for tx, one for rx)
17652          * are running in big-endian mode.
17653          */
17654         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17655                         GRC_MODE_WSWAP_NONFRM_DATA);
17656 #ifdef __BIG_ENDIAN
17657         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17658 #endif
17659         spin_lock_init(&tp->lock);
17660         spin_lock_init(&tp->indirect_lock);
17661         INIT_WORK(&tp->reset_task, tg3_reset_task);
17662
17663         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17664         if (!tp->regs) {
17665                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17666                 err = -ENOMEM;
17667                 goto err_out_free_dev;
17668         }
17669
17670         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17671             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17672             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17673             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17674             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17675             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17676             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17677             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17678             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17684             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17685                 tg3_flag_set(tp, ENABLE_APE);
17686                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17687                 if (!tp->aperegs) {
17688                         dev_err(&pdev->dev,
17689                                 "Cannot map APE registers, aborting\n");
17690                         err = -ENOMEM;
17691                         goto err_out_iounmap;
17692                 }
17693         }
17694
17695         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17696         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17697
17698         dev->ethtool_ops = &tg3_ethtool_ops;
17699         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17700         dev->netdev_ops = &tg3_netdev_ops;
17701         dev->irq = pdev->irq;
17702
17703         err = tg3_get_invariants(tp, ent);
17704         if (err) {
17705                 dev_err(&pdev->dev,
17706                         "Problem fetching invariants of chip, aborting\n");
17707                 goto err_out_apeunmap;
17708         }
17709
17710         /* The EPB bridge inside 5714, 5715, and 5780 and any
17711          * device behind the EPB cannot support DMA addresses > 40-bit.
17712          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17713          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17714          * do DMA address check in tg3_start_xmit().
17715          */
17716         if (tg3_flag(tp, IS_5788))
17717                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17718         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17719                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17720 #ifdef CONFIG_HIGHMEM
17721                 dma_mask = DMA_BIT_MASK(64);
17722 #endif
17723         } else
17724                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17725
17726         /* Configure DMA attributes. */
17727         if (dma_mask > DMA_BIT_MASK(32)) {
17728                 err = pci_set_dma_mask(pdev, dma_mask);
17729                 if (!err) {
17730                         features |= NETIF_F_HIGHDMA;
17731                         err = pci_set_consistent_dma_mask(pdev,
17732                                                           persist_dma_mask);
17733                         if (err < 0) {
17734                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17735                                         "DMA for consistent allocations\n");
17736                                 goto err_out_apeunmap;
17737                         }
17738                 }
17739         }
17740         if (err || dma_mask == DMA_BIT_MASK(32)) {
17741                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17742                 if (err) {
17743                         dev_err(&pdev->dev,
17744                                 "No usable DMA configuration, aborting\n");
17745                         goto err_out_apeunmap;
17746                 }
17747         }
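        /* For reference: DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64,
         * so the 40-bit case above caps DMA addresses at 0xff_ffff_ffff.
         * Note the persistent (coherent) mask may be narrower than the
         * streaming mask on CONFIG_HIGHMEM systems.
         */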
17748
17749         tg3_init_bufmgr_config(tp);
17750
17751         /* 5700 B0 chips do not support checksumming correctly due
17752          * to hardware bugs.
17753          */
17754         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17755                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17756
17757                 if (tg3_flag(tp, 5755_PLUS))
17758                         features |= NETIF_F_IPV6_CSUM;
17759         }
17760
17761         /* TSO is on by default on chips that support hardware TSO.
17762          * Firmware TSO on older chips gives lower performance, so it
17763          * is off by default, but can be enabled using ethtool.
17764          */
17765         if ((tg3_flag(tp, HW_TSO_1) ||
17766              tg3_flag(tp, HW_TSO_2) ||
17767              tg3_flag(tp, HW_TSO_3)) &&
17768             (features & NETIF_F_IP_CSUM))
17769                 features |= NETIF_F_TSO;
17770         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17771                 if (features & NETIF_F_IPV6_CSUM)
17772                         features |= NETIF_F_TSO6;
17773                 if (tg3_flag(tp, HW_TSO_3) ||
17774                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17775                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17776                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17777                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17778                     tg3_asic_rev(tp) == ASIC_REV_57780)
17779                         features |= NETIF_F_TSO_ECN;
17780         }
17781
17782         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17783                          NETIF_F_HW_VLAN_CTAG_RX;
17784         dev->vlan_features |= features;
17785
17786         /*
17787          * Add loopback capability only for a subset of devices that support
17788          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17789          * loopback for the remaining devices.
17790          */
17791         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17792             !tg3_flag(tp, CPMU_PRESENT))
17793                 /* Add the loopback capability */
17794                 features |= NETIF_F_LOOPBACK;
17795
17796         dev->hw_features |= features;
17797         dev->priv_flags |= IFF_UNICAST_FLT;
17798
17799         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17800         dev->min_mtu = TG3_MIN_MTU;
17801         dev->max_mtu = TG3_MAX_MTU(tp);
17802
17803         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17804             !tg3_flag(tp, TSO_CAPABLE) &&
17805             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17806                 tg3_flag_set(tp, MAX_RXPEND_64);
17807                 tp->rx_pending = 63;
17808         }
17809
17810         err = tg3_get_device_address(tp);
17811         if (err) {
17812                 dev_err(&pdev->dev,
17813                         "Could not obtain valid ethernet address, aborting\n");
17814                 goto err_out_apeunmap;
17815         }
17816
17817         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17818         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17819         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17820         for (i = 0; i < tp->irq_max; i++) {
17821                 struct tg3_napi *tnapi = &tp->napi[i];
17822
17823                 tnapi->tp = tp;
17824                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17825
17826                 tnapi->int_mbox = intmbx;
17827                 if (i <= 4)
17828                         intmbx += 0x8;
17829                 else
17830                         intmbx += 0x4;
17831
17832                 tnapi->consmbox = rcvmbx;
17833                 tnapi->prodmbox = sndmbx;
17834
17835                 if (i)
17836                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17837                 else
17838                         tnapi->coal_now = HOSTCC_MODE_NOW;
17839
17840                 if (!tg3_flag(tp, SUPPORT_MSIX))
17841                         break;
17842
17843                 /*
17844                  * If we support MSIX, we'll be using RSS.  If we're using
17845                  * RSS, the first vector only handles link interrupts and the
17846                  * remaining vectors handle rx and tx interrupts.  Reuse the
17847                  * mailbox values for the next iteration.  The values we set up
17848                  * above are still useful for the single vectored mode.
17849                  */
17850                 if (!i)
17851                         continue;
17852
17853                 rcvmbx += 0x8;
17854
17855                 if (sndmbx & 0x4)
17856                         sndmbx -= 0x4;
17857                 else
17858                         sndmbx += 0xc;
17859         }
17860
17861         /*
17862          * Reset the chip in case the UNDI or EFI driver did not shut it
17863          * down cleanly.  The DMA self test would otherwise enable the
17864          * WDMAC and we would see (spurious) pending DMA on the PCI bus.
17865          */
17866         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17867             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17868                 tg3_full_lock(tp, 0);
17869                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17870                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17871                 tg3_full_unlock(tp);
17872         }
17873
17874         err = tg3_test_dma(tp);
17875         if (err) {
17876                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17877                 goto err_out_apeunmap;
17878         }
17879
17880         tg3_init_coal(tp);
17881
17882         pci_set_drvdata(pdev, dev);
17883
17884         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17885             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17886             tg3_asic_rev(tp) == ASIC_REV_5762)
17887                 tg3_flag_set(tp, PTP_CAPABLE);
17888
17889         tg3_timer_init(tp);
17890
17891         tg3_carrier_off(tp);
17892
17893         err = register_netdev(dev);
17894         if (err) {
17895                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17896                 goto err_out_apeunmap;
17897         }
17898
17899         if (tg3_flag(tp, PTP_CAPABLE)) {
17900                 tg3_ptp_init(tp);
17901                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17902                                                    &tp->pdev->dev);
17903                 if (IS_ERR(tp->ptp_clock))
17904                         tp->ptp_clock = NULL;
17905         }
17906
17907         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17908                     tp->board_part_number,
17909                     tg3_chip_rev_id(tp),
17910                     tg3_bus_string(tp, str),
17911                     dev->dev_addr);
17912
17913         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17914                 char *ethtype;
17915
17916                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17917                         ethtype = "10/100Base-TX";
17918                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17919                         ethtype = "1000Base-SX";
17920                 else
17921                         ethtype = "10/100/1000Base-T";
17922
17923                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17924                             "(WireSpeed[%d], EEE[%d])\n",
17925                             tg3_phy_string(tp), ethtype,
17926                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17927                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17928         }
17929
17930         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17931                     (dev->features & NETIF_F_RXCSUM) != 0,
17932                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17933                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17934                     tg3_flag(tp, ENABLE_ASF) != 0,
17935                     tg3_flag(tp, TSO_CAPABLE) != 0);
17936         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17937                     tp->dma_rwctrl,
17938                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17939                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17940
17941         pci_save_state(pdev);
17942
17943         return 0;
17944
17945 err_out_apeunmap:
17946         if (tp->aperegs) {
17947                 iounmap(tp->aperegs);
17948                 tp->aperegs = NULL;
17949         }
17950
17951 err_out_iounmap:
17952         if (tp->regs) {
17953                 iounmap(tp->regs);
17954                 tp->regs = NULL;
17955         }
17956
17957 err_out_free_dev:
17958         free_netdev(dev);
17959
17960 err_out_free_res:
17961         pci_release_regions(pdev);
17962
17963 err_out_disable_pdev:
17964         if (pci_is_enabled(pdev))
17965                 pci_disable_device(pdev);
17966         return err;
17967 }
17968
17969 static void tg3_remove_one(struct pci_dev *pdev)
17970 {
17971         struct net_device *dev = pci_get_drvdata(pdev);
17972
17973         if (dev) {
17974                 struct tg3 *tp = netdev_priv(dev);
17975
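                      /* Tear down in roughly the reverse order of probe:
                       * PTP, firmware, the deferred reset task, PHY/MDIO,
                       * then the netdev and the PCI resources.
                       */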
17976                 tg3_ptp_fini(tp);
17977
17978                 release_firmware(tp->fw);
17979
17980                 tg3_reset_task_cancel(tp);
17981
17982                 if (tg3_flag(tp, USE_PHYLIB)) {
17983                         tg3_phy_fini(tp);
17984                         tg3_mdio_fini(tp);
17985                 }
17986
17987                 unregister_netdev(dev);
17988                 if (tp->aperegs) {
17989                         iounmap(tp->aperegs);
17990                         tp->aperegs = NULL;
17991                 }
17992                 if (tp->regs) {
17993                         iounmap(tp->regs);
17994                         tp->regs = NULL;
17995                 }
17996                 free_netdev(dev);
17997                 pci_release_regions(pdev);
17998                 pci_disable_device(pdev);
17999         }
18000 }
18001
18002 #ifdef CONFIG_PM_SLEEP
18003 static int tg3_suspend(struct device *device)
18004 {
18005         struct pci_dev *pdev = to_pci_dev(device);
18006         struct net_device *dev = pci_get_drvdata(pdev);
18007         struct tg3 *tp = netdev_priv(dev);
18008         int err = 0;
18009
18010         rtnl_lock();
18011
18012         if (!netif_running(dev))
18013                 goto unlock;
18014
18015         tg3_reset_task_cancel(tp);
18016         tg3_phy_stop(tp);
18017         tg3_netif_stop(tp);
18018
18019         tg3_timer_stop(tp);
18020
18021         tg3_full_lock(tp, 1);
18022         tg3_disable_ints(tp);
18023         tg3_full_unlock(tp);
18024
18025         netif_device_detach(dev);
18026
18027         tg3_full_lock(tp, 0);
18028         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18029         tg3_flag_clear(tp, INIT_COMPLETE);
18030         tg3_full_unlock(tp);
18031
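              /* If the chip cannot be prepared for power-down, restart the
               * hardware and reattach the interface so suspend fails with
               * the device still usable.
               */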
18032         err = tg3_power_down_prepare(tp);
18033         if (err) {
18034                 int err2;
18035
18036                 tg3_full_lock(tp, 0);
18037
18038                 tg3_flag_set(tp, INIT_COMPLETE);
18039                 err2 = tg3_restart_hw(tp, true);
18040                 if (err2)
18041                         goto out;
18042
18043                 tg3_timer_start(tp);
18044
18045                 netif_device_attach(dev);
18046                 tg3_netif_start(tp);
18047
18048 out:
18049                 tg3_full_unlock(tp);
18050
18051                 if (!err2)
18052                         tg3_phy_start(tp);
18053         }
18054
18055 unlock:
18056         rtnl_unlock();
18057         return err;
18058 }
18059
18060 static int tg3_resume(struct device *device)
18061 {
18062         struct pci_dev *pdev = to_pci_dev(device);
18063         struct net_device *dev = pci_get_drvdata(pdev);
18064         struct tg3 *tp = netdev_priv(dev);
18065         int err = 0;
18066
18067         rtnl_lock();
18068
18069         if (!netif_running(dev))
18070                 goto unlock;
18071
18072         netif_device_attach(dev);
18073
18074         tg3_full_lock(tp, 0);
18075
18076         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18077
18078         tg3_flag_set(tp, INIT_COMPLETE);
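              /* Reset the PHY only if the link was not deliberately kept up
               * across the power-down.
               */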
18079         err = tg3_restart_hw(tp,
18080                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18081         if (err)
18082                 goto out;
18083
18084         tg3_timer_start(tp);
18085
18086         tg3_netif_start(tp);
18087
18088 out:
18089         tg3_full_unlock(tp);
18090
18091         if (!err)
18092                 tg3_phy_start(tp);
18093
18094 unlock:
18095         rtnl_unlock();
18096         return err;
18097 }
18098 #endif /* CONFIG_PM_SLEEP */
18099
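       /* SIMPLE_DEV_PM_OPS references tg3_suspend/tg3_resume only when
        * CONFIG_PM_SLEEP is enabled, matching the #ifdef above.
        */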
18100 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18101
18102 static void tg3_shutdown(struct pci_dev *pdev)
18103 {
18104         struct net_device *dev = pci_get_drvdata(pdev);
18105         struct tg3 *tp = netdev_priv(dev);
18106
18107         rtnl_lock();
18108         netif_device_detach(dev);
18109
18110         if (netif_running(dev))
18111                 dev_close(dev);
18112
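              /* Fully power the NIC down only when the system is actually
               * powering off, not on a mere restart.
               */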
18113         if (system_state == SYSTEM_POWER_OFF)
18114                 tg3_power_down(tp);
18115
18116         rtnl_unlock();
18117 }
18118
18119 /**
18120  * tg3_io_error_detected - called when a PCI error is detected
18121  * @pdev: Pointer to PCI device
18122  * @state: The current PCI connection state
18123  *
18124  * This function is called after a PCI bus error affecting
18125  * this device has been detected.
18126  */
18127 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18128                                               pci_channel_state_t state)
18129 {
18130         struct net_device *netdev = pci_get_drvdata(pdev);
18131         struct tg3 *tp = netdev_priv(netdev);
18132         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18133
18134         dev_info(&pdev->dev, "PCI I/O error detected\n");
18135
18136         rtnl_lock();
18137
18138         /* We may not have a netdev yet, or it may not be running */
18139         if (!netdev || !netif_running(netdev))
18140                 goto done;
18141
18142         /* We needn't recover from a permanent error */
18143         if (state == pci_channel_io_frozen)
18144                 tp->pcierr_recovery = true;
18145
18146         tg3_phy_stop(tp);
18147
18148         tg3_netif_stop(tp);
18149
18150         tg3_timer_stop(tp);
18151
18152         /* Make sure that the reset task doesn't run */
18153         tg3_reset_task_cancel(tp);
18154
18155         netif_device_detach(netdev);
18156
18157         /* Clean up software state, even if MMIO is blocked */
18158         tg3_full_lock(tp, 0);
18159         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18160         tg3_full_unlock(tp);
18161
18162 done:
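              /* A permanent failure cannot be recovered from: close the
               * device and ask the PCI core to disconnect.  Otherwise just
               * disable the device until the slot reset runs.
               */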
18163         if (state == pci_channel_io_perm_failure) {
18164                 if (netdev) {
18165                         tg3_napi_enable(tp);
18166                         dev_close(netdev);
18167                 }
18168                 err = PCI_ERS_RESULT_DISCONNECT;
18169         } else {
18170                 pci_disable_device(pdev);
18171         }
18172
18173         rtnl_unlock();
18174
18175         return err;
18176 }
18177
18178 /**
18179  * tg3_io_slot_reset - called after the PCI bus has been reset.
18180  * @pdev: Pointer to PCI device
18181  *
18182  * Restart the card from scratch, as if from a cold boot.
18183  * At this point, the card has experienced a hard reset,
18184  * followed by fixups by the BIOS, and has its config space
18185  * set up identically to what it was at cold boot.
18186  */
18187 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18188 {
18189         struct net_device *netdev = pci_get_drvdata(pdev);
18190         struct tg3 *tp = netdev_priv(netdev);
18191         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18192         int err;
18193
18194         rtnl_lock();
18195
18196         if (pci_enable_device(pdev)) {
18197                 dev_err(&pdev->dev,
18198                         "Cannot re-enable PCI device after reset.\n");
18199                 goto done;
18200         }
18201
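              /* Restore bus mastering and the config space saved at probe
               * time, then re-save it so a later restore starts from this
               * post-reset state.
               */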
18202         pci_set_master(pdev);
18203         pci_restore_state(pdev);
18204         pci_save_state(pdev);
18205
18206         if (!netdev || !netif_running(netdev)) {
18207                 rc = PCI_ERS_RESULT_RECOVERED;
18208                 goto done;
18209         }
18210
18211         err = tg3_power_up(tp);
18212         if (err)
18213                 goto done;
18214
18215         rc = PCI_ERS_RESULT_RECOVERED;
18216
18217 done:
18218         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18219                 tg3_napi_enable(tp);
18220                 dev_close(netdev);
18221         }
18222         rtnl_unlock();
18223
18224         return rc;
18225 }
18226
18227 /**
18228  * tg3_io_resume - called when traffic can start flowing again.
18229  * @pdev: Pointer to PCI device
18230  *
18231  * This callback is called when the error recovery driver tells
18232  * us that it's OK to resume normal operation.
18233  */
18234 static void tg3_io_resume(struct pci_dev *pdev)
18235 {
18236         struct net_device *netdev = pci_get_drvdata(pdev);
18237         struct tg3 *tp = netdev_priv(netdev);
18238         int err;
18239
18240         rtnl_lock();
18241
18242         if (!netdev || !netif_running(netdev))
18243                 goto done;
18244
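              /* The slot is usable again: reprogram the hardware from
               * scratch and restart the timer, NAPI and the TX queues.
               */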
18245         tg3_full_lock(tp, 0);
18246         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18247         tg3_flag_set(tp, INIT_COMPLETE);
18248         err = tg3_restart_hw(tp, true);
18249         if (err) {
18250                 tg3_full_unlock(tp);
18251                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18252                 goto done;
18253         }
18254
18255         netif_device_attach(netdev);
18256
18257         tg3_timer_start(tp);
18258
18259         tg3_netif_start(tp);
18260
18261         tg3_full_unlock(tp);
18262
18263         tg3_phy_start(tp);
18264
18265 done:
18266         tp->pcierr_recovery = false;
18267         rtnl_unlock();
18268 }
18269
18270 static const struct pci_error_handlers tg3_err_handler = {
18271         .error_detected = tg3_io_error_detected,
18272         .slot_reset     = tg3_io_slot_reset,
18273         .resume         = tg3_io_resume
18274 };
18275
18276 static struct pci_driver tg3_driver = {
18277         .name           = DRV_MODULE_NAME,
18278         .id_table       = tg3_pci_tbl,
18279         .probe          = tg3_init_one,
18280         .remove         = tg3_remove_one,
18281         .err_handler    = &tg3_err_handler,
18282         .driver.pm      = &tg3_pm_ops,
18283         .shutdown       = tg3_shutdown,
18284 };
18285
18286 module_pci_driver(tg3_driver);