/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

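/* Usage sketch (illustrative, not referenced elsewhere): the wrappers
 * above let callers name a flag without its TG3_FLAG_ prefix, e.g.
 *
 *        if (tg3_flag(tp, ENABLE_APE))
 *                tg3_flag_set(tp, TAGGED_STATUS);
 *
 * which expands to test_bit(TG3_FLAG_ENABLE_APE, tp->tg3_flags) and
 * set_bit(TG3_FLAG_TAGGED_STATUS, tp->tg3_flags).
 */
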
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

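/* Worked example of the comment above (illustrative only): with
 * TG3_TX_RING_SIZE fixed at a compile-time power of two, the generic
 *
 *        next = (entry + 1) % TG3_TX_RING_SIZE;
 *
 * reduces to the mask form that NEXT_TX() uses:
 *
 *        next = (entry + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * so no hardware divide is ever emitted for ring index wrap-around.
 */
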
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

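/* Hypothetical RX-path sketch of how this threshold is consumed; names
 * other than TG3_RX_COPY_THRESH() are illustrative:
 *
 *        if (len > TG3_RX_COPY_THRESH(tp))
 *                ... pass the original DMA buffer up the stack ...
 *        else
 *                ... memcpy the frame into a freshly allocated skb ...
 *
 * When the #if above picks the constant branch, the comparison compiles
 * down to an immediate and costs no tp dereference.
 */
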
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

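/* Usage note (standard module tooling assumed): the bitmap can be set
 * at load time, e.g.
 *
 *        modprobe tg3 tg3_debug=0x7fff
 *
 * while the default of -1 selects TG3_DEF_MSG_ENABLE defined above.
 */
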
#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

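/* Usage note: these key strings surface through the stock ethtool
 * utility; assuming an interface named eth0,
 *
 *        ethtool -S eth0                 (dumps ethtool_stats_keys counters)
 *        ethtool -t eth0 offline         (runs the ethtool_test_keys tests)
 */
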

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

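/* Usage sketch: callers that toggle GPIOs or clocks use the waiting
 * variant defined below so the delay is honored on both the posted and
 * non-posted paths, e.g. as tg3_switch_clocks() later does:
 *
 *        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */
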
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

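/* Usage sketch (the offset here is only an assumed example): a word of
 * shared NIC memory is fetched through the same window, e.g.
 *
 *        u32 val;
 *
 *        tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * Both helpers park the window base back at zero afterwards so other
 * agents never see a stale window position.
 */
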
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

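/* Usage pattern (mirrors what the PHY helpers below actually do):
 * accesses that the APE firmware may also make are bracketed by a
 * lock/unlock pair, e.g.
 *
 *        tg3_ape_lock(tp, tp->phy_ape_lock);
 *        ... touch the MDIO interface ...
 *        tg3_ape_unlock(tp, tp->phy_ape_lock);
 */
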
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1378
1379 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1380 {
1381         struct tg3 *tp = bp->priv;
1382         u32 val;
1383
1384         spin_lock_bh(&tp->lock);
1385
1386         if (__tg3_readphy(tp, mii_id, reg, &val))
1387                 val = -EIO;
1388
1389         spin_unlock_bh(&tp->lock);
1390
1391         return val;
1392 }
1393
1394 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1395 {
1396         struct tg3 *tp = bp->priv;
1397         u32 ret = 0;
1398
1399         spin_lock_bh(&tp->lock);
1400
1401         if (__tg3_writephy(tp, mii_id, reg, val))
1402                 ret = -EIO;
1403
1404         spin_unlock_bh(&tp->lock);
1405
1406         return ret;
1407 }
1408
1409 static void tg3_mdio_config_5785(struct tg3 *tp)
1410 {
1411         u32 val;
1412         struct phy_device *phydev;
1413
1414         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1415         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1416         case PHY_ID_BCM50610:
1417         case PHY_ID_BCM50610M:
1418                 val = MAC_PHYCFG2_50610_LED_MODES;
1419                 break;
1420         case PHY_ID_BCMAC131:
1421                 val = MAC_PHYCFG2_AC131_LED_MODES;
1422                 break;
1423         case PHY_ID_RTL8211C:
1424                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1425                 break;
1426         case PHY_ID_RTL8201E:
1427                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1428                 break;
1429         default:
1430                 return;
1431         }
1432
1433         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1434                 tw32(MAC_PHYCFG2, val);
1435
1436                 val = tr32(MAC_PHYCFG1);
1437                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1438                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1439                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1440                 tw32(MAC_PHYCFG1, val);
1441
1442                 return;
1443         }
1444
1445         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1446                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1447                        MAC_PHYCFG2_FMODE_MASK_MASK |
1448                        MAC_PHYCFG2_GMODE_MASK_MASK |
1449                        MAC_PHYCFG2_ACT_MASK_MASK   |
1450                        MAC_PHYCFG2_QUAL_MASK_MASK |
1451                        MAC_PHYCFG2_INBAND_ENABLE;
1452
1453         tw32(MAC_PHYCFG2, val);
1454
1455         val = tr32(MAC_PHYCFG1);
1456         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1457                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1458         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1459                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1460                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1461                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1462                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1463         }
1464         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1465                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1466         tw32(MAC_PHYCFG1, val);
1467
1468         val = tr32(MAC_EXT_RGMII_MODE);
1469         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1470                  MAC_RGMII_MODE_RX_QUALITY |
1471                  MAC_RGMII_MODE_RX_ACTIVITY |
1472                  MAC_RGMII_MODE_RX_ENG_DET |
1473                  MAC_RGMII_MODE_TX_ENABLE |
1474                  MAC_RGMII_MODE_TX_LOWPWR |
1475                  MAC_RGMII_MODE_TX_RESET);
1476         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1477                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1478                         val |= MAC_RGMII_MODE_RX_INT_B |
1479                                MAC_RGMII_MODE_RX_QUALITY |
1480                                MAC_RGMII_MODE_RX_ACTIVITY |
1481                                MAC_RGMII_MODE_RX_ENG_DET;
1482                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1483                         val |= MAC_RGMII_MODE_TX_ENABLE |
1484                                MAC_RGMII_MODE_TX_LOWPWR |
1485                                MAC_RGMII_MODE_TX_RESET;
1486         }
1487         tw32(MAC_EXT_RGMII_MODE, val);
1488 }
1489
1490 static void tg3_mdio_start(struct tg3 *tp)
1491 {
1492         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1493         tw32_f(MAC_MI_MODE, tp->mi_mode);
1494         udelay(80);
1495
1496         if (tg3_flag(tp, MDIOBUS_INITED) &&
1497             tg3_asic_rev(tp) == ASIC_REV_5785)
1498                 tg3_mdio_config_5785(tp);
1499 }
1500
1501 static int tg3_mdio_init(struct tg3 *tp)
1502 {
1503         int i;
1504         u32 reg;
1505         struct phy_device *phydev;
1506
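        /* Work out the PHY's MDIO address.  5717-plus parts put each
         * PCI function's PHY at fn + 1, with serdes devices strapped
         * seven addresses higher; SSB cores behind a roboswitch ask
         * the ssb layer; everything else uses the fixed MII address.
         */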
1507         if (tg3_flag(tp, 5717_PLUS)) {
1508                 u32 is_serdes;
1509
1510                 tp->phy_addr = tp->pci_fn + 1;
1511
1512                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1513                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1514                 else
1515                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1516                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1517                 if (is_serdes)
1518                         tp->phy_addr += 7;
1519         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1520                 int addr;
1521
1522                 addr = ssb_gige_get_phyaddr(tp->pdev);
1523                 if (addr < 0)
1524                         return addr;
1525                 tp->phy_addr = addr;
1526         } else
1527                 tp->phy_addr = TG3_PHY_MII_ADDR;
1528
1529         tg3_mdio_start(tp);
1530
1531         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1532                 return 0;
1533
1534         tp->mdio_bus = mdiobus_alloc();
1535         if (tp->mdio_bus == NULL)
1536                 return -ENOMEM;
1537
1538         tp->mdio_bus->name     = "tg3 mdio bus";
1539         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1540                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1541         tp->mdio_bus->priv     = tp;
1542         tp->mdio_bus->parent   = &tp->pdev->dev;
1543         tp->mdio_bus->read     = &tg3_mdio_read;
1544         tp->mdio_bus->write    = &tg3_mdio_write;
1545         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1546
1547         /* The bus registration will look for all the PHYs on the mdio bus.
1548          * Unfortunately, it does not ensure the PHY is powered up before
1549          * accessing the PHY ID registers.  A chip reset is the
1550          * quickest way to bring the device back to an operational state.
1551          */
1552         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1553                 tg3_bmcr_reset(tp);
1554
1555         i = mdiobus_register(tp->mdio_bus);
1556         if (i) {
1557                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1558                 mdiobus_free(tp->mdio_bus);
1559                 return i;
1560         }
1561
1562         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1563
1564         if (!phydev || !phydev->drv) {
1565                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1566                 mdiobus_unregister(tp->mdio_bus);
1567                 mdiobus_free(tp->mdio_bus);
1568                 return -ENODEV;
1569         }
1570
1571         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1572         case PHY_ID_BCM57780:
1573                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1574                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1575                 break;
1576         case PHY_ID_BCM50610:
1577         case PHY_ID_BCM50610M:
1578                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1579                                      PHY_BRCM_RX_REFCLK_UNUSED |
1580                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1581                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1582                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1583                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1584                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1585                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1586                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1587                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1588                 /* fallthru */
1589         case PHY_ID_RTL8211C:
1590                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1591                 break;
1592         case PHY_ID_RTL8201E:
1593         case PHY_ID_BCMAC131:
1594                 phydev->interface = PHY_INTERFACE_MODE_MII;
1595                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1596                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1597                 break;
1598         }
1599
1600         tg3_flag_set(tp, MDIOBUS_INITED);
1601
1602         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1603                 tg3_mdio_config_5785(tp);
1604
1605         return 0;
1606 }
1607
1608 static void tg3_mdio_fini(struct tg3 *tp)
1609 {
1610         if (tg3_flag(tp, MDIOBUS_INITED)) {
1611                 tg3_flag_clear(tp, MDIOBUS_INITED);
1612                 mdiobus_unregister(tp->mdio_bus);
1613                 mdiobus_free(tp->mdio_bus);
1614         }
1615 }
1616
1617 /* tp->lock is held. */
1618 static inline void tg3_generate_fw_event(struct tg3 *tp)
1619 {
1620         u32 val;
1621
1622         val = tr32(GRC_RX_CPU_EVENT);
1623         val |= GRC_RX_CPU_DRIVER_EVENT;
1624         tw32_f(GRC_RX_CPU_EVENT, val);
1625
1626         tp->last_event_jiffies = jiffies;
1627 }
1628
1629 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1630
1631 /* tp->lock is held. */
1632 static void tg3_wait_for_event_ack(struct tg3 *tp)
1633 {
1634         int i;
1635         unsigned int delay_cnt;
1636         long time_remain;
1637
1638         /* If enough time has passed, no wait is necessary. */
1639         time_remain = (long)(tp->last_event_jiffies + 1 +
1640                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1641                       (long)jiffies;
1642         if (time_remain < 0)
1643                 return;
1644
1645         /* Check if we can shorten the wait time. */
1646         delay_cnt = jiffies_to_usecs(time_remain);
1647         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1648                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1649         delay_cnt = (delay_cnt >> 3) + 1;
1650
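        /* Poll roughly every 8 usec: the shift above divides the
         * remaining time into udelay(8) steps, plus one so we always
         * loop at least once.
         */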
1651         for (i = 0; i < delay_cnt; i++) {
1652                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1653                         break;
1654                 if (pci_channel_offline(tp->pdev))
1655                         break;
1656
1657                 udelay(8);
1658         }
1659 }
1660
1661 /* tp->lock is held. */
1662 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1663 {
1664         u32 reg, val;
1665
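        /* Each word handed to the firmware packs two 16-bit MII
         * registers: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000
         * (copper only), and PHYADDR in the upper half of the last
         * word.
         */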
1666         val = 0;
1667         if (!tg3_readphy(tp, MII_BMCR, &reg))
1668                 val = reg << 16;
1669         if (!tg3_readphy(tp, MII_BMSR, &reg))
1670                 val |= (reg & 0xffff);
1671         *data++ = val;
1672
1673         val = 0;
1674         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1675                 val = reg << 16;
1676         if (!tg3_readphy(tp, MII_LPA, &reg))
1677                 val |= (reg & 0xffff);
1678         *data++ = val;
1679
1680         val = 0;
1681         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1682                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1683                         val = reg << 16;
1684                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1685                         val |= (reg & 0xffff);
1686         }
1687         *data++ = val;
1688
1689         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1690                 val = reg << 16;
1691         else
1692                 val = 0;
1693         *data++ = val;
1694 }
1695
1696 /* tp->lock is held. */
1697 static void tg3_ump_link_report(struct tg3 *tp)
1698 {
1699         u32 data[4];
1700
1701         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1702                 return;
1703
1704         tg3_phy_gather_ump_data(tp, data);
1705
1706         tg3_wait_for_event_ack(tp);
1707
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1712         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1713         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1714
1715         tg3_generate_fw_event(tp);
1716 }
1717
1718 /* tp->lock is held. */
1719 static void tg3_stop_fw(struct tg3 *tp)
1720 {
1721         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1722                 /* Wait for RX cpu to ACK the previous event. */
1723                 tg3_wait_for_event_ack(tp);
1724
1725                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1726
1727                 tg3_generate_fw_event(tp);
1728
1729                 /* Wait for RX cpu to ACK this event. */
1730                 tg3_wait_for_event_ack(tp);
1731         }
1732 }
1733
1734 /* tp->lock is held. */
1735 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1736 {
1737         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1738                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1739
1740         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1741                 switch (kind) {
1742                 case RESET_KIND_INIT:
1743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744                                       DRV_STATE_START);
1745                         break;
1746
1747                 case RESET_KIND_SHUTDOWN:
1748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749                                       DRV_STATE_UNLOAD);
1750                         break;
1751
1752                 case RESET_KIND_SUSPEND:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_SUSPEND);
1755                         break;
1756
1757                 default:
1758                         break;
1759                 }
1760         }
1761 }
1762
1763 /* tp->lock is held. */
1764 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1765 {
1766         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1767                 switch (kind) {
1768                 case RESET_KIND_INIT:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_START_DONE);
1771                         break;
1772
1773                 case RESET_KIND_SHUTDOWN:
1774                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775                                       DRV_STATE_UNLOAD_DONE);
1776                         break;
1777
1778                 default:
1779                         break;
1780                 }
1781         }
1782 }
1783
1784 /* tp->lock is held. */
1785 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1786 {
1787         if (tg3_flag(tp, ENABLE_ASF)) {
1788                 switch (kind) {
1789                 case RESET_KIND_INIT:
1790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791                                       DRV_STATE_START);
1792                         break;
1793
1794                 case RESET_KIND_SHUTDOWN:
1795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796                                       DRV_STATE_UNLOAD);
1797                         break;
1798
1799                 case RESET_KIND_SUSPEND:
1800                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1801                                       DRV_STATE_SUSPEND);
1802                         break;
1803
1804                 default:
1805                         break;
1806                 }
1807         }
1808 }
1809
1810 static int tg3_poll_fw(struct tg3 *tp)
1811 {
1812         int i;
1813         u32 val;
1814
1815         if (tg3_flag(tp, NO_FWARE_REPORTED))
1816                 return 0;
1817
1818         if (tg3_flag(tp, IS_SSB_CORE)) {
1819                 /* We don't use firmware. */
1820                 return 0;
1821         }
1822
1823         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1824                 /* Wait up to 20ms for init done. */
1825                 for (i = 0; i < 200; i++) {
1826                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1827                                 return 0;
1828                         if (pci_channel_offline(tp->pdev))
1829                                 return -ENODEV;
1830
1831                         udelay(100);
1832                 }
1833                 return -ENODEV;
1834         }
1835
1836         /* Wait for firmware initialization to complete. */
1837         for (i = 0; i < 100000; i++) {
1838                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1839                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1840                         break;
1841                 if (pci_channel_offline(tp->pdev)) {
1842                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1843                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1844                                 netdev_info(tp->dev, "No firmware running\n");
1845                         }
1846
1847                         break;
1848                 }
1849
1850                 udelay(10);
1851         }
1852
1853         /* Chip might not be fitted with firmware.  Some Sun onboard
1854          * parts are configured like that.  So don't signal the timeout
1855          * of the above loop as an error, but do report the lack of
1856          * running firmware once.
1857          */
1858         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1859                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1860
1861                 netdev_info(tp->dev, "No firmware running\n");
1862         }
1863
1864         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1865                 /* The 57765 A0 needs a little more
1866                  * time to do some important work.
1867                  */
1868                 mdelay(10);
1869         }
1870
1871         return 0;
1872 }
1873
1874 static void tg3_link_report(struct tg3 *tp)
1875 {
1876         if (!netif_carrier_ok(tp->dev)) {
1877                 netif_info(tp, link, tp->dev, "Link is down\n");
1878                 tg3_ump_link_report(tp);
1879         } else if (netif_msg_link(tp)) {
1880                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1881                             (tp->link_config.active_speed == SPEED_1000 ?
1882                              1000 :
1883                              (tp->link_config.active_speed == SPEED_100 ?
1884                               100 : 10)),
1885                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1886                              "full" : "half"));
1887
1888                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1889                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1890                             "on" : "off",
1891                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1892                             "on" : "off");
1893
1894                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1895                         netdev_info(tp->dev, "EEE is %s\n",
1896                                     tp->setlpicnt ? "enabled" : "disabled");
1897
1898                 tg3_ump_link_report(tp);
1899         }
1900
1901         tp->link_up = netif_carrier_ok(tp->dev);
1902 }
1903
1904 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1905 {
1906         u32 flowctrl = 0;
1907
1908         if (adv & ADVERTISE_PAUSE_CAP) {
1909                 flowctrl |= FLOW_CTRL_RX;
1910                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1911                         flowctrl |= FLOW_CTRL_TX;
1912         } else if (adv & ADVERTISE_PAUSE_ASYM)
1913                 flowctrl |= FLOW_CTRL_TX;
1914
1915         return flowctrl;
1916 }
1917
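/* Map the driver's pause settings onto the 1000BASE-X
 * advertisement bits:
 *
 *   TX + RX -> PAUSE
 *   TX only -> ASYM_PAUSE
 *   RX only -> PAUSE | ASYM_PAUSE
 *   neither -> 0
 */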
1918 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1919 {
1920         u16 miireg;
1921
1922         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1923                 miireg = ADVERTISE_1000XPAUSE;
1924         else if (flow_ctrl & FLOW_CTRL_TX)
1925                 miireg = ADVERTISE_1000XPSE_ASYM;
1926         else if (flow_ctrl & FLOW_CTRL_RX)
1927                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1928         else
1929                 miireg = 0;
1930
1931         return miireg;
1932 }
1933
1934 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1935 {
1936         u32 flowctrl = 0;
1937
1938         if (adv & ADVERTISE_1000XPAUSE) {
1939                 flowctrl |= FLOW_CTRL_RX;
1940                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1941                         flowctrl |= FLOW_CTRL_TX;
1942         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1943                 flowctrl |= FLOW_CTRL_TX;
1944
1945         return flowctrl;
1946 }
1947
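/* Resolve the negotiated pause configuration from the local and
 * remote 1000BASE-X advertisements, per the usual IEEE 802.3
 * pause resolution rules: symmetric PAUSE on both sides enables
 * both directions, otherwise matching asymmetric bits select a
 * single direction.
 */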
1948 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1949 {
1950         u8 cap = 0;
1951
1952         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1953                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1954         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1955                 if (lcladv & ADVERTISE_1000XPAUSE)
1956                         cap = FLOW_CTRL_RX;
1957                 if (rmtadv & ADVERTISE_1000XPAUSE)
1958                         cap = FLOW_CTRL_TX;
1959         }
1960
1961         return cap;
1962 }
1963
1964 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1965 {
1966         u8 autoneg;
1967         u8 flowctrl = 0;
1968         u32 old_rx_mode = tp->rx_mode;
1969         u32 old_tx_mode = tp->tx_mode;
1970
1971         if (tg3_flag(tp, USE_PHYLIB))
1972                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1973         else
1974                 autoneg = tp->link_config.autoneg;
1975
1976         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1977                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1978                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1979                 else
1980                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1981         } else
1982                 flowctrl = tp->link_config.flowctrl;
1983
1984         tp->link_config.active_flowctrl = flowctrl;
1985
1986         if (flowctrl & FLOW_CTRL_RX)
1987                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1988         else
1989                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1990
1991         if (old_rx_mode != tp->rx_mode)
1992                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1993
1994         if (flowctrl & FLOW_CTRL_TX)
1995                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1996         else
1997                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1998
1999         if (old_tx_mode != tp->tx_mode)
2000                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2001 }
2002
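/* Link-change callback invoked by phylib (registered via
 * phy_connect() in tg3_phy_init()).  Mirrors the PHY's negotiated
 * speed/duplex/pause state into the MAC mode registers and
 * reports link transitions.
 */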
2003 static void tg3_adjust_link(struct net_device *dev)
2004 {
2005         u8 oldflowctrl, linkmesg = 0;
2006         u32 mac_mode, lcl_adv, rmt_adv;
2007         struct tg3 *tp = netdev_priv(dev);
2008         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2009
2010         spin_lock_bh(&tp->lock);
2011
2012         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2013                                     MAC_MODE_HALF_DUPLEX);
2014
2015         oldflowctrl = tp->link_config.active_flowctrl;
2016
2017         if (phydev->link) {
2018                 lcl_adv = 0;
2019                 rmt_adv = 0;
2020
2021                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2022                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2023                 else if (phydev->speed == SPEED_1000 ||
2024                          tg3_asic_rev(tp) != ASIC_REV_5785)
2025                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2026                 else
2027                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2028
2029                 if (phydev->duplex == DUPLEX_HALF)
2030                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2031                 else {
2032                         lcl_adv = mii_advertise_flowctrl(
2033                                   tp->link_config.flowctrl);
2034
2035                         if (phydev->pause)
2036                                 rmt_adv = LPA_PAUSE_CAP;
2037                         if (phydev->asym_pause)
2038                                 rmt_adv |= LPA_PAUSE_ASYM;
2039                 }
2040
2041                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2042         } else
2043                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2044
2045         if (mac_mode != tp->mac_mode) {
2046                 tp->mac_mode = mac_mode;
2047                 tw32_f(MAC_MODE, tp->mac_mode);
2048                 udelay(40);
2049         }
2050
2051         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2052                 if (phydev->speed == SPEED_10)
2053                         tw32(MAC_MI_STAT,
2054                              MAC_MI_STAT_10MBPS_MODE |
2055                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2056                 else
2057                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058         }
2059
2060         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2061                 tw32(MAC_TX_LENGTHS,
2062                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063                       (6 << TX_LENGTHS_IPG_SHIFT) |
2064                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065         else
2066                 tw32(MAC_TX_LENGTHS,
2067                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2068                       (6 << TX_LENGTHS_IPG_SHIFT) |
2069                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2070
2071         if (phydev->link != tp->old_link ||
2072             phydev->speed != tp->link_config.active_speed ||
2073             phydev->duplex != tp->link_config.active_duplex ||
2074             oldflowctrl != tp->link_config.active_flowctrl)
2075                 linkmesg = 1;
2076
2077         tp->old_link = phydev->link;
2078         tp->link_config.active_speed = phydev->speed;
2079         tp->link_config.active_duplex = phydev->duplex;
2080
2081         spin_unlock_bh(&tp->lock);
2082
2083         if (linkmesg)
2084                 tg3_link_report(tp);
2085 }
2086
2087 static int tg3_phy_init(struct tg3 *tp)
2088 {
2089         struct phy_device *phydev;
2090
2091         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2092                 return 0;
2093
2094         /* Bring the PHY back to a known state. */
2095         tg3_bmcr_reset(tp);
2096
2097         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2098
2099         /* Attach the MAC to the PHY. */
2100         phydev = phy_connect(tp->dev, phydev_name(phydev),
2101                              tg3_adjust_link, phydev->interface);
2102         if (IS_ERR(phydev)) {
2103                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2104                 return PTR_ERR(phydev);
2105         }
2106
2107         /* Mask with MAC supported features. */
2108         switch (phydev->interface) {
2109         case PHY_INTERFACE_MODE_GMII:
2110         case PHY_INTERFACE_MODE_RGMII:
2111                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2112                         phydev->supported &= (PHY_GBIT_FEATURES |
2113                                               SUPPORTED_Pause |
2114                                               SUPPORTED_Asym_Pause);
2115                         break;
2116                 }
2117                 /* fallthru */
2118         case PHY_INTERFACE_MODE_MII:
2119                 phydev->supported &= (PHY_BASIC_FEATURES |
2120                                       SUPPORTED_Pause |
2121                                       SUPPORTED_Asym_Pause);
2122                 break;
2123         default:
2124                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2125                 return -EINVAL;
2126         }
2127
2128         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2129
2130         phydev->advertising = phydev->supported;
2131
2132         phy_attached_info(phydev);
2133
2134         return 0;
2135 }
2136
2137 static void tg3_phy_start(struct tg3 *tp)
2138 {
2139         struct phy_device *phydev;
2140
2141         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142                 return;
2143
2144         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2145
2146         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148                 phydev->speed = tp->link_config.speed;
2149                 phydev->duplex = tp->link_config.duplex;
2150                 phydev->autoneg = tp->link_config.autoneg;
2151                 phydev->advertising = tp->link_config.advertising;
2152         }
2153
2154         phy_start(phydev);
2155
2156         phy_start_aneg(phydev);
2157 }
2158
2159 static void tg3_phy_stop(struct tg3 *tp)
2160 {
2161         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162                 return;
2163
2164         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 }
2166
2167 static void tg3_phy_fini(struct tg3 *tp)
2168 {
2169         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2171                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2172         }
2173 }
2174
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2176 {
2177         int err;
2178         u32 val;
2179
2180         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181                 return 0;
2182
2183         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184                 /* Cannot do read-modify-write on 5401 */
2185                 err = tg3_phy_auxctl_write(tp,
2186                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188                                            0x4c20);
2189                 goto done;
2190         }
2191
2192         err = tg3_phy_auxctl_read(tp,
2193                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194         if (err)
2195                 return err;
2196
2197         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198         err = tg3_phy_auxctl_write(tp,
2199                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2200
2201 done:
2202         return err;
2203 }
2204
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207         u32 phytest;
2208
2209         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210                 u32 phy;
2211
2212                 tg3_writephy(tp, MII_TG3_FET_TEST,
2213                              phytest | MII_TG3_FET_SHADOW_EN);
2214                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215                         if (enable)
2216                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217                         else
2218                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2220                 }
2221                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2222         }
2223 }
2224
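/* Enable or disable the PHY's auto power-down (APD) feature,
 * via the FET shadow registers on FET-style PHYs and the MISC
 * shadow SCR5/APD selectors otherwise.
 */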
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2226 {
2227         u32 reg;
2228
2229         if (!tg3_flag(tp, 5705_PLUS) ||
2230             (tg3_flag(tp, 5717_PLUS) &&
2231              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232                 return;
2233
2234         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235                 tg3_phy_fet_toggle_apd(tp, enable);
2236                 return;
2237         }
2238
2239         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241               MII_TG3_MISC_SHDW_SCR5_SDTL |
2242               MII_TG3_MISC_SHDW_SCR5_C125OE;
2243         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2245
2246         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2247
2249         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250         if (enable)
2251                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2252
2253         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2254 }
2255
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2257 {
2258         u32 phy;
2259
2260         if (!tg3_flag(tp, 5705_PLUS) ||
2261             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262                 return;
2263
2264         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265                 u32 ephy;
2266
2267                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2269
2270                         tg3_writephy(tp, MII_TG3_FET_TEST,
2271                                      ephy | MII_TG3_FET_SHADOW_EN);
2272                         if (!tg3_readphy(tp, reg, &phy)) {
2273                                 if (enable)
2274                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275                                 else
2276                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277                                 tg3_writephy(tp, reg, phy);
2278                         }
2279                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280                 }
2281         } else {
2282                 int ret;
2283
2284                 ret = tg3_phy_auxctl_read(tp,
2285                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286                 if (!ret) {
2287                         if (enable)
2288                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289                         else
2290                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291                         tg3_phy_auxctl_write(tp,
2292                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293                 }
2294         }
2295 }
2296
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 {
2299         int ret;
2300         u32 val;
2301
2302         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303                 return;
2304
2305         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306         if (!ret)
2307                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2309 }
2310
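/* Distribute the factory OTP calibration word into the PHY DSP
 * taps: AGC target, HPF filter/override, LPF disable, VDAC,
 * 10BT amplitude and resistor trim fields each land in their
 * own DSP register.
 */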
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 {
2313         u32 otp, phy;
2314
2315         if (!tp->phy_otp)
2316                 return;
2317
2318         otp = tp->phy_otp;
2319
2320         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321                 return;
2322
2323         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2326
2327         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2330
2331         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2334
2335         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2337
2338         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2340
2341         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2344
2345         tg3_phy_toggle_auxctl_smdsp(tp, false);
2346 }
2347
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2349 {
2350         u32 val;
2351         struct ethtool_eee *dest = &tp->eee;
2352
2353         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354                 return;
2355
2356         if (eee)
2357                 dest = eee;
2358
2359         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360                 return;
2361
2362         /* Pull eee_active */
2363         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365                 dest->eee_active = 1;
2366         } else
2367                 dest->eee_active = 0;
2368
2369         /* Pull lp advertised settings */
2370         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371                 return;
2372         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373
2374         /* Pull advertised and eee_enabled settings */
2375         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376                 return;
2377         dest->eee_enabled = !!val;
2378         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379
2380         /* Pull tx_lpi_enabled */
2381         val = tr32(TG3_CPMU_EEE_MODE);
2382         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2383
2384         /* Pull lpi timer value */
2385         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2386 }
2387
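/* Re-evaluate EEE after a link change.  On an autonegotiated
 * full-duplex 100/1000 link, program the LPI exit timer for the
 * new speed and, if the link partner resolved to EEE, arm
 * setlpicnt; otherwise clear the DSP TAP26 override and force
 * LPI off in the CPMU.
 */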
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2389 {
2390         u32 val;
2391
2392         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393                 return;
2394
2395         tp->setlpicnt = 0;
2396
2397         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398             current_link_up &&
2399             tp->link_config.active_duplex == DUPLEX_FULL &&
2400             (tp->link_config.active_speed == SPEED_100 ||
2401              tp->link_config.active_speed == SPEED_1000)) {
2402                 u32 eeectl;
2403
2404                 if (tp->link_config.active_speed == SPEED_1000)
2405                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406                 else
2407                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2408
2409                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2410
2411                 tg3_eee_pull_config(tp, NULL);
2412                 if (tp->eee.eee_active)
2413                         tp->setlpicnt = 2;
2414         }
2415
2416         if (!tp->setlpicnt) {
2417                 if (current_link_up &&
2418                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2421                 }
2422
2423                 val = tr32(TG3_CPMU_EEE_MODE);
2424                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2425         }
2426 }
2427
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2429 {
2430         u32 val;
2431
2432         if (tp->link_config.active_speed == SPEED_1000 &&
2433             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435              tg3_flag(tp, 57765_CLASS)) &&
2436             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437                 val = MII_TG3_DSP_TAP26_ALNOKO |
2438                       MII_TG3_DSP_TAP26_RMRXSTO;
2439                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2441         }
2442
2443         val = tr32(TG3_CPMU_EEE_MODE);
2444         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2445 }
2446
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2448 {
2449         int limit = 100;
2450
2451         while (limit--) {
2452                 u32 tmp32;
2453
2454                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455                         if ((tmp32 & 0x1000) == 0)
2456                                 break;
2457                 }
2458         }
2459         if (limit < 0)
2460                 return -EBUSY;
2461
2462         return 0;
2463 }
2464
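/* Write a canned test pattern into each of the four DSP
 * channels, trigger the macro, and read the pattern back.  A
 * macro timeout sets *resetp so the caller resets the PHY before
 * retrying; a read-back mismatch simply fails the attempt.
 */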
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2466 {
2467         static const u32 test_pat[4][6] = {
2468         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472         };
2473         int chan;
2474
2475         for (chan = 0; chan < 4; chan++) {
2476                 int i;
2477
2478                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479                              (chan * 0x2000) | 0x0200);
2480                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2481
2482                 for (i = 0; i < 6; i++)
2483                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484                                      test_pat[chan][i]);
2485
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487                 if (tg3_wait_macro_done(tp)) {
2488                         *resetp = 1;
2489                         return -EBUSY;
2490                 }
2491
2492                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493                              (chan * 0x2000) | 0x0200);
2494                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495                 if (tg3_wait_macro_done(tp)) {
2496                         *resetp = 1;
2497                         return -EBUSY;
2498                 }
2499
2500                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501                 if (tg3_wait_macro_done(tp)) {
2502                         *resetp = 1;
2503                         return -EBUSY;
2504                 }
2505
2506                 for (i = 0; i < 6; i += 2) {
2507                         u32 low, high;
2508
2509                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511                             tg3_wait_macro_done(tp)) {
2512                                 *resetp = 1;
2513                                 return -EBUSY;
2514                         }
2515                         low &= 0x7fff;
2516                         high &= 0x000f;
2517                         if (low != test_pat[chan][i] ||
2518                             high != test_pat[chan][i+1]) {
2519                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2522
2523                                 return -EBUSY;
2524                         }
2525                 }
2526         }
2527
2528         return 0;
2529 }
2530
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2532 {
2533         int chan;
2534
2535         for (chan = 0; chan < 4; chan++) {
2536                 int i;
2537
2538                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539                              (chan * 0x2000) | 0x0200);
2540                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541                 for (i = 0; i < 6; i++)
2542                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544                 if (tg3_wait_macro_done(tp))
2545                         return -EBUSY;
2546         }
2547
2548         return 0;
2549 }
2550
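/* Reset workaround for 5703/5704/5705 PHYs: force a 1000
 * full-duplex master link with the transmitter disabled, then
 * write and verify DSP test patterns on all four channels,
 * retrying (with a fresh PHY reset when requested) until the
 * patterns stick, and finally restore the original register
 * state.
 */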
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2552 {
2553         u32 reg32, phy9_orig;
2554         int retries, do_phy_reset, err;
2555
2556         retries = 10;
2557         do_phy_reset = 1;
2558         do {
2559                 if (do_phy_reset) {
2560                         err = tg3_bmcr_reset(tp);
2561                         if (err)
2562                                 return err;
2563                         do_phy_reset = 0;
2564                 }
2565
2566                 /* Disable transmitter and interrupt.  */
2567                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568                         continue;
2569
2570                 reg32 |= 0x3000;
2571                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2572
2573                 /* Set full-duplex, 1000 Mbps.  */
2574                 tg3_writephy(tp, MII_BMCR,
2575                              BMCR_FULLDPLX | BMCR_SPEED1000);
2576
2577                 /* Set to master mode.  */
2578                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579                         continue;
2580
2581                 tg3_writephy(tp, MII_CTRL1000,
2582                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2583
2584                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585                 if (err)
2586                         return err;
2587
2588                 /* Block the PHY control access.  */
2589                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2590
2591                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592                 if (!err)
2593                         break;
2594         } while (--retries);
2595
2596         err = tg3_phy_reset_chanpat(tp);
2597         if (err)
2598                 return err;
2599
2600         tg3_phydsp_write(tp, 0x8005, 0x0000);
2601
2602         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2604
2605         tg3_phy_toggle_auxctl_smdsp(tp, false);
2606
2607         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2608
2609         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2610         if (err)
2611                 return err;
2612
2613         reg32 &= ~0x3000;
2614         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2615
2616         return 0;
2617 }
2618
2619 static void tg3_carrier_off(struct tg3 *tp)
2620 {
2621         netif_carrier_off(tp->dev);
2622         tp->link_up = false;
2623 }
2624
2625 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2626 {
2627         if (tg3_flag(tp, ENABLE_ASF))
2628                 netdev_warn(tp->dev,
2629                             "Management side-band traffic will be interrupted during phy settings change\n");
2630 }
2631
2632 /* Fully reset the tigon3 PHY and reapply the chip-specific
2633  * workaround and calibration settings.
2634  */
2635 static int tg3_phy_reset(struct tg3 *tp)
2636 {
2637         u32 val, cpmuctrl;
2638         int err;
2639
2640         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2641                 val = tr32(GRC_MISC_CFG);
2642                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2643                 udelay(40);
2644         }
2645         err  = tg3_readphy(tp, MII_BMSR, &val);
2646         err |= tg3_readphy(tp, MII_BMSR, &val);
2647         if (err != 0)
2648                 return -EBUSY;
2649
2650         if (netif_running(tp->dev) && tp->link_up) {
2651                 netif_carrier_off(tp->dev);
2652                 tg3_link_report(tp);
2653         }
2654
2655         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2656             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2657             tg3_asic_rev(tp) == ASIC_REV_5705) {
2658                 err = tg3_phy_reset_5703_4_5(tp);
2659                 if (err)
2660                         return err;
2661                 goto out;
2662         }
2663
2664         cpmuctrl = 0;
2665         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2666             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2667                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2668                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2669                         tw32(TG3_CPMU_CTRL,
2670                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2671         }
2672
2673         err = tg3_bmcr_reset(tp);
2674         if (err)
2675                 return err;
2676
2677         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2678                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2679                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2680
2681                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2682         }
2683
2684         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2685             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2686                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2687                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2688                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2689                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2690                         udelay(40);
2691                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2692                 }
2693         }
2694
2695         if (tg3_flag(tp, 5717_PLUS) &&
2696             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2697                 return 0;
2698
2699         tg3_phy_apply_otp(tp);
2700
2701         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2702                 tg3_phy_toggle_apd(tp, true);
2703         else
2704                 tg3_phy_toggle_apd(tp, false);
2705
2706 out:
2707         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2708             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2709                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2710                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2711                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2712         }
2713
2714         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2715                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2717         }
2718
2719         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2720                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2722                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2723                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2724                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2725                 }
2726         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2727                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2728                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2729                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2730                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2731                                 tg3_writephy(tp, MII_TG3_TEST1,
2732                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2733                         } else
2734                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2735
2736                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2737                 }
2738         }
2739
2740         /* Set the Extended packet length bit (bit 14) on all
2741          * chips that support jumbo frames. */
2742         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2743                 /* Cannot do read-modify-write on 5401 */
2744                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2745         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2746                 /* Set bit 14 with read-modify-write to preserve other bits */
2747                 err = tg3_phy_auxctl_read(tp,
2748                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2749                 if (!err)
2750                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2751                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2752         }
2753
2754         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2755          * jumbo frames transmission.
2756          */
2757         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2758                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2759                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2761         }
2762
2763         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2764                 /* adjust output voltage */
2765                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2766         }
2767
2768         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2769                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2770
2771         tg3_phy_toggle_automdix(tp, true);
2772         tg3_phy_set_wirespeed(tp);
2773         return 0;
2774 }
2775
2776 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2777 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2778 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2779                                           TG3_GPIO_MSG_NEED_VAUX)
2780 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2781         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2782          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2783          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2784          (TG3_GPIO_MSG_DRVR_PRES << 12))
2785
2786 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2787         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2788          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2789          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2790          (TG3_GPIO_MSG_NEED_VAUX << 12))
2791
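/* Each PCI function owns a 4-bit slot in the shared GPIO status
 * word (function 0 at bits 0-3, function 1 at bits 4-7, and so
 * on, above TG3_APE_GPIO_MSG_SHIFT).  Replace this function's
 * slot with @newstat and return the updated bitmap.
 */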
2792 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2793 {
2794         u32 status, shift;
2795
2796         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2797             tg3_asic_rev(tp) == ASIC_REV_5719)
2798                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2799         else
2800                 status = tr32(TG3_CPMU_DRV_STATUS);
2801
2802         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2803         status &= ~(TG3_GPIO_MSG_MASK << shift);
2804         status |= (newstat << shift);
2805
2806         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2807             tg3_asic_rev(tp) == ASIC_REV_5719)
2808                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2809         else
2810                 tw32(TG3_CPMU_DRV_STATUS, status);
2811
2812         return status >> TG3_APE_GPIO_MSG_SHIFT;
2813 }
2814
2815 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2816 {
2817         if (!tg3_flag(tp, IS_NIC))
2818                 return 0;
2819
2820         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2821             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2822             tg3_asic_rev(tp) == ASIC_REV_5720) {
2823                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2824                         return -EIO;
2825
2826                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2827
2828                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2829                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2832         } else {
2833                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2834                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2835         }
2836
2837         return 0;
2838 }
2839
2840 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2841 {
2842         u32 grc_local_ctrl;
2843
2844         if (!tg3_flag(tp, IS_NIC) ||
2845             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2846             tg3_asic_rev(tp) == ASIC_REV_5701)
2847                 return;
2848
2849         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2850
2851         tw32_wait_f(GRC_LOCAL_CTRL,
2852                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2854
2855         tw32_wait_f(GRC_LOCAL_CTRL,
2856                     grc_local_ctrl,
2857                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2858
2859         tw32_wait_f(GRC_LOCAL_CTRL,
2860                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2861                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2862 }
2863
2864 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2865 {
2866         if (!tg3_flag(tp, IS_NIC))
2867                 return;
2868
2869         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2870             tg3_asic_rev(tp) == ASIC_REV_5701) {
2871                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2872                             (GRC_LCLCTRL_GPIO_OE0 |
2873                              GRC_LCLCTRL_GPIO_OE1 |
2874                              GRC_LCLCTRL_GPIO_OE2 |
2875                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2876                              GRC_LCLCTRL_GPIO_OUTPUT1),
2877                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2878         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2879                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2880                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2881                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2882                                      GRC_LCLCTRL_GPIO_OE1 |
2883                                      GRC_LCLCTRL_GPIO_OE2 |
2884                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2885                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2886                                      tp->grc_local_ctrl;
2887                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2891                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2892                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2893
2894                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2895                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2896                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2897         } else {
2898                 u32 no_gpio2;
2899                 u32 grc_local_ctrl = 0;
2900
2901                 /* Workaround to prevent excessive current draw. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
        bool need_vaux = false;

        /* The GPIOs do something completely different on 57765. */
        if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                tg3_frob_aux_power_5717(tp, include_wol ?
                                        tg3_flag(tp, WOL_ENABLE) != 0 : 0);
                return;
        }

        if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);

                /* remove_one() may have been run on the peer. */
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);

                        if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;

                        if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
                            tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }

        if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
            tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;

        if (need_vaux)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
        if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
                return 1;
        else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
                if (speed != SPEED_10)
                        return 1;
        } else if (speed == SPEED_10)
                return 1;

        return 0;
}
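
/* Summary of the rule above (reference only): tg3_5700_link_polarity()
 * returns 1, i.e. the caller should set MAC_MODE_LINK_POLARITY, when:
 *
 *   led_ctrl == LED_CTRL_MODE_PHY_2  ->  at any speed
 *   BCM5411 PHY                      ->  at any speed except 10 Mbps
 *   any other PHY                    ->  at 10 Mbps only
 */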

static bool tg3_phy_power_bug(struct tg3 *tp)
{
        switch (tg3_asic_rev(tp)) {
        case ASIC_REV_5700:
        case ASIC_REV_5704:
                return true;
        case ASIC_REV_5780:
                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        return true;
                return false;
        case ASIC_REV_5717:
                if (!tp->pci_fn)
                        return true;
                return false;
        case ASIC_REV_5719:
        case ASIC_REV_5720:
                if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
                    !tp->pci_fn)
                        return true;
                return false;
        }

        return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
        switch (tg3_asic_rev(tp)) {
        case ASIC_REV_5719:
        case ASIC_REV_5720:
                if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                    !tp->pci_fn)
                        return true;
                return false;
        }

        return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
                return;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                if (!tg3_phy_led_bug(tp))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (tg3_phy_power_bug(tp))
                return;

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                if (tp->nvram_lock_cnt > 0)
                        tp->nvram_lock_cnt--;
                if (tp->nvram_lock_cnt == 0)
                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
        }
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
        }
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
        }
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}
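
/* A minimal worked example of the blind byteswap above, with a purely
 * hypothetical register value: if tr32(GRC_EEPROM_DATA) returned
 * 0x44332211, swab32() hands back 0x11223344 (all four bytes reversed),
 * compensating for the data arriving opposite the native endianness.
 */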

#define NVRAM_CMD_TIMEOUT 5000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                usleep_range(10, 40);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
        if (tg3_flag(tp, NVRAM) &&
            tg3_flag(tp, NVRAM_BUFFERED) &&
            tg3_flag(tp, FLASH) &&
            !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))

                addr = ((addr / tp->nvram_pagesize) <<
                        ATMEL_AT45DB0X1B_PAGE_POS) +
                       (addr % tp->nvram_pagesize);

        return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
        if (tg3_flag(tp, NVRAM) &&
            tg3_flag(tp, NVRAM_BUFFERED) &&
            tg3_flag(tp, FLASH) &&
            !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))

                addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
                        tp->nvram_pagesize) +
                       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

        return addr;
}
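
/* Worked example for the two translations above, assuming the Atmel
 * AT45DB0x1B geometry of 264-byte pages addressed with
 * ATMEL_AT45DB0X1B_PAGE_POS (9) bits of in-page offset; the numbers are
 * illustrative only:
 *
 *   linear 1000    -> page 1000 / 264 = 3, offset 1000 % 264 = 208
 *                  -> physical (3 << 9) + 208 = 0x6d0
 *   physical 0x6d0 -> page 0x6d0 >> 9 = 3, offset 0x6d0 & 0x1ff = 208
 *                  -> linear 3 * 264 + 208 = 1000
 */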

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
        u32 v;
        int res = tg3_nvram_read(tp, offset, &v);
        if (!res)
                *val = cpu_to_be32(v);
        return res;
}
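
/* Sketch of what the bytestream guarantee above means, using a
 * hypothetical word v = 0x11223344 returned by tg3_nvram_read(): after
 * *val = cpu_to_be32(v), the bytes of *val in memory are 11 22 33 44 on
 * big- and little-endian hosts alike, so callers can memcpy() the result
 * into byte-oriented buffers without any further swapping.
 */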

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                phy_addr = offset & ~pagemask;

                for (j = 0; j < pagesize; j += 4) {
                        ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                                  (__be32 *) (tmp + j));
                        if (ret)
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));

                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                        if (ret)
                                break;
                }
                if (ret)
                        break;
        }

        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
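
/* The loop above is a conventional flash read-modify-write cycle.  In
 * outline (comment-only sketch of the code above, no new behavior):
 *
 *   1. read the whole page containing 'offset' into a bounce buffer
 *   2. merge the caller's bytes into that buffer at the page offset
 *   3. issue WREN, then erase the page at its physical address
 *   4. issue WREN again and rewrite the page one dword at a time,
 *      tagging the first/last dwords with NVRAM_CMD_FIRST/LAST
 *   5. finish with NVRAM_CMD_WRDI to drop the write-enable latch
 */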

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                if (page_off == 0 || i == 0)
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if ((nvram_cmd & NVRAM_CMD_FIRST) ||
                    !tg3_flag(tp, FLASH) ||
                    !tg3_flag(tp, 57765_PLUS))
                        tw32(NVRAM_ADDR, phy_addr);

                if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
                        u32 cmd;

                        cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
                        ret = tg3_nvram_exec_cmd(tp, cmd);
                        if (ret)
                                break;
                }
                if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
        }
        return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
        int ret;

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }

        if (!tg3_flag(tp, NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        } else {
                u32 grc_mode;

                ret = tg3_nvram_lock(tp);
                if (ret)
                        return ret;

                tg3_enable_nvram_access(tp);
                if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

                if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                } else {
                        ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                buf);
                }

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);
        }

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }

        return ret;
}

#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
        int i;
        const int iters = 10000;

        for (i = 0; i < iters; i++) {
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
                        break;
                if (pci_channel_offline(tp->pdev))
                        return -EBUSY;
        }

        return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
        int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
        udelay(10);

        return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
        return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
        tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
        int rc;

        BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (cpu_base == RX_CPU_BASE) {
                rc = tg3_rxcpu_pause(tp);
        } else {
                /*
                 * There is only an Rx CPU for the 5750 derivative in the
                 * BCM4785.
                 */
                if (tg3_flag(tp, IS_SSB_CORE))
                        return 0;

                rc = tg3_txcpu_pause(tp);
        }

        if (rc) {
                netdev_err(tp->dev, "%s timed out, %s CPU\n",
                           __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tg3_flag(tp, NVRAM))
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
                           const struct tg3_firmware_hdr *fw_hdr)
{
        int fw_len;

        /* Non-fragmented firmware has one firmware header followed by a
         * contiguous chunk of data to be written. The length field in that
         * header is not the length of the data to be written but the
         * complete length of the bss. The data length is therefore derived
         * from tp->fw->size minus the header.
         *
         * Fragmented firmware has a main header followed by multiple
         * fragments. Each fragment is identical to non-fragmented firmware:
         * a firmware header followed by a contiguous chunk of data. In
         * the main header, the length field is unused and set to 0xffffffff.
         * In each fragment header the length is the entire size of that
         * fragment, i.e. fragment data plus header length. The data length
         * is therefore the header's length field minus TG3_FW_HDR_LEN.
         */
        if (tp->fw_len == 0xffffffff)
                fw_len = be32_to_cpu(fw_hdr->len);
        else
                fw_len = tp->fw->size;

        return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
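
/* Worked example (hypothetical sizes, assuming the usual 12-byte
 * TG3_FW_HDR_LEN of version, base_addr and len): a fragment header
 * reporting len = 0x10c carries (0x10c - 12) / 4 = 64 data words, while
 * a non-fragmented image with tp->fw->size = 2060 bytes yields
 * (2060 - 12) / 4 = 512 words.
 */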

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
                                 u32 cpu_scratch_base, int cpu_scratch_size,
                                 const struct tg3_firmware_hdr *fw_hdr)
{
        int err, i;
        void (*write_op)(struct tg3 *, u32, u32);
        int total_len = tp->fw->size;

        if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: attempting to load TX CPU firmware on a 5705_PLUS chip\n",
                           __func__);
                return -EINVAL;
        }

        if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        if (tg3_asic_rev(tp) != ASIC_REV_57766) {
                /* It is possible that bootcode is still loading at this point.
                 * Acquire the NVRAM lock before halting the CPU.
                 */
                int lock_err = tg3_nvram_lock(tp);
                err = tg3_halt_cpu(tp, cpu_base);
                if (!lock_err)
                        tg3_nvram_unlock(tp);
                if (err)
                        goto out;

                for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                        write_op(tp, cpu_scratch_base + i, 0);
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,
                     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
        } else {
                /* Subtract additional main header for fragmented firmware and
                 * advance to the first fragment
                 */
                total_len -= TG3_FW_HDR_LEN;
                fw_hdr++;
        }

        do {
                u32 *fw_data = (u32 *)(fw_hdr + 1);
                for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
                        write_op(tp, cpu_scratch_base +
                                     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
                                     (i * sizeof(u32)),
                                 be32_to_cpu(fw_data[i]));

                total_len -= be32_to_cpu(fw_hdr->len);

                /* Advance to next fragment */
                fw_hdr = (struct tg3_firmware_hdr *)
                         ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
        } while (total_len > 0);

        err = 0;

out:
        return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
        int i;
        const int iters = 5;

        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, pc);

        for (i = 0; i < iters; i++) {
                if (tr32(cpu_base + CPU_PC) == pc)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, pc);
                udelay(1000);
        }

        return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr;
        int err;

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

        /* The firmware blob starts with version numbers, followed by
         * start address and length.  We are setting the complete length:
         * length = end_address_of_bss - start_address_of_text.
         * The remainder is the blob to be loaded contiguously from the
         * start address.
         */

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    fw_hdr);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    fw_hdr);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
                                       be32_to_cpu(fw_hdr->base_addr));
        if (err) {
                netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
                           "should be %08x\n", __func__,
                           tr32(RX_CPU_BASE + CPU_PC),
                           be32_to_cpu(fw_hdr->base_addr));
                return -ENODEV;
        }

        tg3_rxcpu_resume(tp);

        return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
        const int iters = 1000;
        int i;
        u32 val;

        /* Wait for boot code to complete initialization and enter the
         * service loop.  It is then safe to download service patches.
         */
        for (i = 0; i < iters; i++) {
                if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
                        break;

                udelay(10);
        }

        if (i == iters) {
                netdev_err(tp->dev, "Boot code not ready for service patches\n");
                return -EBUSY;
        }

        val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
        if (val & 0xff) {
                netdev_warn(tp->dev,
                            "Other patches exist. Not downloading EEE patch\n");
                return -EEXIST;
        }

        return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
        struct tg3_firmware_hdr *fw_hdr;

        if (!tg3_flag(tp, NO_NVRAM))
                return;

        if (tg3_validate_rxcpu_state(tp))
                return;

        if (!tp->fw)
                return;

        /* This firmware blob has a different format than older firmware
         * releases, as described below. The main difference is that the
         * data is fragmented and written to non-contiguous locations.
         *
         * The blob begins with a firmware header identical to other
         * firmware, consisting of version, base addr and length. The length
         * here is unused and set to 0xffffffff.
         *
         * This is followed by a series of firmware fragments, each
         * individually identical to older firmware, i.e. a firmware
         * header followed by the data for that fragment. The version
         * field of the individual fragment headers is unused.
         */
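
        /* Illustrative layout (fragment count and sizes hypothetical):
         *
         *   +------------------------------+
         *   | main header                  |  len == 0xffffffff (unused)
         *   +------------------------------+
         *   | fragment 1 header            |  len == header + data bytes
         *   | fragment 1 data ...          |
         *   +------------------------------+
         *   | fragment 2 header            |
         *   | fragment 2 data ...          |
         *   +------------------------------+
         */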

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
        if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
                return;

        if (tg3_rxcpu_pause(tp))
                return;

        /* tg3_load_firmware_cpu() will always succeed for the 57766 */
        tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

        tg3_rxcpu_resume(tp);
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err;

        if (!tg3_flag(tp, FW_TSO))
                return 0;

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

        /* The firmware blob starts with version numbers, followed by
         * start address and length.  We are setting the complete length:
         * length = end_address_of_bss - start_address_of_text.
         * The remainder is the blob to be loaded contiguously from the
         * start address.
         */

        cpu_scratch_size = tp->fw_len;

        if (tg3_asic_rev(tp) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    fw_hdr);
        if (err)
                return err;

        /* Now startup the cpu. */
        err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
                                       be32_to_cpu(fw_hdr->base_addr));
        if (err) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC),
                           be32_to_cpu(fw_hdr->base_addr));
                return -ENODEV;
        }

        tg3_resume_cpu(tp, cpu_base);
        return 0;
}

/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
        u32 addr_high, addr_low;

        addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
        addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
                    (mac_addr[4] <<  8) | mac_addr[5]);

        if (index < 4) {
                tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
                tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
        } else {
                index -= 4;
                tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
                tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
        }
}
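
/* Example packing for the helper above, with the hypothetical address
 * 00:10:18:aa:bb:cc:
 *
 *   addr_high = 0x00000010   (bytes 0-1)
 *   addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * Indexes 0-3 select the MAC_ADDR_{0..3}_{HIGH,LOW} pairs; indexes 4-15
 * fall through to the extended MAC_EXTADDR_{0..11}_{HIGH,LOW} pairs.
 */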

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
        u32 addr_high;
        int i;

        for (i = 0; i < 4; i++) {
                if (i == 1 && skip_mac_1)
                        continue;
                __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704) {
                for (i = 4; i < 16; i++)
                        __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
        }

        addr_high = (tp->dev->dev_addr[0] +
                     tp->dev->dev_addr[1] +
                     tp->dev->dev_addr[2] +
                     tp->dev->dev_addr[3] +
                     tp->dev->dev_addr[4] +
                     tp->dev->dev_addr[5]) &
                TX_BACKOFF_SEED_MASK;
        tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
        /*
         * Make sure register accesses (indirect or otherwise) will function
         * correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
        int err;

        tg3_enable_register_access(tp);

        err = pci_set_power_state(tp->pdev, PCI_D0);
        if (!err) {
                /* Switch out of Vaux if it is a NIC */
                tg3_pwrsrc_switch_to_vmain(tp);
        } else {
                netdev_err(tp->dev, "Transition to D0 failed\n");
        }

        return err;
}

static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG))
                pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                         PCI_EXP_LNKCTL_CLKREQ_EN);

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        tp->link_config.speed = phydev->speed;
                        tp->link_config.duplex = phydev->duplex;
                        tp->link_config.autoneg = phydev->autoneg;
                        tp->link_config.advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                do_low_power = true;

                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        tg3_setup_phy(tp, false);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else if (tp->phy_flags &
                                 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
                                if (tp->link_config.active_speed == SPEED_1000)
                                        mac_mode = MAC_MODE_PORT_MODE_GMII;
                                else
                                        mac_mode = MAC_MODE_PORT_MODE_MII;
                        } else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
             tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                    tg3_asic_rev(tp) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((!tg3_flag(tp, IS_SSB_CORE)) &&
            ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
             (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                                 SPEED_10;
                        *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
                                  DUPLEX_HALF;
                        break;
                }
                *speed = SPEED_UNKNOWN;
                *duplex = DUPLEX_UNKNOWN;
                break;
        }
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
        int err = 0;
        u32 val, new_adv;

        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
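        /* mii_advertise_flowctrl() encodes the standard 802.3 pause bits.
         * For reference (mapping as implemented by the common mii helper):
         * TX|RX -> ADVERTISE_PAUSE_CAP, RX only -> ADVERTISE_PAUSE_CAP |
         * ADVERTISE_PAUSE_ASYM, TX only -> ADVERTISE_PAUSE_ASYM.
         */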
        new_adv |= mii_advertise_flowctrl(flowctrl);

        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
                        new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

                err = tg3_writephy(tp, MII_CTRL1000, new_adv);
                if (err)
                        goto done;
        }

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;

        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

        err = tg3_phy_toggle_auxctl_smdsp(tp, true);
        if (!err) {
                u32 err2;

                val = 0;
                /* Advertise 100-BaseTX EEE ability */
                if (advertise & ADVERTISED_100baseT_Full)
                        val |= MDIO_AN_EEE_ADV_100TX;
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;

                if (!tp->eee.eee_enabled) {
                        val = 0;
                        tp->eee.advertised = 0;
                } else {
                        tp->eee.advertised = advertise &
                                             (ADVERTISED_100baseT_Full |
                                              ADVERTISED_1000baseT_Full);
                }

                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;

                switch (tg3_asic_rev(tp)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any EEE modes above... */
4381                         if (val)
4382                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4383                                       MII_TG3_DSP_TAP26_RMRXSTO |
4384                                       MII_TG3_DSP_TAP26_OPCSINPT;
4385                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4386                         /* Fall through */
4387                 case ASIC_REV_5720:
4388                 case ASIC_REV_5762:
4389                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4390                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4391                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4392                 }
4393
4394                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4395                 if (!err)
4396                         err = err2;
4397         }
4398
4399 done:
4400         return err;
4401 }
4402
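/* Start link negotiation on a copper PHY.  With autoneg enabled (or in
 * low-power states) this programs the advertisement and restarts
 * autonegotiation; otherwise it forces the configured speed/duplex.
 */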
4403 static void tg3_phy_copper_begin(struct tg3 *tp)
4404 {
4405         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4406             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4407                 u32 adv, fc;
4408
4409                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4410                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4411                         adv = ADVERTISED_10baseT_Half |
4412                               ADVERTISED_10baseT_Full;
4413                         if (tg3_flag(tp, WOL_SPEED_100MB))
4414                                 adv |= ADVERTISED_100baseT_Half |
4415                                        ADVERTISED_100baseT_Full;
4416                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4417                                 if (!(tp->phy_flags &
4418                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4419                                         adv |= ADVERTISED_1000baseT_Half;
4420                                 adv |= ADVERTISED_1000baseT_Full;
4421                         }
4422
4423                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4424                 } else {
4425                         adv = tp->link_config.advertising;
4426                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4427                                 adv &= ~(ADVERTISED_1000baseT_Half |
4428                                          ADVERTISED_1000baseT_Full);
4429
4430                         fc = tp->link_config.flowctrl;
4431                 }
4432
4433                 tg3_phy_autoneg_cfg(tp, adv, fc);
4434
4435                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4436                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4437                         /* Normally during power down we want to autonegotiate
4438                          * the lowest possible speed for WOL. However, to avoid
4439                          * a link flap, we leave the advertisement untouched.
4440                          */
4441                         return;
4442                 }
4443
4444                 tg3_writephy(tp, MII_BMCR,
4445                              BMCR_ANENABLE | BMCR_ANRESTART);
4446         } else {
4447                 int i;
4448                 u32 bmcr, orig_bmcr;
4449
4450                 tp->link_config.active_speed = tp->link_config.speed;
4451                 tp->link_config.active_duplex = tp->link_config.duplex;
4452
4453                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4454                         /* With autoneg disabled, the 5715 (which shares
4455                          * ASIC_REV_5714) only links up when the advertisement
4456                          * register has the configured speed enabled.
4457                          */
4458                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4459                 }
4460
4461                 bmcr = 0;
4462                 switch (tp->link_config.speed) {
4463                 default:
4464                 case SPEED_10:
4465                         break;
4466
4467                 case SPEED_100:
4468                         bmcr |= BMCR_SPEED100;
4469                         break;
4470
4471                 case SPEED_1000:
4472                         bmcr |= BMCR_SPEED1000;
4473                         break;
4474                 }
4475
4476                 if (tp->link_config.duplex == DUPLEX_FULL)
4477                         bmcr |= BMCR_FULLDPLX;
4478
4479                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4480                     (bmcr != orig_bmcr)) {
4481                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4482                         for (i = 0; i < 1500; i++) {
4483                                 u32 tmp;
4484
4485                                 udelay(10);
4486                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4487                                     tg3_readphy(tp, MII_BMSR, &tmp))
4488                                         continue;
4489                                 if (!(tmp & BMSR_LSTATUS)) {
4490                                         udelay(40);
4491                                         break;
4492                                 }
4493                         }
4494                         tg3_writephy(tp, MII_BMCR, bmcr);
4495                         udelay(40);
4496                 }
4497         }
4498 }
4499
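/* Reconstruct tp->link_config from the PHY's current register state,
 * used to adopt an already-established link configuration (e.g. for
 * link flap avoidance) instead of renegotiating from scratch.
 */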
4500 static int tg3_phy_pull_config(struct tg3 *tp)
4501 {
4502         int err;
4503         u32 val;
4504
4505         err = tg3_readphy(tp, MII_BMCR, &val);
4506         if (err)
4507                 goto done;
4508
4509         if (!(val & BMCR_ANENABLE)) {
4510                 tp->link_config.autoneg = AUTONEG_DISABLE;
4511                 tp->link_config.advertising = 0;
4512                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4513
4514                 err = -EIO;
4515
4516                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4517                 case 0:
4518                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4519                                 goto done;
4520
4521                         tp->link_config.speed = SPEED_10;
4522                         break;
4523                 case BMCR_SPEED100:
4524                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4525                                 goto done;
4526
4527                         tp->link_config.speed = SPEED_100;
4528                         break;
4529                 case BMCR_SPEED1000:
4530                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4531                                 tp->link_config.speed = SPEED_1000;
4532                                 break;
4533                         }
4534                         /* Fall through */
4535                 default:
4536                         goto done;
4537                 }
4538
4539                 if (val & BMCR_FULLDPLX)
4540                         tp->link_config.duplex = DUPLEX_FULL;
4541                 else
4542                         tp->link_config.duplex = DUPLEX_HALF;
4543
4544                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4545
4546                 err = 0;
4547                 goto done;
4548         }
4549
4550         tp->link_config.autoneg = AUTONEG_ENABLE;
4551         tp->link_config.advertising = ADVERTISED_Autoneg;
4552         tg3_flag_set(tp, PAUSE_AUTONEG);
4553
4554         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4555                 u32 adv;
4556
4557                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4558                 if (err)
4559                         goto done;
4560
4561                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4562                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4563
4564                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4565         } else {
4566                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4567         }
4568
4569         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4570                 u32 adv;
4571
4572                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4573                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4574                         if (err)
4575                                 goto done;
4576
4577                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4578                 } else {
4579                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4580                         if (err)
4581                                 goto done;
4582
4583                         adv = tg3_decode_flowctrl_1000X(val);
4584                         tp->link_config.flowctrl = adv;
4585
4586                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4587                         adv = mii_adv_to_ethtool_adv_x(val);
4588                 }
4589
4590                 tp->link_config.advertising |= adv;
4591         }
4592
4593 done:
4594         return err;
4595 }
4596
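/* Apply DSP fixups to the BCM5401 PHY.  The register/value pairs below
 * appear to be unpublished vendor magic.
 */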
4597 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4598 {
4599         int err;
4600
4601         /* Turn off tap power management. */
4602         /* Set the extended packet length bit. */
4603         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4604
4605         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4606         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4607         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4608         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4609         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4610
4611         udelay(40);
4612
4613         return err;
4614 }
4615
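/* Return true if the EEE configuration in the hardware still matches
 * what was last requested in tp->eee (or if the PHY has no EEE support).
 */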
4616 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4617 {
4618         struct ethtool_eee eee;
4619
4620         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4621                 return true;
4622
4623         tg3_eee_pull_config(tp, &eee);
4624
4625         if (tp->eee.eee_enabled) {
4626                 if (tp->eee.advertised != eee.advertised ||
4627                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4628                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4629                         return false;
4630         } else {
4631                 /* EEE is disabled but we're advertising */
4632                 if (eee.advertised)
4633                         return false;
4634         }
4635
4636         return true;
4637 }
4638
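/* Check that the PHY's advertisement registers still match
 * tp->link_config; returns false if autoneg needs to be reconfigured.
 */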
4639 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4640 {
4641         u32 advmsk, tgtadv, advertising;
4642
4643         advertising = tp->link_config.advertising;
4644         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4645
4646         advmsk = ADVERTISE_ALL;
4647         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4648                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4649                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4650         }
4651
4652         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4653                 return false;
4654
4655         if ((*lcladv & advmsk) != tgtadv)
4656                 return false;
4657
4658         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4659                 u32 tg3_ctrl;
4660
4661                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4662
4663                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4664                         return false;
4665
4666                 if (tgtadv &&
4667                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4668                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4669                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4670                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4671                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4672                 } else {
4673                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4674                 }
4675
4676                 if (tg3_ctrl != tgtadv)
4677                         return false;
4678         }
4679
4680         return true;
4681 }
4682
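/* Fetch the link partner's abilities (MII_LPA and, on gigabit-capable
 * parts, MII_STAT1000) and record them in ethtool form in
 * tp->link_config.rmt_adv.
 */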
4683 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4684 {
4685         u32 lpeth = 0;
4686
4687         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4688                 u32 val;
4689
4690                 if (tg3_readphy(tp, MII_STAT1000, &val))
4691                         return false;
4692
4693                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4694         }
4695
4696         if (tg3_readphy(tp, MII_LPA, rmtadv))
4697                 return false;
4698
4699         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4700         tp->link_config.rmt_adv = lpeth;
4701
4702         return true;
4703 }
4704
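/* Propagate a link state change to the network stack and log it.
 * Returns true if the link state actually changed.
 */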
4705 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4706 {
4707         if (curr_link_up != tp->link_up) {
4708                 if (curr_link_up) {
4709                         netif_carrier_on(tp->dev);
4710                 } else {
4711                         netif_carrier_off(tp->dev);
4712                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4713                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4714                 }
4715
4716                 tg3_link_report(tp);
4717                 return true;
4718         }
4719
4720         return false;
4721 }
4722
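/* Disable MAC events and clear the latched SYNC/CFG/MI/link-state
 * change bits in MAC_STATUS.
 */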
4723 static void tg3_clear_mac_status(struct tg3 *tp)
4724 {
4725         tw32(MAC_EVENT, 0);
4726
4727         tw32_f(MAC_STATUS,
4728                MAC_STATUS_SYNC_CHANGED |
4729                MAC_STATUS_CFG_CHANGED |
4730                MAC_STATUS_MI_COMPLETION |
4731                MAC_STATUS_LNKSTATE_CHANGED);
4732         udelay(40);
4733 }
4734
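/* Program the CPMU EEE mode, link-idle and debounce timer registers
 * from the current tp->eee settings.
 */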
4735 static void tg3_setup_eee(struct tg3 *tp)
4736 {
4737         u32 val;
4738
4739         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4740               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4741         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4742                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4743
4744         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4745
4746         tw32_f(TG3_CPMU_EEE_CTRL,
4747                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4748
4749         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4750               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4751               TG3_CPMU_EEEMD_LPI_IN_RX |
4752               TG3_CPMU_EEEMD_EEE_ENABLE;
4753
4754         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4755                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4756
4757         if (tg3_flag(tp, ENABLE_APE))
4758                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4759
4760         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4761
4762         tw32_f(TG3_CPMU_EEE_DBTMR1,
4763                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4764                (tp->eee.tx_lpi_timer & 0xffff));
4765
4766         tw32_f(TG3_CPMU_EEE_DBTMR2,
4767                TG3_CPMU_DBTMR2_APE_TX_2047US |
4768                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4769 }
4770
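/* Bring up (or re-verify) the link on a copper PHY: optionally reset
 * the PHY, wait for link, resolve speed/duplex and flow control, and
 * program the MAC to match.
 */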
4771 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4772 {
4773         bool current_link_up;
4774         u32 bmsr, val;
4775         u32 lcl_adv, rmt_adv;
4776         u16 current_speed;
4777         u8 current_duplex;
4778         int i, err;
4779
4780         tg3_clear_mac_status(tp);
4781
4782         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4783                 tw32_f(MAC_MI_MODE,
4784                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4785                 udelay(80);
4786         }
4787
4788         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4789
4790         /* Some third-party PHYs need to be reset when the link
4791          * goes down.
4792          */
4793         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4794              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4795              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4796             tp->link_up) {
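                /* BMSR_LSTATUS is latched low; read twice to get the
                 * current link state.
                 */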
4797                 tg3_readphy(tp, MII_BMSR, &bmsr);
4798                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4799                     !(bmsr & BMSR_LSTATUS))
4800                         force_reset = true;
4801         }
4802         if (force_reset)
4803                 tg3_phy_reset(tp);
4804
4805         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4806                 tg3_readphy(tp, MII_BMSR, &bmsr);
4807                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4808                     !tg3_flag(tp, INIT_COMPLETE))
4809                         bmsr = 0;
4810
4811                 if (!(bmsr & BMSR_LSTATUS)) {
4812                         err = tg3_init_5401phy_dsp(tp);
4813                         if (err)
4814                                 return err;
4815
4816                         tg3_readphy(tp, MII_BMSR, &bmsr);
4817                         for (i = 0; i < 1000; i++) {
4818                                 udelay(10);
4819                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4820                                     (bmsr & BMSR_LSTATUS)) {
4821                                         udelay(40);
4822                                         break;
4823                                 }
4824                         }
4825
4826                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4827                             TG3_PHY_REV_BCM5401_B0 &&
4828                             !(bmsr & BMSR_LSTATUS) &&
4829                             tp->link_config.active_speed == SPEED_1000) {
4830                                 err = tg3_phy_reset(tp);
4831                                 if (!err)
4832                                         err = tg3_init_5401phy_dsp(tp);
4833                                 if (err)
4834                                         return err;
4835                         }
4836                 }
4837         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4838                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4839                 /* 5701 {A0,B0} CRC bug workaround */
4840                 tg3_writephy(tp, 0x15, 0x0a75);
4841                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4842                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4843                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844         }
4845
4846         /* Clear pending interrupts... */
4847         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4848         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849
4850         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4851                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4852         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4853                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4854
4855         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4856             tg3_asic_rev(tp) == ASIC_REV_5701) {
4857                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4858                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4859                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4860                 else
4861                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4862         }
4863
4864         current_link_up = false;
4865         current_speed = SPEED_UNKNOWN;
4866         current_duplex = DUPLEX_UNKNOWN;
4867         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4868         tp->link_config.rmt_adv = 0;
4869
4870         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4871                 err = tg3_phy_auxctl_read(tp,
4872                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4873                                           &val);
4874                 if (!err && !(val & (1 << 10))) {
4875                         tg3_phy_auxctl_write(tp,
4876                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877                                              val | (1 << 10));
4878                         goto relink;
4879                 }
4880         }
4881
4882         bmsr = 0;
4883         for (i = 0; i < 100; i++) {
4884                 tg3_readphy(tp, MII_BMSR, &bmsr);
4885                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4886                     (bmsr & BMSR_LSTATUS))
4887                         break;
4888                 udelay(40);
4889         }
4890
4891         if (bmsr & BMSR_LSTATUS) {
4892                 u32 aux_stat, bmcr;
4893
4894                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4895                 for (i = 0; i < 2000; i++) {
4896                         udelay(10);
4897                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4898                             aux_stat)
4899                                 break;
4900                 }
4901
4902                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4903                                              &current_speed,
4904                                              &current_duplex);
4905
4906                 bmcr = 0;
4907                 for (i = 0; i < 200; i++) {
4908                         tg3_readphy(tp, MII_BMCR, &bmcr);
4909                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4910                                 continue;
4911                         if (bmcr && bmcr != 0x7fff)
4912                                 break;
4913                         udelay(10);
4914                 }
4915
4916                 lcl_adv = 0;
4917                 rmt_adv = 0;
4918
4919                 tp->link_config.active_speed = current_speed;
4920                 tp->link_config.active_duplex = current_duplex;
4921
4922                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4923                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4924
4925                         if ((bmcr & BMCR_ANENABLE) &&
4926                             eee_config_ok &&
4927                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4928                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4929                                 current_link_up = true;
4930
4931                         /* Changes to the EEE settings take effect only after
4932                          * a PHY reset.  If we have skipped a reset due to Link
4933                          * Flap Avoidance being enabled, do it now.
4934                          */
4935                         if (!eee_config_ok &&
4936                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4937                             !force_reset) {
4938                                 tg3_setup_eee(tp);
4939                                 tg3_phy_reset(tp);
4940                         }
4941                 } else {
4942                         if (!(bmcr & BMCR_ANENABLE) &&
4943                             tp->link_config.speed == current_speed &&
4944                             tp->link_config.duplex == current_duplex) {
4945                                 current_link_up = true;
4946                         }
4947                 }
4948
4949                 if (current_link_up &&
4950                     tp->link_config.active_duplex == DUPLEX_FULL) {
4951                         u32 reg, bit;
4952
4953                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4954                                 reg = MII_TG3_FET_GEN_STAT;
4955                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4956                         } else {
4957                                 reg = MII_TG3_EXT_STAT;
4958                                 bit = MII_TG3_EXT_STAT_MDIX;
4959                         }
4960
4961                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4962                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4963
4964                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4965                 }
4966         }
4967
4968 relink:
4969         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4970                 tg3_phy_copper_begin(tp);
4971
4972                 if (tg3_flag(tp, ROBOSWITCH)) {
4973                         current_link_up = true;
4974                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4975                         current_speed = SPEED_1000;
4976                         current_duplex = DUPLEX_FULL;
4977                         tp->link_config.active_speed = current_speed;
4978                         tp->link_config.active_duplex = current_duplex;
4979                 }
4980
4981                 tg3_readphy(tp, MII_BMSR, &bmsr);
4982                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4983                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4984                         current_link_up = true;
4985         }
4986
4987         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4988         if (current_link_up) {
4989                 if (tp->link_config.active_speed == SPEED_100 ||
4990                     tp->link_config.active_speed == SPEED_10)
4991                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4992                 else
4993                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4994         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4995                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996         else
4997                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998
4999         /* In order for the 5750 core in the BCM4785 chip to work properly
5000          * in RGMII mode, the LED Control Register must be set up.
5001          */
5002         if (tg3_flag(tp, RGMII_MODE)) {
5003                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5004                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5005
5006                 if (tp->link_config.active_speed == SPEED_10)
5007                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5008                 else if (tp->link_config.active_speed == SPEED_100)
5009                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010                                      LED_CTRL_100MBPS_ON);
5011                 else if (tp->link_config.active_speed == SPEED_1000)
5012                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013                                      LED_CTRL_1000MBPS_ON);
5014
5015                 tw32(MAC_LED_CTRL, led_ctrl);
5016                 udelay(40);
5017         }
5018
5019         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5020         if (tp->link_config.active_duplex == DUPLEX_HALF)
5021                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5022
5023         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5024                 if (current_link_up &&
5025                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5026                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5027                 else
5028                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5029         }
5030
5031         /* For reasons that are not understood, without this setting the
5032          * Netgear GA302T PHY does not send or receive packets...
5033          */
5034         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5035             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5036                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5037                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5038                 udelay(80);
5039         }
5040
5041         tw32_f(MAC_MODE, tp->mac_mode);
5042         udelay(40);
5043
5044         tg3_phy_eee_adjust(tp, current_link_up);
5045
5046         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5047                 /* Polled via timer. */
5048                 tw32_f(MAC_EVENT, 0);
5049         } else {
5050                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5051         }
5052         udelay(40);
5053
5054         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5055             current_link_up &&
5056             tp->link_config.active_speed == SPEED_1000 &&
5057             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5058                 udelay(120);
5059                 tw32_f(MAC_STATUS,
5060                      (MAC_STATUS_SYNC_CHANGED |
5061                       MAC_STATUS_CFG_CHANGED));
5062                 udelay(40);
5063                 tg3_write_mem(tp,
5064                               NIC_SRAM_FIRMWARE_MBOX,
5065                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5066         }
5067
5068         /* Prevent send BD corruption: keep PCIe CLKREQ disabled at 10/100. */
5069         if (tg3_flag(tp, CLKREQ_BUG)) {
5070                 if (tp->link_config.active_speed == SPEED_100 ||
5071                     tp->link_config.active_speed == SPEED_10)
5072                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5073                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5074                 else
5075                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5076                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5077         }
5078
5079         tg3_test_and_report_link_chg(tp, current_link_up);
5080
5081         return 0;
5082 }
5083
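/* Software autonegotiation state for fiber links, driven by
 * tg3_fiber_aneg_smachine() below; the states and flags loosely follow
 * the IEEE 802.3 clause 37 (1000BASE-X) autoneg state machine.
 */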
5084 struct tg3_fiber_aneginfo {
5085         int state;
5086 #define ANEG_STATE_UNKNOWN              0
5087 #define ANEG_STATE_AN_ENABLE            1
5088 #define ANEG_STATE_RESTART_INIT         2
5089 #define ANEG_STATE_RESTART              3
5090 #define ANEG_STATE_DISABLE_LINK_OK      4
5091 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5092 #define ANEG_STATE_ABILITY_DETECT       6
5093 #define ANEG_STATE_ACK_DETECT_INIT      7
5094 #define ANEG_STATE_ACK_DETECT           8
5095 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5096 #define ANEG_STATE_COMPLETE_ACK         10
5097 #define ANEG_STATE_IDLE_DETECT_INIT     11
5098 #define ANEG_STATE_IDLE_DETECT          12
5099 #define ANEG_STATE_LINK_OK              13
5100 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5101 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5102
5103         u32 flags;
5104 #define MR_AN_ENABLE            0x00000001
5105 #define MR_RESTART_AN           0x00000002
5106 #define MR_AN_COMPLETE          0x00000004
5107 #define MR_PAGE_RX              0x00000008
5108 #define MR_NP_LOADED            0x00000010
5109 #define MR_TOGGLE_TX            0x00000020
5110 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5111 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5112 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5113 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5114 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5115 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5116 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5117 #define MR_TOGGLE_RX            0x00002000
5118 #define MR_NP_RX                0x00004000
5119
5120 #define MR_LINK_OK              0x80000000
5121
5122         unsigned long link_time, cur_time;
5123
5124         u32 ability_match_cfg;
5125         int ability_match_count;
5126
5127         char ability_match, idle_match, ack_match;
5128
5129         u32 txconfig, rxconfig;
5130 #define ANEG_CFG_NP             0x00000080
5131 #define ANEG_CFG_ACK            0x00000040
5132 #define ANEG_CFG_RF2            0x00000020
5133 #define ANEG_CFG_RF1            0x00000010
5134 #define ANEG_CFG_PS2            0x00000001
5135 #define ANEG_CFG_PS1            0x00008000
5136 #define ANEG_CFG_HD             0x00004000
5137 #define ANEG_CFG_FD             0x00002000
5138 #define ANEG_CFG_INVAL          0x00001f06
5139
5140 };
5141 #define ANEG_OK         0
5142 #define ANEG_DONE       1
5143 #define ANEG_TIMER_ENAB 2
5144 #define ANEG_FAILED     -1
5145
5146 #define ANEG_STATE_SETTLE_TIME  10000
5147
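/* Run one tick of the software fiber autoneg state machine.  Called
 * roughly once per microsecond from fiber_autoneg() until it returns
 * ANEG_DONE or ANEG_FAILED.
 */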
5148 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5149                                    struct tg3_fiber_aneginfo *ap)
5150 {
5151         u16 flowctrl;
5152         unsigned long delta;
5153         u32 rx_cfg_reg;
5154         int ret;
5155
5156         if (ap->state == ANEG_STATE_UNKNOWN) {
5157                 ap->rxconfig = 0;
5158                 ap->link_time = 0;
5159                 ap->cur_time = 0;
5160                 ap->ability_match_cfg = 0;
5161                 ap->ability_match_count = 0;
5162                 ap->ability_match = 0;
5163                 ap->idle_match = 0;
5164                 ap->ack_match = 0;
5165         }
5166         ap->cur_time++;
5167
5168         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5169                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5170
5171                 if (rx_cfg_reg != ap->ability_match_cfg) {
5172                         ap->ability_match_cfg = rx_cfg_reg;
5173                         ap->ability_match = 0;
5174                         ap->ability_match_count = 0;
5175                 } else {
5176                         if (++ap->ability_match_count > 1) {
5177                                 ap->ability_match = 1;
5178                                 ap->ability_match_cfg = rx_cfg_reg;
5179                         }
5180                 }
5181                 if (rx_cfg_reg & ANEG_CFG_ACK)
5182                         ap->ack_match = 1;
5183                 else
5184                         ap->ack_match = 0;
5185
5186                 ap->idle_match = 0;
5187         } else {
5188                 ap->idle_match = 1;
5189                 ap->ability_match_cfg = 0;
5190                 ap->ability_match_count = 0;
5191                 ap->ability_match = 0;
5192                 ap->ack_match = 0;
5193
5194                 rx_cfg_reg = 0;
5195         }
5196
5197         ap->rxconfig = rx_cfg_reg;
5198         ret = ANEG_OK;
5199
5200         switch (ap->state) {
5201         case ANEG_STATE_UNKNOWN:
5202                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5203                         ap->state = ANEG_STATE_AN_ENABLE;
5204
5205                 /* Fall through */
5206         case ANEG_STATE_AN_ENABLE:
5207                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5208                 if (ap->flags & MR_AN_ENABLE) {
5209                         ap->link_time = 0;
5210                         ap->cur_time = 0;
5211                         ap->ability_match_cfg = 0;
5212                         ap->ability_match_count = 0;
5213                         ap->ability_match = 0;
5214                         ap->idle_match = 0;
5215                         ap->ack_match = 0;
5216
5217                         ap->state = ANEG_STATE_RESTART_INIT;
5218                 } else {
5219                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5220                 }
5221                 break;
5222
5223         case ANEG_STATE_RESTART_INIT:
5224                 ap->link_time = ap->cur_time;
5225                 ap->flags &= ~(MR_NP_LOADED);
5226                 ap->txconfig = 0;
5227                 tw32(MAC_TX_AUTO_NEG, 0);
5228                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5229                 tw32_f(MAC_MODE, tp->mac_mode);
5230                 udelay(40);
5231
5232                 ret = ANEG_TIMER_ENAB;
5233                 ap->state = ANEG_STATE_RESTART;
5234
5235                 /* Fall through */
5236         case ANEG_STATE_RESTART:
5237                 delta = ap->cur_time - ap->link_time;
5238                 if (delta > ANEG_STATE_SETTLE_TIME)
5239                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5240                 else
5241                         ret = ANEG_TIMER_ENAB;
5242                 break;
5243
5244         case ANEG_STATE_DISABLE_LINK_OK:
5245                 ret = ANEG_DONE;
5246                 break;
5247
5248         case ANEG_STATE_ABILITY_DETECT_INIT:
5249                 ap->flags &= ~(MR_TOGGLE_TX);
5250                 ap->txconfig = ANEG_CFG_FD;
5251                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5252                 if (flowctrl & ADVERTISE_1000XPAUSE)
5253                         ap->txconfig |= ANEG_CFG_PS1;
5254                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5255                         ap->txconfig |= ANEG_CFG_PS2;
5256                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5257                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5258                 tw32_f(MAC_MODE, tp->mac_mode);
5259                 udelay(40);
5260
5261                 ap->state = ANEG_STATE_ABILITY_DETECT;
5262                 break;
5263
5264         case ANEG_STATE_ABILITY_DETECT:
5265                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5266                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5267                 break;
5268
5269         case ANEG_STATE_ACK_DETECT_INIT:
5270                 ap->txconfig |= ANEG_CFG_ACK;
5271                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5272                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5273                 tw32_f(MAC_MODE, tp->mac_mode);
5274                 udelay(40);
5275
5276                 ap->state = ANEG_STATE_ACK_DETECT;
5277
5278                 /* Fall through */
5279         case ANEG_STATE_ACK_DETECT:
5280                 if (ap->ack_match != 0) {
5281                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5282                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5283                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5284                         } else {
5285                                 ap->state = ANEG_STATE_AN_ENABLE;
5286                         }
5287                 } else if (ap->ability_match != 0 &&
5288                            ap->rxconfig == 0) {
5289                         ap->state = ANEG_STATE_AN_ENABLE;
5290                 }
5291                 break;
5292
5293         case ANEG_STATE_COMPLETE_ACK_INIT:
5294                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5295                         ret = ANEG_FAILED;
5296                         break;
5297                 }
5298                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5299                                MR_LP_ADV_HALF_DUPLEX |
5300                                MR_LP_ADV_SYM_PAUSE |
5301                                MR_LP_ADV_ASYM_PAUSE |
5302                                MR_LP_ADV_REMOTE_FAULT1 |
5303                                MR_LP_ADV_REMOTE_FAULT2 |
5304                                MR_LP_ADV_NEXT_PAGE |
5305                                MR_TOGGLE_RX |
5306                                MR_NP_RX);
5307                 if (ap->rxconfig & ANEG_CFG_FD)
5308                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5309                 if (ap->rxconfig & ANEG_CFG_HD)
5310                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5311                 if (ap->rxconfig & ANEG_CFG_PS1)
5312                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5313                 if (ap->rxconfig & ANEG_CFG_PS2)
5314                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5315                 if (ap->rxconfig & ANEG_CFG_RF1)
5316                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5317                 if (ap->rxconfig & ANEG_CFG_RF2)
5318                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5319                 if (ap->rxconfig & ANEG_CFG_NP)
5320                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5321
5322                 ap->link_time = ap->cur_time;
5323
5324                 ap->flags ^= (MR_TOGGLE_TX);
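                /* Bit 0x0008 appears to be the link partner's Toggle bit. */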
5325                 if (ap->rxconfig & 0x0008)
5326                         ap->flags |= MR_TOGGLE_RX;
5327                 if (ap->rxconfig & ANEG_CFG_NP)
5328                         ap->flags |= MR_NP_RX;
5329                 ap->flags |= MR_PAGE_RX;
5330
5331                 ap->state = ANEG_STATE_COMPLETE_ACK;
5332                 ret = ANEG_TIMER_ENAB;
5333                 break;
5334
5335         case ANEG_STATE_COMPLETE_ACK:
5336                 if (ap->ability_match != 0 &&
5337                     ap->rxconfig == 0) {
5338                         ap->state = ANEG_STATE_AN_ENABLE;
5339                         break;
5340                 }
5341                 delta = ap->cur_time - ap->link_time;
5342                 if (delta > ANEG_STATE_SETTLE_TIME) {
5343                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5344                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5345                         } else {
5346                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5347                                     !(ap->flags & MR_NP_RX)) {
5348                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349                                 } else {
5350                                         ret = ANEG_FAILED;
5351                                 }
5352                         }
5353                 }
5354                 break;
5355
5356         case ANEG_STATE_IDLE_DETECT_INIT:
5357                 ap->link_time = ap->cur_time;
5358                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5359                 tw32_f(MAC_MODE, tp->mac_mode);
5360                 udelay(40);
5361
5362                 ap->state = ANEG_STATE_IDLE_DETECT;
5363                 ret = ANEG_TIMER_ENAB;
5364                 break;
5365
5366         case ANEG_STATE_IDLE_DETECT:
5367                 if (ap->ability_match != 0 &&
5368                     ap->rxconfig == 0) {
5369                         ap->state = ANEG_STATE_AN_ENABLE;
5370                         break;
5371                 }
5372                 delta = ap->cur_time - ap->link_time;
5373                 if (delta > ANEG_STATE_SETTLE_TIME) {
5374                         /* XXX another gem from the Broadcom driver :( */
5375                         ap->state = ANEG_STATE_LINK_OK;
5376                 }
5377                 break;
5378
5379         case ANEG_STATE_LINK_OK:
5380                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5381                 ret = ANEG_DONE;
5382                 break;
5383
5384         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5385                 /* ??? unimplemented */
5386                 break;
5387
5388         case ANEG_STATE_NEXT_PAGE_WAIT:
5389                 /* ??? unimplemented */
5390                 break;
5391
5392         default:
5393                 ret = ANEG_FAILED;
5394                 break;
5395         }
5396
5397         return ret;
5398 }
5399
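/* Drive the software autoneg state machine to completion, bounded at
 * 195000 one-microsecond ticks.  Returns nonzero on a successful
 * negotiation and reports the tx/rx config words through
 * txflags/rxflags.
 */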
5400 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5401 {
5402         int res = 0;
5403         struct tg3_fiber_aneginfo aninfo;
5404         int status = ANEG_FAILED;
5405         unsigned int tick;
5406         u32 tmp;
5407
5408         tw32_f(MAC_TX_AUTO_NEG, 0);
5409
5410         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5411         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5412         udelay(40);
5413
5414         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5415         udelay(40);
5416
5417         memset(&aninfo, 0, sizeof(aninfo));
5418         aninfo.flags |= MR_AN_ENABLE;
5419         aninfo.state = ANEG_STATE_UNKNOWN;
5420         aninfo.cur_time = 0;
5421         tick = 0;
5422         while (++tick < 195000) {
5423                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5424                 if (status == ANEG_DONE || status == ANEG_FAILED)
5425                         break;
5426
5427                 udelay(1);
5428         }
5429
5430         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5431         tw32_f(MAC_MODE, tp->mac_mode);
5432         udelay(40);
5433
5434         *txflags = aninfo.txconfig;
5435         *rxflags = aninfo.flags;
5436
5437         if (status == ANEG_DONE &&
5438             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5439                              MR_LP_ADV_FULL_DUPLEX)))
5440                 res = 1;
5441
5442         return res;
5443 }
5444
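/* One-time init for the BCM8002 SerDes PHY: a software reset followed
 * by a sequence of undocumented register writes (vendor magic).
 */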
5445 static void tg3_init_bcm8002(struct tg3 *tp)
5446 {
5447         u32 mac_status = tr32(MAC_STATUS);
5448         int i;
5449
5450         /* Reset when initializing for the first time, or when we have a link. */
5451         if (tg3_flag(tp, INIT_COMPLETE) &&
5452             !(mac_status & MAC_STATUS_PCS_SYNCED))
5453                 return;
5454
5455         /* Set PLL lock range. */
5456         tg3_writephy(tp, 0x16, 0x8007);
5457
5458         /* SW reset */
5459         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5460
5461         /* Wait for reset to complete. */
5462         /* XXX schedule_timeout() ... */
5463         for (i = 0; i < 500; i++)
5464                 udelay(10);
5465
5466         /* Config mode; select PMA/Ch 1 regs. */
5467         tg3_writephy(tp, 0x10, 0x8411);
5468
5469         /* Enable auto-lock and comdet, select txclk for tx. */
5470         tg3_writephy(tp, 0x11, 0x0a10);
5471
5472         tg3_writephy(tp, 0x18, 0x00a0);
5473         tg3_writephy(tp, 0x16, 0x41ff);
5474
5475         /* Assert and deassert POR. */
5476         tg3_writephy(tp, 0x13, 0x0400);
5477         udelay(40);
5478         tg3_writephy(tp, 0x13, 0x0000);
5479
5480         tg3_writephy(tp, 0x11, 0x0a50);
5481         udelay(40);
5482         tg3_writephy(tp, 0x11, 0x0a10);
5483
5484         /* Wait for signal to stabilize */
5485         /* XXX schedule_timeout() ... */
5486         for (i = 0; i < 15000; i++)
5487                 udelay(10);
5488
5489         /* Deselect the channel register so we can read the PHYID
5490          * later.
5491          */
5492         tg3_writephy(tp, 0x10, 0x8011);
5493 }
5494
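/* Resolve fiber link state using the SG_DIG hardware autoneg block,
 * including a MAC_SERDES_CFG workaround on all but the 5704 A0/A1
 * revisions, with parallel detection as a fallback.
 */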
5495 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5496 {
5497         u16 flowctrl;
5498         bool current_link_up;
5499         u32 sg_dig_ctrl, sg_dig_status;
5500         u32 serdes_cfg, expected_sg_dig_ctrl;
5501         int workaround, port_a;
5502
5503         serdes_cfg = 0;
5504         expected_sg_dig_ctrl = 0;
5505         workaround = 0;
5506         port_a = 1;
5507         current_link_up = false;
5508
5509         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5510             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5511                 workaround = 1;
5512                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5513                         port_a = 0;
5514
5515                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5516                 /* preserve bits 20-23 for voltage regulator */
5517                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5518         }
5519
5520         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5521
5522         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5523                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5524                         if (workaround) {
5525                                 u32 val = serdes_cfg;
5526
5527                                 if (port_a)
5528                                         val |= 0xc010000;
5529                                 else
5530                                         val |= 0x4010000;
5531                                 tw32_f(MAC_SERDES_CFG, val);
5532                         }
5533
5534                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5535                 }
5536                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5537                         tg3_setup_flow_control(tp, 0, 0);
5538                         current_link_up = true;
5539                 }
5540                 goto out;
5541         }
5542
5543         /* Want auto-negotiation.  */
5544         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5545
5546         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5547         if (flowctrl & ADVERTISE_1000XPAUSE)
5548                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5549         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5550                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5551
5552         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5553                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5554                     tp->serdes_counter &&
5555                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5556                                     MAC_STATUS_RCVD_CFG)) ==
5557                      MAC_STATUS_PCS_SYNCED)) {
5558                         tp->serdes_counter--;
5559                         current_link_up = true;
5560                         goto out;
5561                 }
5562 restart_autoneg:
5563                 if (workaround)
5564                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5565                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5566                 udelay(5);
5567                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5568
5569                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5570                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5571         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5572                                  MAC_STATUS_SIGNAL_DET)) {
5573                 sg_dig_status = tr32(SG_DIG_STATUS);
5574                 mac_status = tr32(MAC_STATUS);
5575
5576                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5577                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5578                         u32 local_adv = 0, remote_adv = 0;
5579
5580                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5581                                 local_adv |= ADVERTISE_1000XPAUSE;
5582                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5583                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5584
5585                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5586                                 remote_adv |= LPA_1000XPAUSE;
5587                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5588                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5589
5590                         tp->link_config.rmt_adv =
5591                                            mii_adv_to_ethtool_adv_x(remote_adv);
5592
5593                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5594                         current_link_up = true;
5595                         tp->serdes_counter = 0;
5596                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5597                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5598                         if (tp->serdes_counter)
5599                                 tp->serdes_counter--;
5600                         else {
5601                                 if (workaround) {
5602                                         u32 val = serdes_cfg;
5603
5604                                         if (port_a)
5605                                                 val |= 0xc010000;
5606                                         else
5607                                                 val |= 0x4010000;
5608
5609                                         tw32_f(MAC_SERDES_CFG, val);
5610                                 }
5611
5612                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5613                                 udelay(40);
5614
5615                                 /* Parallel detection: the link is up only
5616                                  * if we have PCS_SYNC and are not receiving
5617                                  * config code words. */
5618                                 mac_status = tr32(MAC_STATUS);
5619                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5620                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5621                                         tg3_setup_flow_control(tp, 0, 0);
5622                                         current_link_up = true;
5623                                         tp->phy_flags |=
5624                                                 TG3_PHYFLG_PARALLEL_DETECT;
5625                                         tp->serdes_counter =
5626                                                 SERDES_PARALLEL_DET_TIMEOUT;
5627                                 } else
5628                                         goto restart_autoneg;
5629                         }
5630                 }
5631         } else {
5632                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5633                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5634         }
5635
5636 out:
5637         return current_link_up;
5638 }
5639
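/* Resolve fiber link state without the hardware autoneg block: run the
 * software autoneg state machine, or simply force a 1000FD link when
 * autoneg is disabled.
 */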
5640 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5641 {
5642         bool current_link_up = false;
5643
5644         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5645                 goto out;
5646
5647         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5648                 u32 txflags, rxflags;
5649                 int i;
5650
5651                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5652                         u32 local_adv = 0, remote_adv = 0;
5653
5654                         if (txflags & ANEG_CFG_PS1)
5655                                 local_adv |= ADVERTISE_1000XPAUSE;
5656                         if (txflags & ANEG_CFG_PS2)
5657                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5658
5659                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5660                                 remote_adv |= LPA_1000XPAUSE;
5661                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5662                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5663
5664                         tp->link_config.rmt_adv =
5665                                            mii_adv_to_ethtool_adv_x(remote_adv);
5666
5667                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5668
5669                         current_link_up = true;
5670                 }
5671                 for (i = 0; i < 30; i++) {
5672                         udelay(20);
5673                         tw32_f(MAC_STATUS,
5674                                (MAC_STATUS_SYNC_CHANGED |
5675                                 MAC_STATUS_CFG_CHANGED));
5676                         udelay(40);
5677                         if ((tr32(MAC_STATUS) &
5678                              (MAC_STATUS_SYNC_CHANGED |
5679                               MAC_STATUS_CFG_CHANGED)) == 0)
5680                                 break;
5681                 }
5682
5683                 mac_status = tr32(MAC_STATUS);
5684                 if (!current_link_up &&
5685                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5686                     !(mac_status & MAC_STATUS_RCVD_CFG))
5687                         current_link_up = true;
5688         } else {
5689                 tg3_setup_flow_control(tp, 0, 0);
5690
5691                 /* Force a 1000FD link up. */
5692                 current_link_up = true;
5693
5694                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5695                 udelay(40);
5696
5697                 tw32_f(MAC_MODE, tp->mac_mode);
5698                 udelay(40);
5699         }
5700
5701 out:
5702         return current_link_up;
5703 }
5704
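/* Top-level link setup for TBI (fiber) ports: pick hardware or
 * software autoneg, settle the MAC status, and update the link state,
 * LEDs and flow control accordingly.
 */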
5705 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5706 {
5707         u32 orig_pause_cfg;
5708         u16 orig_active_speed;
5709         u8 orig_active_duplex;
5710         u32 mac_status;
5711         bool current_link_up;
5712         int i;
5713
5714         orig_pause_cfg = tp->link_config.active_flowctrl;
5715         orig_active_speed = tp->link_config.active_speed;
5716         orig_active_duplex = tp->link_config.active_duplex;
5717
5718         if (!tg3_flag(tp, HW_AUTONEG) &&
5719             tp->link_up &&
5720             tg3_flag(tp, INIT_COMPLETE)) {
5721                 mac_status = tr32(MAC_STATUS);
5722                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5723                                MAC_STATUS_SIGNAL_DET |
5724                                MAC_STATUS_CFG_CHANGED |
5725                                MAC_STATUS_RCVD_CFG);
5726                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5727                                    MAC_STATUS_SIGNAL_DET)) {
5728                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5729                                             MAC_STATUS_CFG_CHANGED));
5730                         return 0;
5731                 }
5732         }
5733
5734         tw32_f(MAC_TX_AUTO_NEG, 0);
5735
5736         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5737         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5738         tw32_f(MAC_MODE, tp->mac_mode);
5739         udelay(40);
5740
5741         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5742                 tg3_init_bcm8002(tp);
5743
5744         /* Enable link change events even when polling the serdes. */
5745         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5746         udelay(40);
5747
5748         current_link_up = false;
5749         tp->link_config.rmt_adv = 0;
5750         mac_status = tr32(MAC_STATUS);
5751
5752         if (tg3_flag(tp, HW_AUTONEG))
5753                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5754         else
5755                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5756
5757         tp->napi[0].hw_status->status =
5758                 (SD_STATUS_UPDATED |
5759                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5760
5761         for (i = 0; i < 100; i++) {
5762                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763                                     MAC_STATUS_CFG_CHANGED));
5764                 udelay(5);
5765                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766                                          MAC_STATUS_CFG_CHANGED |
5767                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5768                         break;
5769         }
5770
5771         mac_status = tr32(MAC_STATUS);
5772         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773                 current_link_up = false;
5774                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775                     tp->serdes_counter == 0) {
5776                         tw32_f(MAC_MODE, (tp->mac_mode |
5777                                           MAC_MODE_SEND_CONFIGS));
5778                         udelay(1);
5779                         tw32_f(MAC_MODE, tp->mac_mode);
5780                 }
5781         }
5782
5783         if (current_link_up) {
5784                 tp->link_config.active_speed = SPEED_1000;
5785                 tp->link_config.active_duplex = DUPLEX_FULL;
5786                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787                                     LED_CTRL_LNKLED_OVERRIDE |
5788                                     LED_CTRL_1000MBPS_ON));
5789         } else {
5790                 tp->link_config.active_speed = SPEED_UNKNOWN;
5791                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793                                     LED_CTRL_LNKLED_OVERRIDE |
5794                                     LED_CTRL_TRAFFIC_OVERRIDE));
5795         }
5796
5797         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799                 if (orig_pause_cfg != now_pause_cfg ||
5800                     orig_active_speed != tp->link_config.active_speed ||
5801                     orig_active_duplex != tp->link_config.active_duplex)
5802                         tg3_link_report(tp);
5803         }
5804
5805         return 0;
5806 }
5807
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5809 {
5810         int err = 0;
5811         u32 bmsr, bmcr;
5812         u16 current_speed = SPEED_UNKNOWN;
5813         u8 current_duplex = DUPLEX_UNKNOWN;
5814         bool current_link_up = false;
5815         u32 local_adv, remote_adv, sgsr;
5816
5817         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820              (sgsr & SERDES_TG3_SGMII_MODE)) {
5821
5822                 if (force_reset)
5823                         tg3_phy_reset(tp);
5824
5825                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5826
5827                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829                 } else {
5830                         current_link_up = true;
5831                         if (sgsr & SERDES_TG3_SPEED_1000) {
5832                                 current_speed = SPEED_1000;
5833                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5835                                 current_speed = SPEED_100;
5836                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837                         } else {
5838                                 current_speed = SPEED_10;
5839                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840                         }
5841
5842                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843                                 current_duplex = DUPLEX_FULL;
5844                         else
5845                                 current_duplex = DUPLEX_HALF;
5846                 }
5847
5848                 tw32_f(MAC_MODE, tp->mac_mode);
5849                 udelay(40);
5850
5851                 tg3_clear_mac_status(tp);
5852
5853                 goto fiber_setup_done;
5854         }
5855
5856         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857         tw32_f(MAC_MODE, tp->mac_mode);
5858         udelay(40);
5859
5860         tg3_clear_mac_status(tp);
5861
5862         if (force_reset)
5863                 tg3_phy_reset(tp);
5864
5865         tp->link_config.rmt_adv = 0;
5866
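        /* The BMSR link-status bit is latched low, so read the register
         * twice; the second read reflects the current link state.
         */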
5867         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871                         bmsr |= BMSR_LSTATUS;
5872                 else
5873                         bmsr &= ~BMSR_LSTATUS;
5874         }
5875
5876         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5877
5878         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880                 /* do nothing, just check for link up at the end */
5881         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5882                 u32 adv, newadv;
5883
5884                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886                                  ADVERTISE_1000XPAUSE |
5887                                  ADVERTISE_1000XPSE_ASYM |
5888                                  ADVERTISE_SLCT);
5889
5890                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5892
5893                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5895                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896                         tg3_writephy(tp, MII_BMCR, bmcr);
5897
5898                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5901
5902                         return err;
5903                 }
5904         } else {
5905                 u32 new_bmcr;
5906
5907                 bmcr &= ~BMCR_SPEED1000;
5908                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5909
5910                 if (tp->link_config.duplex == DUPLEX_FULL)
5911                         new_bmcr |= BMCR_FULLDPLX;
5912
5913                 if (new_bmcr != bmcr) {
5914                         /* BMCR_SPEED1000 is a reserved bit that needs
5915                          * to be set on write.
5916                          */
5917                         new_bmcr |= BMCR_SPEED1000;
5918
5919                         /* Force a linkdown */
5920                         if (tp->link_up) {
5921                                 u32 adv;
5922
5923                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924                                 adv &= ~(ADVERTISE_1000XFULL |
5925                                          ADVERTISE_1000XHALF |
5926                                          ADVERTISE_SLCT);
5927                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5928                                 tg3_writephy(tp, MII_BMCR, bmcr |
5929                                                            BMCR_ANRESTART |
5930                                                            BMCR_ANENABLE);
5931                                 udelay(10);
5932                                 tg3_carrier_off(tp);
5933                         }
5934                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5935                         bmcr = new_bmcr;
5936                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940                                         bmsr |= BMSR_LSTATUS;
5941                                 else
5942                                         bmsr &= ~BMSR_LSTATUS;
5943                         }
5944                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5945                 }
5946         }
5947
5948         if (bmsr & BMSR_LSTATUS) {
5949                 current_speed = SPEED_1000;
5950                 current_link_up = true;
5951                 if (bmcr & BMCR_FULLDPLX)
5952                         current_duplex = DUPLEX_FULL;
5953                 else
5954                         current_duplex = DUPLEX_HALF;
5955
5956                 local_adv = 0;
5957                 remote_adv = 0;
5958
5959                 if (bmcr & BMCR_ANENABLE) {
5960                         u32 common;
5961
5962                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964                         common = local_adv & remote_adv;
5965                         if (common & (ADVERTISE_1000XHALF |
5966                                       ADVERTISE_1000XFULL)) {
5967                                 if (common & ADVERTISE_1000XFULL)
5968                                         current_duplex = DUPLEX_FULL;
5969                                 else
5970                                         current_duplex = DUPLEX_HALF;
5971
5972                                 tp->link_config.rmt_adv =
5973                                            mii_adv_to_ethtool_adv_x(remote_adv);
5974                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5975                                 /* Link is up via parallel detect */
5976                         } else {
5977                                 current_link_up = false;
5978                         }
5979                 }
5980         }
5981
5982 fiber_setup_done:
5983         if (current_link_up && current_duplex == DUPLEX_FULL)
5984                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5985
5986         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987         if (tp->link_config.active_duplex == DUPLEX_HALF)
5988                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5989
5990         tw32_f(MAC_MODE, tp->mac_mode);
5991         udelay(40);
5992
5993         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5994
5995         tp->link_config.active_speed = current_speed;
5996         tp->link_config.active_duplex = current_duplex;
5997
5998         tg3_test_and_report_link_chg(tp, current_link_up);
5999         return err;
6000 }
6001
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6003 {
6004         if (tp->serdes_counter) {
6005                 /* Give autoneg time to complete. */
6006                 tp->serdes_counter--;
6007                 return;
6008         }
6009
6010         if (!tp->link_up &&
6011             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6012                 u32 bmcr;
6013
6014                 tg3_readphy(tp, MII_BMCR, &bmcr);
6015                 if (bmcr & BMCR_ANENABLE) {
6016                         u32 phy1, phy2;
6017
6018                         /* Select shadow register 0x1f */
6019                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6021
6022                         /* Select expansion interrupt status register */
6023                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024                                          MII_TG3_DSP_EXP1_INT_STAT);
6025                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027
6028                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6029                                 /* We have signal detect and are not receiving
6030                                  * config code words; the link is up by
6031                                  * parallel detection.
6032                                  */
6033
6034                                 bmcr &= ~BMCR_ANENABLE;
6035                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036                                 tg3_writephy(tp, MII_BMCR, bmcr);
6037                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6038                         }
6039                 }
6040         } else if (tp->link_up &&
6041                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6043                 u32 phy2;
6044
6045                 /* Select expansion interrupt status register */
6046                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047                                  MII_TG3_DSP_EXP1_INT_STAT);
6048                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6049                 if (phy2 & 0x20) {
6050                         u32 bmcr;
6051
6052                         /* Config code words received, turn on autoneg. */
6053                         tg3_readphy(tp, MII_BMCR, &bmcr);
6054                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6055
6056                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6057
6058                 }
6059         }
6060 }
6061
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6063 {
6064         u32 val;
6065         int err;
6066
6067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068                 err = tg3_setup_fiber_phy(tp, force_reset);
6069         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6071         else
6072                 err = tg3_setup_copper_phy(tp, force_reset);
6073
6074         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6075                 u32 scale;
6076
6077                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6079                         scale = 65;
6080                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6081                         scale = 6;
6082                 else
6083                         scale = 12;
6084
6085                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087                 tw32(GRC_MISC_CFG, val);
6088         }
6089
6090         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091               (6 << TX_LENGTHS_IPG_SHIFT);
6092         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093             tg3_asic_rev(tp) == ASIC_REV_5762)
6094                 val |= tr32(MAC_TX_LENGTHS) &
6095                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6097
6098         if (tp->link_config.active_speed == SPEED_1000 &&
6099             tp->link_config.active_duplex == DUPLEX_HALF)
6100                 tw32(MAC_TX_LENGTHS, val |
6101                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6102         else
6103                 tw32(MAC_TX_LENGTHS, val |
6104                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6105
6106         if (!tg3_flag(tp, 5705_PLUS)) {
6107                 if (tp->link_up) {
6108                         tw32(HOSTCC_STAT_COAL_TICKS,
6109                              tp->coal.stats_block_coalesce_usecs);
6110                 } else {
6111                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6112                 }
6113         }
6114
6115         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116                 val = tr32(PCIE_PWR_MGMT_THRESH);
6117                 if (!tp->link_up)
6118                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6119                               tp->pwrmgmt_thresh;
6120                 else
6121                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122                 tw32(PCIE_PWR_MGMT_THRESH, val);
6123         }
6124
6125         return err;
6126 }
6127
6128 /* tp->lock must be held */
6129 static u64 tg3_refclk_read(struct tg3 *tp)
6130 {
6131         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6132         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6133 }
6134
6135 /* tp->lock must be held */
6136 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6137 {
6138         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6139
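        /* Halt the reference clock while the 64-bit value is loaded so the
         * counter cannot advance between the two 32-bit register writes.
         */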
6140         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6141         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6142         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6143         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6144 }
6145
6146 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6147 static inline void tg3_full_unlock(struct tg3 *tp);
6148 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6149 {
6150         struct tg3 *tp = netdev_priv(dev);
6151
6152         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6153                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6154                                 SOF_TIMESTAMPING_SOFTWARE;
6155
6156         if (tg3_flag(tp, PTP_CAPABLE)) {
6157                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6158                                         SOF_TIMESTAMPING_RX_HARDWARE |
6159                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6160         }
6161
6162         if (tp->ptp_clock)
6163                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6164         else
6165                 info->phc_index = -1;
6166
6167         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6168
6169         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6170                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6171                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6172                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6173         return 0;
6174 }
6175
6176 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6177 {
6178         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6179         bool neg_adj = false;
6180         u32 correction = 0;
6181
6182         if (ppb < 0) {
6183                 neg_adj = true;
6184                 ppb = -ppb;
6185         }
6186
6187         /* Frequency adjustment is performed using hardware with a 24 bit
6188          * accumulator and a programmable correction value. On each clk, the
6189          * correction value gets added to the accumulator and when it
6190          * overflows, the time counter is incremented/decremented.
6191          *
6192          * So conversion from ppb to correction value is
6193          *              ppb * (1 << 24) / 1000000000
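         *
         * For example, ppb = 1000 (one part per million) gives
         *              1000 * (1 << 24) / 1000000000 = 16,
         * so the 24-bit accumulator overflows about once every
         * 2^24 / 16 = 2^20 clock cycles, i.e. roughly one adjusted
         * tick per million clocks.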
6194          */
6195         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6196                      TG3_EAV_REF_CLK_CORRECT_MASK;
6197
6198         tg3_full_lock(tp, 0);
6199
6200         if (correction)
6201                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6202                      TG3_EAV_REF_CLK_CORRECT_EN |
6203                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6204         else
6205                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206
6207         tg3_full_unlock(tp);
6208
6209         return 0;
6210 }
6211
6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 {
6214         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215
6216         tg3_full_lock(tp, 0);
6217         tp->ptp_adjust += delta;
6218         tg3_full_unlock(tp);
6219
6220         return 0;
6221 }
6222
6223 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6224 {
6225         u64 ns;
6226         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228         tg3_full_lock(tp, 0);
6229         ns = tg3_refclk_read(tp);
6230         ns += tp->ptp_adjust;
6231         tg3_full_unlock(tp);
6232
6233         *ts = ns_to_timespec64(ns);
6234
6235         return 0;
6236 }
6237
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239                            const struct timespec64 *ts)
6240 {
6241         u64 ns;
6242         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243
6244         ns = timespec64_to_ns(ts);
6245
6246         tg3_full_lock(tp, 0);
6247         tg3_refclk_write(tp, ns);
6248         tp->ptp_adjust = 0;
6249         tg3_full_unlock(tp);
6250
6251         return 0;
6252 }
6253
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255                           struct ptp_clock_request *rq, int on)
6256 {
6257         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258         u32 clock_ctl;
6259         int rval = 0;
6260
6261         switch (rq->type) {
6262         case PTP_CLK_REQ_PEROUT:
6263                 if (rq->perout.index != 0)
6264                         return -EINVAL;
6265
6266                 tg3_full_lock(tp, 0);
6267                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6268                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6269
6270                 if (on) {
6271                         u64 nsec;
6272
6273                         nsec = rq->perout.start.sec * 1000000000ULL +
6274                                rq->perout.start.nsec;
6275
6276                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6277                                 netdev_warn(tp->dev,
6278                                             "Device supports only a one-shot timesync output, period must be 0\n");
6279                                 rval = -EINVAL;
6280                                 goto err_out;
6281                         }
6282
6283                         if (nsec & (1ULL << 63)) {
6284                                 netdev_warn(tp->dev,
6285                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6286                                 rval = -EINVAL;
6287                                 goto err_out;
6288                         }
6289
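                        /* Arm the one-shot watchdog: the low 32 bits of the
                         * target time go in LSB, the remaining bits plus the
                         * enable flag in MSB.
                         */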
6290                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6291                         tw32(TG3_EAV_WATCHDOG0_MSB,
6292                              TG3_EAV_WATCHDOG0_EN |
6293                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6294
6295                         tw32(TG3_EAV_REF_CLCK_CTL,
6296                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6297                 } else {
6298                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6299                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6300                 }
6301
6302 err_out:
6303                 tg3_full_unlock(tp);
6304                 return rval;
6305
6306         default:
6307                 break;
6308         }
6309
6310         return -EOPNOTSUPP;
6311 }
6312
6313 static const struct ptp_clock_info tg3_ptp_caps = {
6314         .owner          = THIS_MODULE,
6315         .name           = "tg3 clock",
6316         .max_adj        = 250000000,
6317         .n_alarm        = 0,
6318         .n_ext_ts       = 0,
6319         .n_per_out      = 1,
6320         .n_pins         = 0,
6321         .pps            = 0,
6322         .adjfreq        = tg3_ptp_adjfreq,
6323         .adjtime        = tg3_ptp_adjtime,
6324         .gettime64      = tg3_ptp_gettime,
6325         .settime64      = tg3_ptp_settime,
6326         .enable         = tg3_ptp_enable,
6327 };
6328
6329 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6330                                      struct skb_shared_hwtstamps *timestamp)
6331 {
6332         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6333         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6334                                            tp->ptp_adjust);
6335 }
6336
6337 /* tp->lock must be held */
6338 static void tg3_ptp_init(struct tg3 *tp)
6339 {
6340         if (!tg3_flag(tp, PTP_CAPABLE))
6341                 return;
6342
6343         /* Initialize the hardware clock to the system time. */
6344         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6345         tp->ptp_adjust = 0;
6346         tp->ptp_info = tg3_ptp_caps;
6347 }
6348
6349 /* tp->lock must be held */
6350 static void tg3_ptp_resume(struct tg3 *tp)
6351 {
6352         if (!tg3_flag(tp, PTP_CAPABLE))
6353                 return;
6354
6355         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6356         tp->ptp_adjust = 0;
6357 }
6358
6359 static void tg3_ptp_fini(struct tg3 *tp)
6360 {
6361         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6362                 return;
6363
6364         ptp_clock_unregister(tp->ptp_clock);
6365         tp->ptp_clock = NULL;
6366         tp->ptp_adjust = 0;
6367 }
6368
6369 static inline int tg3_irq_sync(struct tg3 *tp)
6370 {
6371         return tp->irq_sync;
6372 }
6373
6374 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6375 {
6376         int i;
6377
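        /* Index the destination buffer by the register offset so that each
         * register value lands at its own offset within the dump.
         */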
6378         dst = (u32 *)((u8 *)dst + off);
6379         for (i = 0; i < len; i += sizeof(u32))
6380                 *dst++ = tr32(off + i);
6381 }
6382
6383 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6384 {
6385         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6386         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6387         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6388         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6389         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6390         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6391         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6392         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6393         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6394         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6395         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6396         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6397         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6398         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6399         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6400         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6401         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6402         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6403         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6404
6405         if (tg3_flag(tp, SUPPORT_MSIX))
6406                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6407
6408         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6409         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6410         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6411         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6412         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6413         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6414         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6415         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6416
6417         if (!tg3_flag(tp, 5705_PLUS)) {
6418                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6419                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6420                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6421         }
6422
6423         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6424         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6425         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6426         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6427         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6428
6429         if (tg3_flag(tp, NVRAM))
6430                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6431 }
6432
6433 static void tg3_dump_state(struct tg3 *tp)
6434 {
6435         int i;
6436         u32 *regs;
6437
6438         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6439         if (!regs)
6440                 return;
6441
6442         if (tg3_flag(tp, PCI_EXPRESS)) {
6443                 /* Read up to but not including private PCI registers */
6444                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6445                         regs[i / sizeof(u32)] = tr32(i);
6446         } else
6447                 tg3_dump_legacy_regs(tp, regs);
6448
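        /* Print four registers per line, skipping groups that are all zero. */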
6449         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6450                 if (!regs[i + 0] && !regs[i + 1] &&
6451                     !regs[i + 2] && !regs[i + 3])
6452                         continue;
6453
6454                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6455                            i * 4,
6456                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6457         }
6458
6459         kfree(regs);
6460
6461         for (i = 0; i < tp->irq_cnt; i++) {
6462                 struct tg3_napi *tnapi = &tp->napi[i];
6463
6464                 /* SW status block */
6465                 netdev_err(tp->dev,
6466                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6467                            i,
6468                            tnapi->hw_status->status,
6469                            tnapi->hw_status->status_tag,
6470                            tnapi->hw_status->rx_jumbo_consumer,
6471                            tnapi->hw_status->rx_consumer,
6472                            tnapi->hw_status->rx_mini_consumer,
6473                            tnapi->hw_status->idx[0].rx_producer,
6474                            tnapi->hw_status->idx[0].tx_consumer);
6475
6476                 netdev_err(tp->dev,
6477                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6478                            i,
6479                            tnapi->last_tag, tnapi->last_irq_tag,
6480                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6481                            tnapi->rx_rcb_ptr,
6482                            tnapi->prodring.rx_std_prod_idx,
6483                            tnapi->prodring.rx_std_cons_idx,
6484                            tnapi->prodring.rx_jmb_prod_idx,
6485                            tnapi->prodring.rx_jmb_cons_idx);
6486         }
6487 }
6488
6489 /* This is called whenever we suspect that the system chipset is re-
6490  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6491  * is bogus tx completions. We try to recover by setting the
6492  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6493  * in the workqueue.
6494  */
6495 static void tg3_tx_recover(struct tg3 *tp)
6496 {
6497         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6498                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6499
6500         netdev_warn(tp->dev,
6501                     "The system may be re-ordering memory-mapped I/O "
6502                     "cycles to the network device, attempting to recover. "
6503                     "Please report the problem to the driver maintainer "
6504                     "and include system chipset information.\n");
6505
6506         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6507 }
6508
6509 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6510 {
6511         /* Tell compiler to fetch tx indices from memory. */
6512         barrier();
6513         return tnapi->tx_pending -
6514                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6515 }
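/* Worked example, assuming the default TG3_TX_RING_SIZE of 512: with
 * tx_prod == 5 and tx_cons == 510 (the producer has wrapped), the ring
 * holds (5 - 510) & 511 == 7 in-flight descriptors, so tx_pending - 7
 * descriptors are still available.
 */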
6516
6517 /* Tigon3 never reports partial packet sends.  So we do not
6518  * need special logic to handle SKBs that have not had all
6519  * of their frags sent yet, like SunGEM does.
6520  */
6521 static void tg3_tx(struct tg3_napi *tnapi)
6522 {
6523         struct tg3 *tp = tnapi->tp;
6524         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6525         u32 sw_idx = tnapi->tx_cons;
6526         struct netdev_queue *txq;
6527         int index = tnapi - tp->napi;
6528         unsigned int pkts_compl = 0, bytes_compl = 0;
6529
6530         if (tg3_flag(tp, ENABLE_TSS))
6531                 index--;
6532
6533         txq = netdev_get_tx_queue(tp->dev, index);
6534
6535         while (sw_idx != hw_idx) {
6536                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6537                 struct sk_buff *skb = ri->skb;
6538                 int i, tx_bug = 0;
6539
6540                 if (unlikely(skb == NULL)) {
6541                         tg3_tx_recover(tp);
6542                         return;
6543                 }
6544
6545                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6546                         struct skb_shared_hwtstamps timestamp;
6547                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6548                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6549
6550                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6551
6552                         skb_tstamp_tx(skb, &timestamp);
6553                 }
6554
6555                 pci_unmap_single(tp->pdev,
6556                                  dma_unmap_addr(ri, mapping),
6557                                  skb_headlen(skb),
6558                                  PCI_DMA_TODEVICE);
6559
6560                 ri->skb = NULL;
6561
6562                 while (ri->fragmented) {
6563                         ri->fragmented = false;
6564                         sw_idx = NEXT_TX(sw_idx);
6565                         ri = &tnapi->tx_buffers[sw_idx];
6566                 }
6567
6568                 sw_idx = NEXT_TX(sw_idx);
6569
6570                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6571                         ri = &tnapi->tx_buffers[sw_idx];
6572                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6573                                 tx_bug = 1;
6574
6575                         pci_unmap_page(tp->pdev,
6576                                        dma_unmap_addr(ri, mapping),
6577                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6578                                        PCI_DMA_TODEVICE);
6579
6580                         while (ri->fragmented) {
6581                                 ri->fragmented = false;
6582                                 sw_idx = NEXT_TX(sw_idx);
6583                                 ri = &tnapi->tx_buffers[sw_idx];
6584                         }
6585
6586                         sw_idx = NEXT_TX(sw_idx);
6587                 }
6588
6589                 pkts_compl++;
6590                 bytes_compl += skb->len;
6591
6592                 dev_consume_skb_any(skb);
6593
6594                 if (unlikely(tx_bug)) {
6595                         tg3_tx_recover(tp);
6596                         return;
6597                 }
6598         }
6599
6600         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6601
6602         tnapi->tx_cons = sw_idx;
6603
6604         /* Need to make the tx_cons update visible to tg3_start_xmit()
6605          * before checking for netif_queue_stopped().  Without the
6606          * memory barrier, there is a small possibility that tg3_start_xmit()
6607          * will miss it and cause the queue to be stopped forever.
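         * (That is, tg3_start_xmit() could read a stale tx_cons, conclude
         * the ring is still full, and never wake the stopped queue.)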
6608          */
6609         smp_mb();
6610
6611         if (unlikely(netif_tx_queue_stopped(txq) &&
6612                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6613                 __netif_tx_lock(txq, smp_processor_id());
6614                 if (netif_tx_queue_stopped(txq) &&
6615                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6616                         netif_tx_wake_queue(txq);
6617                 __netif_tx_unlock(txq);
6618         }
6619 }
6620
6621 static void tg3_frag_free(bool is_frag, void *data)
6622 {
6623         if (is_frag)
6624                 skb_free_frag(data);
6625         else
6626                 kfree(data);
6627 }
6628
6629 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6630 {
6631         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6632                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6633
6634         if (!ri->data)
6635                 return;
6636
6637         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6638                          map_sz, PCI_DMA_FROMDEVICE);
6639         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6640         ri->data = NULL;
6641 }
6642
6643
6644 /* Returns the size of the skb allocated, or < 0 on error.
6645  *
6646  * We only need to fill in the address because the other members
6647  * of the RX descriptor are invariant; see tg3_init_rings().
6648  *
6649  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6650  * posting buffers we only dirty the first cache line of the RX
6651  * descriptor (containing the address), whereas for the RX status
6652  * buffers the cpu only reads the last cache line of the RX descriptor
6653  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6654  */
6655 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6656                              u32 opaque_key, u32 dest_idx_unmasked,
6657                              unsigned int *frag_size)
6658 {
6659         struct tg3_rx_buffer_desc *desc;
6660         struct ring_info *map;
6661         u8 *data;
6662         dma_addr_t mapping;
6663         int skb_size, data_size, dest_idx;
6664
6665         switch (opaque_key) {
6666         case RXD_OPAQUE_RING_STD:
6667                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6668                 desc = &tpr->rx_std[dest_idx];
6669                 map = &tpr->rx_std_buffers[dest_idx];
6670                 data_size = tp->rx_pkt_map_sz;
6671                 break;
6672
6673         case RXD_OPAQUE_RING_JUMBO:
6674                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6675                 desc = &tpr->rx_jmb[dest_idx].std;
6676                 map = &tpr->rx_jmb_buffers[dest_idx];
6677                 data_size = TG3_RX_JMB_MAP_SZ;
6678                 break;
6679
6680         default:
6681                 return -EINVAL;
6682         }
6683
6684         /* Do not overwrite any of the map or rp information
6685          * until we are sure we can commit to a new buffer.
6686          *
6687          * Callers depend upon this behavior and assume that
6688          * we leave everything unchanged if we fail.
6689          */
6690         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6691                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
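        /* Sub-page buffers come from the page-fragment allocator; larger
         * ones fall back to kmalloc().  *frag_size records which case was
         * taken (0 means kmalloc'd) so that build_skb() and
         * tg3_frag_free() treat the memory correctly.
         */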
6692         if (skb_size <= PAGE_SIZE) {
6693                 data = netdev_alloc_frag(skb_size);
6694                 *frag_size = skb_size;
6695         } else {
6696                 data = kmalloc(skb_size, GFP_ATOMIC);
6697                 *frag_size = 0;
6698         }
6699         if (!data)
6700                 return -ENOMEM;
6701
6702         mapping = pci_map_single(tp->pdev,
6703                                  data + TG3_RX_OFFSET(tp),
6704                                  data_size,
6705                                  PCI_DMA_FROMDEVICE);
6706         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6707                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6708                 return -EIO;
6709         }
6710
6711         map->data = data;
6712         dma_unmap_addr_set(map, mapping, mapping);
6713
6714         desc->addr_hi = ((u64)mapping >> 32);
6715         desc->addr_lo = ((u64)mapping & 0xffffffff);
6716
6717         return data_size;
6718 }
6719
6720 /* We only need to move the address over because the other
6721  * members of the RX descriptor are invariant.  See notes above
6722  * tg3_alloc_rx_data for full details.
6723  */
6724 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6725                            struct tg3_rx_prodring_set *dpr,
6726                            u32 opaque_key, int src_idx,
6727                            u32 dest_idx_unmasked)
6728 {
6729         struct tg3 *tp = tnapi->tp;
6730         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6731         struct ring_info *src_map, *dest_map;
6732         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6733         int dest_idx;
6734
6735         switch (opaque_key) {
6736         case RXD_OPAQUE_RING_STD:
6737                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6738                 dest_desc = &dpr->rx_std[dest_idx];
6739                 dest_map = &dpr->rx_std_buffers[dest_idx];
6740                 src_desc = &spr->rx_std[src_idx];
6741                 src_map = &spr->rx_std_buffers[src_idx];
6742                 break;
6743
6744         case RXD_OPAQUE_RING_JUMBO:
6745                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6746                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6747                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6748                 src_desc = &spr->rx_jmb[src_idx].std;
6749                 src_map = &spr->rx_jmb_buffers[src_idx];
6750                 break;
6751
6752         default:
6753                 return;
6754         }
6755
6756         dest_map->data = src_map->data;
6757         dma_unmap_addr_set(dest_map, mapping,
6758                            dma_unmap_addr(src_map, mapping));
6759         dest_desc->addr_hi = src_desc->addr_hi;
6760         dest_desc->addr_lo = src_desc->addr_lo;
6761
6762         /* Ensure that the update to the skb happens after the physical
6763          * addresses have been transferred to the new BD location.
6764          */
6765         smp_wmb();
6766
6767         src_map->data = NULL;
6768 }
6769
6770 /* The RX ring scheme is composed of multiple rings which post fresh
6771  * buffers to the chip, and one special ring the chip uses to report
6772  * status back to the host.
6773  *
6774  * The special ring reports the status of received packets to the
6775  * host.  The chip does not write into the original descriptor the
6776  * RX buffer was obtained from.  The chip simply takes the original
6777  * descriptor as provided by the host, updates the status and length
6778  * field, then writes this into the next status ring entry.
6779  *
6780  * Each ring the host uses to post buffers to the chip is described
6781  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6782  * it is first placed into the on-chip RAM.  Once the packet's length
6783  * is known, the chip walks down the TG3_BDINFO entries to select the
6784  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6785  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6786  *
6787  * The "separate ring for rx status" scheme may sound queer, but it makes
6788  * sense from a cache coherency perspective.  If only the host writes
6789  * to the buffer post rings, and only the chip writes to the rx status
6790  * rings, then cache lines never move beyond shared-modified state.
6791  * If both the host and chip were to write into the same ring, cache line
6792  * eviction could occur since both entities want it in an exclusive state.
6793  */
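/* A minimal sketch (not driver code) of the MAXLEN-based selection
 * described above, assuming only the standard and jumbo rings are
 * posted:
 *
 *      if (len <= std_maxlen)
 *              use the standard ring's buffer;
 *      else if (len <= jumbo_maxlen)
 *              use the jumbo ring's buffer;
 *      else
 *              drop the frame;
 */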
6794 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6795 {
6796         struct tg3 *tp = tnapi->tp;
6797         u32 work_mask, rx_std_posted = 0;
6798         u32 std_prod_idx, jmb_prod_idx;
6799         u32 sw_idx = tnapi->rx_rcb_ptr;
6800         u16 hw_idx;
6801         int received;
6802         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6803
6804         hw_idx = *(tnapi->rx_rcb_prod_idx);
6805         /*
6806          * We need to order the read of hw_idx and the read of
6807          * the opaque cookie.
6808          */
6809         rmb();
6810         work_mask = 0;
6811         received = 0;
6812         std_prod_idx = tpr->rx_std_prod_idx;
6813         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6814         while (sw_idx != hw_idx && budget > 0) {
6815                 struct ring_info *ri;
6816                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6817                 unsigned int len;
6818                 struct sk_buff *skb;
6819                 dma_addr_t dma_addr;
6820                 u32 opaque_key, desc_idx, *post_ptr;
6821                 u8 *data;
6822                 u64 tstamp = 0;
6823
6824                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6825                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6826                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6827                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6828                         dma_addr = dma_unmap_addr(ri, mapping);
6829                         data = ri->data;
6830                         post_ptr = &std_prod_idx;
6831                         rx_std_posted++;
6832                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6833                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6834                         dma_addr = dma_unmap_addr(ri, mapping);
6835                         data = ri->data;
6836                         post_ptr = &jmb_prod_idx;
6837                 } else
6838                         goto next_pkt_nopost;
6839
6840                 work_mask |= opaque_key;
6841
6842                 if (desc->err_vlan & RXD_ERR_MASK) {
6843                 drop_it:
6844                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6845                                        desc_idx, *post_ptr);
6846                 drop_it_no_recycle:
6847                         /* Other statistics are tracked by the card. */
6848                         tp->rx_dropped++;
6849                         goto next_pkt;
6850                 }
6851
6852                 prefetch(data + TG3_RX_OFFSET(tp));
6853                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6854                       ETH_FCS_LEN;
6855
6856                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857                      RXD_FLAG_PTPSTAT_PTPV1 ||
6858                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6859                      RXD_FLAG_PTPSTAT_PTPV2) {
6860                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6861                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6862                 }
6863
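                /* Large packets keep their DMA buffer and get a freshly
                 * allocated replacement posted to the ring; small packets
                 * are copied into a new skb so the original buffer can be
                 * recycled in place.
                 */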
6864                 if (len > TG3_RX_COPY_THRESH(tp)) {
6865                         int skb_size;
6866                         unsigned int frag_size;
6867
6868                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6869                                                     *post_ptr, &frag_size);
6870                         if (skb_size < 0)
6871                                 goto drop_it;
6872
6873                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6874                                          PCI_DMA_FROMDEVICE);
6875
6876                         /* Ensure that the update to the data happens
6877                          * after the usage of the old DMA mapping.
6878                          */
6879                         smp_wmb();
6880
6881                         ri->data = NULL;
6882
6883                         skb = build_skb(data, frag_size);
6884                         if (!skb) {
6885                                 tg3_frag_free(frag_size != 0, data);
6886                                 goto drop_it_no_recycle;
6887                         }
6888                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6889                 } else {
6890                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6891                                        desc_idx, *post_ptr);
6892
6893                         skb = netdev_alloc_skb(tp->dev,
6894                                                len + TG3_RAW_IP_ALIGN);
6895                         if (skb == NULL)
6896                                 goto drop_it_no_recycle;
6897
6898                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6899                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6900                         memcpy(skb->data,
6901                                data + TG3_RX_OFFSET(tp),
6902                                len);
6903                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6904                 }
6905
6906                 skb_put(skb, len);
6907                 if (tstamp)
6908                         tg3_hwclock_to_timestamp(tp, tstamp,
6909                                                  skb_hwtstamps(skb));
6910
6911                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6912                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6913                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6914                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6915                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6916                 else
6917                         skb_checksum_none_assert(skb);
6918
6919                 skb->protocol = eth_type_trans(skb, tp->dev);
6920
6921                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6922                     skb->protocol != htons(ETH_P_8021Q) &&
6923                     skb->protocol != htons(ETH_P_8021AD)) {
6924                         dev_kfree_skb_any(skb);
6925                         goto drop_it_no_recycle;
6926                 }
6927
6928                 if (desc->type_flags & RXD_FLAG_VLAN &&
6929                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6930                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6931                                                desc->err_vlan & RXD_VLAN_MASK);
6932
6933                 napi_gro_receive(&tnapi->napi, skb);
6934
6935                 received++;
6936                 budget--;
6937
6938 next_pkt:
6939                 (*post_ptr)++;
6940
6941                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6942                         tpr->rx_std_prod_idx = std_prod_idx &
6943                                                tp->rx_std_ring_mask;
6944                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6945                                      tpr->rx_std_prod_idx);
6946                         work_mask &= ~RXD_OPAQUE_RING_STD;
6947                         rx_std_posted = 0;
6948                 }
6949 next_pkt_nopost:
6950                 sw_idx++;
6951                 sw_idx &= tp->rx_ret_ring_mask;
6952
6953                 /* Refresh hw_idx to see if there is new work */
6954                 if (sw_idx == hw_idx) {
6955                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6956                         rmb();
6957                 }
6958         }
6959
6960         /* ACK the status ring. */
6961         tnapi->rx_rcb_ptr = sw_idx;
6962         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6963
6964         /* Refill RX ring(s). */
6965         if (!tg3_flag(tp, ENABLE_RSS)) {
6966                 /* Sync BD data before updating mailbox */
6967                 wmb();
6968
6969                 if (work_mask & RXD_OPAQUE_RING_STD) {
6970                         tpr->rx_std_prod_idx = std_prod_idx &
6971                                                tp->rx_std_ring_mask;
6972                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6973                                      tpr->rx_std_prod_idx);
6974                 }
6975                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6976                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6977                                                tp->rx_jmb_ring_mask;
6978                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6979                                      tpr->rx_jmb_prod_idx);
6980                 }
6981                 mmiowb();
6982         } else if (work_mask) {
6983                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6984                  * updated before the producer indices can be updated.
6985                  */
6986                 smp_wmb();
6987
6988                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6989                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6990
6991                 if (tnapi != &tp->napi[1]) {
6992                         tp->rx_refill = true;
6993                         napi_schedule(&tp->napi[1].napi);
6994                 }
6995         }
6996
6997         return received;
6998 }
6999
7000 static void tg3_poll_link(struct tg3 *tp)
7001 {
7002         /* handle link change and other phy events */
7003         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7004                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7005
7006                 if (sblk->status & SD_STATUS_LINK_CHG) {
7007                         sblk->status = SD_STATUS_UPDATED |
7008                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7009                         spin_lock(&tp->lock);
7010                         if (tg3_flag(tp, USE_PHYLIB)) {
7011                                 tw32_f(MAC_STATUS,
7012                                      (MAC_STATUS_SYNC_CHANGED |
7013                                       MAC_STATUS_CFG_CHANGED |
7014                                       MAC_STATUS_MI_COMPLETION |
7015                                       MAC_STATUS_LNKSTATE_CHANGED));
7016                                 udelay(40);
7017                         } else
7018                                 tg3_setup_phy(tp, false);
7019                         spin_unlock(&tp->lock);
7020                 }
7021         }
7022 }
7023
7024 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7025                                 struct tg3_rx_prodring_set *dpr,
7026                                 struct tg3_rx_prodring_set *spr)
7027 {
7028         u32 si, di, cpycnt, src_prod_idx;
7029         int i, err = 0;
7030
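        /* Move refilled buffers from the source producer ring (spr) to the
         * ring the hardware consumes (dpr), copying at most the contiguous
         * span that neither wraps the ring nor lands on an occupied slot.
         */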
7031         while (1) {
7032                 src_prod_idx = spr->rx_std_prod_idx;
7033
7034                 /* Make sure updates to the rx_std_buffers[] entries and the
7035                  * standard producer index are seen in the correct order.
7036                  */
7037                 smp_rmb();
7038
7039                 if (spr->rx_std_cons_idx == src_prod_idx)
7040                         break;
7041
7042                 if (spr->rx_std_cons_idx < src_prod_idx)
7043                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7044                 else
7045                         cpycnt = tp->rx_std_ring_mask + 1 -
7046                                  spr->rx_std_cons_idx;
7047
7048                 cpycnt = min(cpycnt,
7049                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7050
7051                 si = spr->rx_std_cons_idx;
7052                 di = dpr->rx_std_prod_idx;
7053
7054                 for (i = di; i < di + cpycnt; i++) {
7055                         if (dpr->rx_std_buffers[i].data) {
7056                                 cpycnt = i - di;
7057                                 err = -ENOSPC;
7058                                 break;
7059                         }
7060                 }
7061
7062                 if (!cpycnt)
7063                         break;
7064
7065                 /* Ensure that updates to the rx_std_buffers ring and the
7066                  * shadowed hardware producer ring from tg3_recycle_skb() are
7067                  * ordered correctly WRT the skb check above.
7068                  */
7069                 smp_rmb();
7070
7071                 memcpy(&dpr->rx_std_buffers[di],
7072                        &spr->rx_std_buffers[si],
7073                        cpycnt * sizeof(struct ring_info));
7074
7075                 for (i = 0; i < cpycnt; i++, di++, si++) {
7076                         struct tg3_rx_buffer_desc *sbd, *dbd;
7077                         sbd = &spr->rx_std[si];
7078                         dbd = &dpr->rx_std[di];
7079                         dbd->addr_hi = sbd->addr_hi;
7080                         dbd->addr_lo = sbd->addr_lo;
7081                 }
7082
7083                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7084                                        tp->rx_std_ring_mask;
7085                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7086                                        tp->rx_std_ring_mask;
7087         }
7088
7089         while (1) {
7090                 src_prod_idx = spr->rx_jmb_prod_idx;
7091
7092                 /* Make sure updates to the rx_jmb_buffers[] entries and
7093                  * the jumbo producer index are seen in the correct order.
7094                  */
7095                 smp_rmb();
7096
7097                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7098                         break;
7099
7100                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7101                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7102                 else
7103                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7104                                  spr->rx_jmb_cons_idx;
7105
7106                 cpycnt = min(cpycnt,
7107                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7108
7109                 si = spr->rx_jmb_cons_idx;
7110                 di = dpr->rx_jmb_prod_idx;
7111
7112                 for (i = di; i < di + cpycnt; i++) {
7113                         if (dpr->rx_jmb_buffers[i].data) {
7114                                 cpycnt = i - di;
7115                                 err = -ENOSPC;
7116                                 break;
7117                         }
7118                 }
7119
7120                 if (!cpycnt)
7121                         break;
7122
7123                 /* Ensure that updates to the rx_jmb_buffers ring and the
7124                  * shadowed hardware producer ring from tg3_recycle_skb() are
7125                  * ordered correctly WRT the skb check above.
7126                  */
7127                 smp_rmb();
7128
7129                 memcpy(&dpr->rx_jmb_buffers[di],
7130                        &spr->rx_jmb_buffers[si],
7131                        cpycnt * sizeof(struct ring_info));
7132
7133                 for (i = 0; i < cpycnt; i++, di++, si++) {
7134                         struct tg3_rx_buffer_desc *sbd, *dbd;
7135                         sbd = &spr->rx_jmb[si].std;
7136                         dbd = &dpr->rx_jmb[di].std;
7137                         dbd->addr_hi = sbd->addr_hi;
7138                         dbd->addr_lo = sbd->addr_lo;
7139                 }
7140
7141                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7142                                        tp->rx_jmb_ring_mask;
7143                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7144                                        tp->rx_jmb_ring_mask;
7145         }
7146
7147         return err;
7148 }
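/* Worked example (illustrative, not driver code) of the copy-count math
 * above: with rx_std_ring_mask = 511 (a 512-entry ring), a source
 * consumer index of 500 and a source producer index of 10:
 *
 *	cons (500) is not below prod (10), so copy up to the end of the
 *	ring first: cpycnt = 512 - 500 = 12 entries (indices 500..511);
 *	the next loop iteration copies the remaining 10 entries
 *	(indices 0..9) once the consumer index wraps to 0.
 *
 * cpycnt is further clamped by the space ahead of the destination
 * producer, and shrunk again if an occupied destination slot is hit
 * (the -ENOSPC case).
 */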
7149
7150 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7151 {
7152         struct tg3 *tp = tnapi->tp;
7153
7154         /* run TX completion thread */
7155         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7156                 tg3_tx(tnapi);
7157                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7158                         return work_done;
7159         }
7160
7161         if (!tnapi->rx_rcb_prod_idx)
7162                 return work_done;
7163
7164         /* run RX thread, within the bounds set by NAPI.
7165          * All RX "locking" is done by ensuring outside
7166          * code synchronizes with tg3->napi.poll()
7167          */
7168         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7169                 work_done += tg3_rx(tnapi, budget - work_done);
7170
7171         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7172                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7173                 int i, err = 0;
7174                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7175                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7176
7177                 tp->rx_refill = false;
7178                 for (i = 1; i <= tp->rxq_cnt; i++)
7179                         err |= tg3_rx_prodring_xfer(tp, dpr,
7180                                                     &tp->napi[i].prodring);
7181
7182                 wmb();
7183
7184                 if (std_prod_idx != dpr->rx_std_prod_idx)
7185                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7186                                      dpr->rx_std_prod_idx);
7187
7188                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7189                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7190                                      dpr->rx_jmb_prod_idx);
7191
7192                 mmiowb();
7193
7194                 if (err)
7195                         tw32_f(HOSTCC_MODE, tp->coal_now);
7196         }
7197
7198         return work_done;
7199 }
7200
7201 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7202 {
7203         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204                 schedule_work(&tp->reset_task);
7205 }
7206
7207 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7208 {
7209         cancel_work_sync(&tp->reset_task);
7210         tg3_flag_clear(tp, RESET_TASK_PENDING);
7211         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7212 }
7213
7214 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7215 {
7216         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217         struct tg3 *tp = tnapi->tp;
7218         int work_done = 0;
7219         struct tg3_hw_status *sblk = tnapi->hw_status;
7220
7221         while (1) {
7222                 work_done = tg3_poll_work(tnapi, work_done, budget);
7223
7224                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7225                         goto tx_recovery;
7226
7227                 if (unlikely(work_done >= budget))
7228                         break;
7229
7230                 /* tnapi->last_tag is used when re-enabling interrupts
7231                  * below to tell the hw how much work has been processed,
7232                  * so we must read it before checking for more work.
7233                  */
7234                 tnapi->last_tag = sblk->status_tag;
7235                 tnapi->last_irq_tag = tnapi->last_tag;
7236                 rmb();
7237
7238                 /* check for RX/TX work to do */
7239                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7240                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7241
7242                         /* This test is not race free, but looping
7243                          * again here reduces the number of interrupts.
7244                          */
7245                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7246                                 continue;
7247
7248                         napi_complete_done(napi, work_done);
7249                         /* Reenable interrupts. */
7250                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7251
7252                         /* This test is synchronized by napi_schedule()
7253                          * and napi_complete() to close the race condition.
7254                          */
7255                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7256                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7257                                                   HOSTCC_MODE_ENABLE |
7258                                                   tnapi->coal_now);
7259                         }
7260                         mmiowb();
7261                         break;
7262                 }
7263         }
7264
7265         return work_done;
7266
7267 tx_recovery:
7268         /* work_done is guaranteed to be less than budget. */
7269         napi_complete(napi);
7270         tg3_reset_task_schedule(tp);
7271         return work_done;
7272 }
7273
7274 static void tg3_process_error(struct tg3 *tp)
7275 {
7276         u32 val;
7277         bool real_error = false;
7278
7279         if (tg3_flag(tp, ERROR_PROCESSED))
7280                 return;
7281
7282         /* Check Flow Attention register */
7283         val = tr32(HOSTCC_FLOW_ATTN);
7284         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7285                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7286                 real_error = true;
7287         }
7288
7289         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7290                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7291                 real_error = true;
7292         }
7293
7294         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7295                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7296                 real_error = true;
7297         }
7298
7299         if (!real_error)
7300                 return;
7301
7302         tg3_dump_state(tp);
7303
7304         tg3_flag_set(tp, ERROR_PROCESSED);
7305         tg3_reset_task_schedule(tp);
7306 }
7307
7308 static int tg3_poll(struct napi_struct *napi, int budget)
7309 {
7310         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7311         struct tg3 *tp = tnapi->tp;
7312         int work_done = 0;
7313         struct tg3_hw_status *sblk = tnapi->hw_status;
7314
7315         while (1) {
7316                 if (sblk->status & SD_STATUS_ERROR)
7317                         tg3_process_error(tp);
7318
7319                 tg3_poll_link(tp);
7320
7321                 work_done = tg3_poll_work(tnapi, work_done, budget);
7322
7323                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7324                         goto tx_recovery;
7325
7326                 if (unlikely(work_done >= budget))
7327                         break;
7328
7329                 if (tg3_flag(tp, TAGGED_STATUS)) {
7330                         /* tnapi->last_tag is used in tg3_int_reenable() below
7331                          * to tell the hw how much work has been processed,
7332                          * so we must read it before checking for more work.
7333                          */
7334                         tnapi->last_tag = sblk->status_tag;
7335                         tnapi->last_irq_tag = tnapi->last_tag;
7336                         rmb();
7337                 } else
7338                         sblk->status &= ~SD_STATUS_UPDATED;
7339
7340                 if (likely(!tg3_has_work(tnapi))) {
7341                         napi_complete_done(napi, work_done);
7342                         tg3_int_reenable(tnapi);
7343                         break;
7344                 }
7345         }
7346
7347         return work_done;
7348
7349 tx_recovery:
7350         /* work_done is guaranteed to be less than budget. */
7351         napi_complete(napi);
7352         tg3_reset_task_schedule(tp);
7353         return work_done;
7354 }
7355
7356 static void tg3_napi_disable(struct tg3 *tp)
7357 {
7358         int i;
7359
7360         for (i = tp->irq_cnt - 1; i >= 0; i--)
7361                 napi_disable(&tp->napi[i].napi);
7362 }
7363
7364 static void tg3_napi_enable(struct tg3 *tp)
7365 {
7366         int i;
7367
7368         for (i = 0; i < tp->irq_cnt; i++)
7369                 napi_enable(&tp->napi[i].napi);
7370 }
7371
7372 static void tg3_napi_init(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7377         for (i = 1; i < tp->irq_cnt; i++)
7378                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7379 }
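/* The 64 passed to netif_napi_add() is the conventional NAPI weight (the
 * per-poll packet budget).  Vector 0 polls with tg3_poll(), which also
 * handles link changes and error status; the remaining MSI-X vectors use
 * the leaner tg3_poll_msix().
 */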
7380
7381 static void tg3_napi_fini(struct tg3 *tp)
7382 {
7383         int i;
7384
7385         for (i = 0; i < tp->irq_cnt; i++)
7386                 netif_napi_del(&tp->napi[i].napi);
7387 }
7388
7389 static inline void tg3_netif_stop(struct tg3 *tp)
7390 {
7391         netif_trans_update(tp->dev);    /* prevent tx timeout */
7392         tg3_napi_disable(tp);
7393         netif_carrier_off(tp->dev);
7394         netif_tx_disable(tp->dev);
7395 }
7396
7397 /* tp->lock must be held */
7398 static inline void tg3_netif_start(struct tg3 *tp)
7399 {
7400         tg3_ptp_resume(tp);
7401
7402         /* NOTE: unconditional netif_tx_wake_all_queues is only
7403          * appropriate so long as all callers are assured to
7404          * have free tx slots (such as after tg3_init_hw)
7405          */
7406         netif_tx_wake_all_queues(tp->dev);
7407
7408         if (tp->link_up)
7409                 netif_carrier_on(tp->dev);
7410
7411         tg3_napi_enable(tp);
7412         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7413         tg3_enable_ints(tp);
7414 }
7415
7416 static void tg3_irq_quiesce(struct tg3 *tp)
7417         __releases(tp->lock)
7418         __acquires(tp->lock)
7419 {
7420         int i;
7421
7422         BUG_ON(tp->irq_sync);
7423
7424         tp->irq_sync = 1;
7425         smp_mb();
7426
7427         spin_unlock_bh(&tp->lock);
7428
7429         for (i = 0; i < tp->irq_cnt; i++)
7430                 synchronize_irq(tp->napi[i].irq_vec);
7431
7432         spin_lock_bh(&tp->lock);
7433 }
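/* The smp_mb() above pairs with the tg3_irq_sync() check in the
 * interrupt handlers: once irq_sync is visible they stop scheduling
 * NAPI, and synchronize_irq() then waits out any handler instance that
 * was already running.
 */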
7434
7435 /* Fully shut down all tg3 driver activity elsewhere in the system.
7436  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7437  * Most of the time this is only necessary when shutting down the
7438  * device.
7439  */
7440 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7441 {
7442         spin_lock_bh(&tp->lock);
7443         if (irq_sync)
7444                 tg3_irq_quiesce(tp);
7445 }
7446
7447 static inline void tg3_full_unlock(struct tg3 *tp)
7448 {
7449         spin_unlock_bh(&tp->lock);
7450 }
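/* Usage sketch (illustrative only) of a typical reconfiguration path:
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0: also quiesce the IRQs
 *	... reprogram the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Callers that only need to exclude the softirq paths pass
 * irq_sync == 0 and skip the (expensive) synchronize_irq() round trip.
 */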
7451
7452 /* One-shot MSI handler - the chip automatically disables the interrupt
7453  * after sending the MSI, so the driver doesn't have to do it.
7454  */
7455 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7456 {
7457         struct tg3_napi *tnapi = dev_id;
7458         struct tg3 *tp = tnapi->tp;
7459
7460         prefetch(tnapi->hw_status);
7461         if (tnapi->rx_rcb)
7462                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7463
7464         if (likely(!tg3_irq_sync(tp)))
7465                 napi_schedule(&tnapi->napi);
7466
7467         return IRQ_HANDLED;
7468 }
7469
7470 /* MSI ISR - No need to check for interrupt sharing and no need to
7471  * flush status block and interrupt mailbox. PCI ordering rules
7472  * guarantee that MSI will arrive after the status block.
7473  */
7474 static irqreturn_t tg3_msi(int irq, void *dev_id)
7475 {
7476         struct tg3_napi *tnapi = dev_id;
7477         struct tg3 *tp = tnapi->tp;
7478
7479         prefetch(tnapi->hw_status);
7480         if (tnapi->rx_rcb)
7481                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7482         /*
7483          * Writing any value to intr-mbox-0 clears PCI INTA# and
7484          * chip-internal interrupt pending events.
7485          * Writing non-zero to intr-mbox-0 additionally tells the
7486          * NIC to stop sending us irqs, engaging "in-intr-handler"
7487          * event coalescing.
7488          */
7489         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7490         if (likely(!tg3_irq_sync(tp)))
7491                 napi_schedule(&tnapi->napi);
7492
7493         return IRQ_RETVAL(1);
7494 }
7495
7496 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7497 {
7498         struct tg3_napi *tnapi = dev_id;
7499         struct tg3 *tp = tnapi->tp;
7500         struct tg3_hw_status *sblk = tnapi->hw_status;
7501         unsigned int handled = 1;
7502
7503         /* In INTx mode, the interrupt can arrive at the CPU before the
7504          * status block write posted prior to the interrupt is visible.
7505          * Reading the PCI State register will confirm whether the
7506          * interrupt is ours and will flush the status block.
7507          */
7508         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7509                 if (tg3_flag(tp, CHIP_RESETTING) ||
7510                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7511                         handled = 0;
7512                         goto out;
7513                 }
7514         }
7515
7516         /*
7517          * Writing any value to intr-mbox-0 clears PCI INTA# and
7518          * chip-internal interrupt pending events.
7519          * Writing non-zero to intr-mbox-0 additionally tells the
7520          * NIC to stop sending us irqs, engaging "in-intr-handler"
7521          * event coalescing.
7522          *
7523          * Flush the mailbox to de-assert the IRQ immediately to prevent
7524          * spurious interrupts.  The flush impacts performance but
7525          * excessive spurious interrupts can be worse in some cases.
7526          */
7527         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7528         if (tg3_irq_sync(tp))
7529                 goto out;
7530         sblk->status &= ~SD_STATUS_UPDATED;
7531         if (likely(tg3_has_work(tnapi))) {
7532                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7533                 napi_schedule(&tnapi->napi);
7534         } else {
7535                 /* No work, shared interrupt perhaps?  re-enable
7536                  * interrupts, and flush that PCI write
7537                  */
7538                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7539                                0x00000000);
7540         }
7541 out:
7542         return IRQ_RETVAL(handled);
7543 }
7544
7545 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7546 {
7547         struct tg3_napi *tnapi = dev_id;
7548         struct tg3 *tp = tnapi->tp;
7549         struct tg3_hw_status *sblk = tnapi->hw_status;
7550         unsigned int handled = 1;
7551
7552         /* In INTx mode, the interrupt can arrive at the CPU before the
7553          * status block write posted prior to the interrupt is visible.
7554          * Reading the PCI State register will confirm whether the
7555          * interrupt is ours and will flush the status block.
7556          */
7557         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7558                 if (tg3_flag(tp, CHIP_RESETTING) ||
7559                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7560                         handled = 0;
7561                         goto out;
7562                 }
7563         }
7564
7565         /*
7566          * Writing any value to intr-mbox-0 clears PCI INTA# and
7567          * chip-internal interrupt pending events.
7568          * Writing non-zero to intr-mbox-0 additionally tells the
7569          * NIC to stop sending us irqs, engaging "in-intr-handler"
7570          * event coalescing.
7571          *
7572          * Flush the mailbox to de-assert the IRQ immediately to prevent
7573          * spurious interrupts.  The flush impacts performance but
7574          * excessive spurious interrupts can be worse in some cases.
7575          */
7576         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7577
7578         /*
7579          * In a shared interrupt configuration, sometimes other devices'
7580          * interrupts will scream.  We record the current status tag here
7581          * so that the above check can report that the screaming interrupts
7582          * are unhandled.  Eventually they will be silenced.
7583          */
7584         tnapi->last_irq_tag = sblk->status_tag;
7585
7586         if (tg3_irq_sync(tp))
7587                 goto out;
7588
7589         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7590
7591         napi_schedule(&tnapi->napi);
7592
7593 out:
7594         return IRQ_RETVAL(handled);
7595 }
7596
7597 /* ISR for interrupt test */
7598 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7599 {
7600         struct tg3_napi *tnapi = dev_id;
7601         struct tg3 *tp = tnapi->tp;
7602         struct tg3_hw_status *sblk = tnapi->hw_status;
7603
7604         if ((sblk->status & SD_STATUS_UPDATED) ||
7605             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7606                 tg3_disable_ints(tp);
7607                 return IRQ_RETVAL(1);
7608         }
7609         return IRQ_RETVAL(0);
7610 }
7611
7612 #ifdef CONFIG_NET_POLL_CONTROLLER
7613 static void tg3_poll_controller(struct net_device *dev)
7614 {
7615         int i;
7616         struct tg3 *tp = netdev_priv(dev);
7617
7618         if (tg3_irq_sync(tp))
7619                 return;
7620
7621         for (i = 0; i < tp->irq_cnt; i++)
7622                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7623 }
7624 #endif
7625
7626 static void tg3_tx_timeout(struct net_device *dev)
7627 {
7628         struct tg3 *tp = netdev_priv(dev);
7629
7630         if (netif_msg_tx_err(tp)) {
7631                 netdev_err(dev, "transmit timed out, resetting\n");
7632                 tg3_dump_state(tp);
7633         }
7634
7635         tg3_reset_task_schedule(tp);
7636 }
7637
7638 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7639 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7640 {
7641         u32 base = (u32) mapping & 0xffffffff;
7642
7643         return base + len + 8 < base;
7644 }
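/* Worked example: the test relies on u32 wraparound.  For a buffer
 * mapped at 0xfffffff0 with len = 0x20:
 *
 *	base = 0xfffffff0
 *	base + len + 8 = 0x100000018 -> truncates to 0x18 as a u32
 *	0x18 < base, so the buffer crosses a 4GB boundary
 *
 * A buffer at 0x1000 with the same length yields 0x1028, which is not
 * below base, so no crossing.  The extra 8 bytes also flag buffers that
 * end within 8 bytes of a boundary.
 */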
7645
7646 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7647  * of any 4GB boundaries: 4G, 8G, etc
7648  */
7649 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7650                                            u32 len, u32 mss)
7651 {
7652         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7653                 u32 base = (u32) mapping & 0xffffffff;
7654
7655                 return ((base + len + (mss & 0x3fff)) < base);
7656         }
7657         return 0;
7658 }
7659
7660 /* Test for DMA addresses > 40-bit */
7661 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7662                                           int len)
7663 {
7664 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7665         if (tg3_flag(tp, 40BIT_DMA_BUG))
7666                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7667         return 0;
7668 #else
7669         return 0;
7670 #endif
7671 }
7672
7673 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7674                                  dma_addr_t mapping, u32 len, u32 flags,
7675                                  u32 mss, u32 vlan)
7676 {
7677         txbd->addr_hi = ((u64) mapping >> 32);
7678         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7679         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7680         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7681 }
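/* Resulting BD layout, as implied by the shifts above (illustrative):
 *
 *	addr_hi/addr_lo: the 64-bit DMA address, split into halves
 *	len_flags:       length in bits [31:16] (TXD_LEN_SHIFT), the
 *	                 TXD_FLAG_* bits in the low 16 bits
 *	vlan_tag:        mss and vlan packed at TXD_MSS_SHIFT and
 *	                 TXD_VLAN_TAG_SHIFT respectively
 */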
7682
7683 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7684                             dma_addr_t map, u32 len, u32 flags,
7685                             u32 mss, u32 vlan)
7686 {
7687         struct tg3 *tp = tnapi->tp;
7688         bool hwbug = false;
7689
7690         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7691                 hwbug = true;
7692
7693         if (tg3_4g_overflow_test(map, len))
7694                 hwbug = true;
7695
7696         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7697                 hwbug = true;
7698
7699         if (tg3_40bit_overflow_test(tp, map, len))
7700                 hwbug = true;
7701
7702         if (tp->dma_limit) {
7703                 u32 prvidx = *entry;
7704                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7705                 while (len > tp->dma_limit && *budget) {
7706                         u32 frag_len = tp->dma_limit;
7707                         len -= tp->dma_limit;
7708
7709                         /* Avoid the 8-byte DMA problem */
7710                         if (len <= 8) {
7711                                 len += tp->dma_limit / 2;
7712                                 frag_len = tp->dma_limit / 2;
7713                         }
7714
7715                         tnapi->tx_buffers[*entry].fragmented = true;
7716
7717                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7718                                       frag_len, tmp_flag, mss, vlan);
7719                         *budget -= 1;
7720                         prvidx = *entry;
7721                         *entry = NEXT_TX(*entry);
7722
7723                         map += frag_len;
7724                 }
7725
7726                 if (len) {
7727                         if (*budget) {
7728                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7729                                               len, flags, mss, vlan);
7730                                 *budget -= 1;
7731                                 *entry = NEXT_TX(*entry);
7732                         } else {
7733                                 hwbug = true;
7734                                 tnapi->tx_buffers[prvidx].fragmented = false;
7735                         }
7736                 }
7737         } else {
7738                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7739                               len, flags, mss, vlan);
7740                 *entry = NEXT_TX(*entry);
7741         }
7742
7743         return hwbug;
7744 }
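/* Worked example of the dma_limit splitting above: with a 4K dma_limit
 * and len = 8200, a naive split would emit 4096 + 4096 and leave an
 * 8-byte tail, which is exactly what SHORT_DMA_BUG chips cannot handle.
 * The len <= 8 special case instead emits the second fragment as
 * dma_limit / 2 = 2048 bytes, leaving a final BD of 2056 bytes:
 * 4096 + 2048 + 2056 = 8200.
 */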
7745
7746 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7747 {
7748         int i;
7749         struct sk_buff *skb;
7750         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7751
7752         skb = txb->skb;
7753         txb->skb = NULL;
7754
7755         pci_unmap_single(tnapi->tp->pdev,
7756                          dma_unmap_addr(txb, mapping),
7757                          skb_headlen(skb),
7758                          PCI_DMA_TODEVICE);
7759
7760         while (txb->fragmented) {
7761                 txb->fragmented = false;
7762                 entry = NEXT_TX(entry);
7763                 txb = &tnapi->tx_buffers[entry];
7764         }
7765
7766         for (i = 0; i <= last; i++) {
7767                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7768
7769                 entry = NEXT_TX(entry);
7770                 txb = &tnapi->tx_buffers[entry];
7771
7772                 pci_unmap_page(tnapi->tp->pdev,
7773                                dma_unmap_addr(txb, mapping),
7774                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7775
7776                 while (txb->fragmented) {
7777                         txb->fragmented = false;
7778                         entry = NEXT_TX(entry);
7779                         txb = &tnapi->tx_buffers[entry];
7780                 }
7781         }
7782 }
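/* Entries flagged ->fragmented were emitted by tg3_tx_frag_set() as
 * extra BDs for a single DMA mapping, so the walks above clear the flag
 * and skip them without issuing another unmap.
 */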
7783
7784 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7785 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7786                                        struct sk_buff **pskb,
7787                                        u32 *entry, u32 *budget,
7788                                        u32 base_flags, u32 mss, u32 vlan)
7789 {
7790         struct tg3 *tp = tnapi->tp;
7791         struct sk_buff *new_skb, *skb = *pskb;
7792         dma_addr_t new_addr = 0;
7793         int ret = 0;
7794
7795         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7796                 new_skb = skb_copy(skb, GFP_ATOMIC);
7797         else {
7798                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7799
7800                 new_skb = skb_copy_expand(skb,
7801                                           skb_headroom(skb) + more_headroom,
7802                                           skb_tailroom(skb), GFP_ATOMIC);
7803         }
7804
7805         if (!new_skb) {
7806                 ret = -1;
7807         } else {
7808                 /* New SKB is guaranteed to be linear. */
7809                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7810                                           PCI_DMA_TODEVICE);
7811                 /* Make sure the mapping succeeded */
7812                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7813                         dev_kfree_skb_any(new_skb);
7814                         ret = -1;
7815                 } else {
7816                         u32 save_entry = *entry;
7817
7818                         base_flags |= TXD_FLAG_END;
7819
7820                         tnapi->tx_buffers[*entry].skb = new_skb;
7821                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7822                                            mapping, new_addr);
7823
7824                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7825                                             new_skb->len, base_flags,
7826                                             mss, vlan)) {
7827                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7828                                 dev_kfree_skb_any(new_skb);
7829                                 ret = -1;
7830                         }
7831                 }
7832         }
7833
7834         dev_consume_skb_any(skb);
7835         *pskb = new_skb;
7836         return ret;
7837 }
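/* For the 5701, the copy is expanded with enough extra headroom to let
 * the copied data start on a 4-byte boundary (the more_headroom term);
 * other chips only need the data linearized, so a plain skb_copy()
 * suffices.
 */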
7838
7839 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7840 {
7841         /* Return false if we would never have enough descriptors,
7842          * as gso_segs can be more than the current ring size.
7843          */
7844         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7845 }
7846
7847 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7848
7849 /* Use GSO to work around all TSO packets that meet HW bug conditions
7850  * indicated in tg3_tx_frag_set()
7851  */
7852 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7853                        struct netdev_queue *txq, struct sk_buff *skb)
7854 {
7855         struct sk_buff *segs, *nskb;
7856         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7857
7858         /* Estimate the number of fragments in the worst case */
7859         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7860                 netif_tx_stop_queue(txq);
7861
7862                 /* netif_tx_stop_queue() must be done before checking
7863                  * the tx index in tg3_tx_avail() below, because in
7864                  * tg3_tx(), we update tx index before checking for
7865                  * netif_tx_queue_stopped().
7866                  */
7867                 smp_mb();
7868                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7869                         return NETDEV_TX_BUSY;
7870
7871                 netif_tx_wake_queue(txq);
7872         }
7873
7874         segs = skb_gso_segment(skb, tp->dev->features &
7875                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7876         if (IS_ERR(segs) || !segs)
7877                 goto tg3_tso_bug_end;
7878
7879         do {
7880                 nskb = segs;
7881                 segs = segs->next;
7882                 nskb->next = NULL;
7883                 tg3_start_xmit(nskb, tp->dev);
7884         } while (segs);
7885
7886 tg3_tso_bug_end:
7887         dev_consume_skb_any(skb);
7888
7889         return NETDEV_TX_OK;
7890 }
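/* The frag_cnt_est above assumes a worst case of roughly three
 * descriptors per emitted segment, mirroring the gso_segs <
 * tx_pending / 3 bound enforced by tg3_tso_bug_gso_check() before this
 * fallback is attempted.
 */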
7891
7892 /* hard_start_xmit for all devices */
7893 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7894 {
7895         struct tg3 *tp = netdev_priv(dev);
7896         u32 len, entry, base_flags, mss, vlan = 0;
7897         u32 budget;
7898         int i = -1, would_hit_hwbug;
7899         dma_addr_t mapping;
7900         struct tg3_napi *tnapi;
7901         struct netdev_queue *txq;
7902         unsigned int last;
7903         struct iphdr *iph = NULL;
7904         struct tcphdr *tcph = NULL;
7905         __sum16 tcp_csum = 0, ip_csum = 0;
7906         __be16 ip_tot_len = 0;
7907
7908         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7909         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7910         if (tg3_flag(tp, ENABLE_TSS))
7911                 tnapi++;
7912
7913         budget = tg3_tx_avail(tnapi);
7914
7915         /* We are running in BH disabled context with netif_tx_lock
7916          * and TX reclaim runs via tp->napi.poll inside of a software
7917          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7918          * no IRQ context deadlocks to worry about either.  Rejoice!
7919          */
7920         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7921                 if (!netif_tx_queue_stopped(txq)) {
7922                         netif_tx_stop_queue(txq);
7923
7924                         /* This is a hard error, log it. */
7925                         netdev_err(dev,
7926                                    "BUG! Tx Ring full when queue awake!\n");
7927                 }
7928                 return NETDEV_TX_BUSY;
7929         }
7930
7931         entry = tnapi->tx_prod;
7932         base_flags = 0;
7933
7934         mss = skb_shinfo(skb)->gso_size;
7935         if (mss) {
7936                 u32 tcp_opt_len, hdr_len;
7937
7938                 if (skb_cow_head(skb, 0))
7939                         goto drop;
7940
7941                 iph = ip_hdr(skb);
7942                 tcp_opt_len = tcp_optlen(skb);
7943
7944                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7945
7946                 /* HW/FW cannot correctly segment packets that have been
7947                  * vlan encapsulated.
7948                  */
7949                 if (skb->protocol == htons(ETH_P_8021Q) ||
7950                     skb->protocol == htons(ETH_P_8021AD)) {
7951                         if (tg3_tso_bug_gso_check(tnapi, skb))
7952                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7953                         goto drop;
7954                 }
7955
7956                 if (!skb_is_gso_v6(skb)) {
7957                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7958                             tg3_flag(tp, TSO_BUG)) {
7959                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7960                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7961                                 goto drop;
7962                         }
7963                         ip_csum = iph->check;
7964                         ip_tot_len = iph->tot_len;
7965                         iph->check = 0;
7966                         iph->tot_len = htons(mss + hdr_len);
7967                 }
7968
7969                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7970                                TXD_FLAG_CPU_POST_DMA);
7971
7972                 tcph = tcp_hdr(skb);
7973                 tcp_csum = tcph->check;
7974
7975                 if (tg3_flag(tp, HW_TSO_1) ||
7976                     tg3_flag(tp, HW_TSO_2) ||
7977                     tg3_flag(tp, HW_TSO_3)) {
7978                         tcph->check = 0;
7979                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7980                 } else {
7981                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7982                                                          0, IPPROTO_TCP, 0);
7983                 }
7984
7985                 if (tg3_flag(tp, HW_TSO_3)) {
7986                         mss |= (hdr_len & 0xc) << 12;
7987                         if (hdr_len & 0x10)
7988                                 base_flags |= 0x00000010;
7989                         base_flags |= (hdr_len & 0x3e0) << 5;
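                        /* Bit packing above (illustrative): hdr_len
                         * bits [3:2] land in mss bits [15:14], bit 4
                         * becomes base_flags bit 4, and bits [9:5] are
                         * shifted into base_flags bits [14:10].
                         */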
7990                 } else if (tg3_flag(tp, HW_TSO_2))
7991                         mss |= hdr_len << 9;
7992                 else if (tg3_flag(tp, HW_TSO_1) ||
7993                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7994                         if (tcp_opt_len || iph->ihl > 5) {
7995                                 int tsflags;
7996
7997                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7998                                 mss |= (tsflags << 11);
7999                         }
8000                 } else {
8001                         if (tcp_opt_len || iph->ihl > 5) {
8002                                 int tsflags;
8003
8004                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8005                                 base_flags |= tsflags << 12;
8006                         }
8007                 }
8008         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8009                 /* HW/FW cannot correctly checksum packets that have been
8010                  * vlan encapsulated.
8011                  */
8012                 if (skb->protocol == htons(ETH_P_8021Q) ||
8013                     skb->protocol == htons(ETH_P_8021AD)) {
8014                         if (skb_checksum_help(skb))
8015                                 goto drop;
8016                 } else  {
8017                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8018                 }
8019         }
8020
8021         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8022             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8023                 base_flags |= TXD_FLAG_JMB_PKT;
8024
8025         if (skb_vlan_tag_present(skb)) {
8026                 base_flags |= TXD_FLAG_VLAN;
8027                 vlan = skb_vlan_tag_get(skb);
8028         }
8029
8030         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8031             tg3_flag(tp, TX_TSTAMP_EN)) {
8032                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8033                 base_flags |= TXD_FLAG_HWTSTAMP;
8034         }
8035
8036         len = skb_headlen(skb);
8037
8038         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8039         if (pci_dma_mapping_error(tp->pdev, mapping))
8040                 goto drop;
8041
8042
8043         tnapi->tx_buffers[entry].skb = skb;
8044         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8045
8046         would_hit_hwbug = 0;
8047
8048         if (tg3_flag(tp, 5701_DMA_BUG))
8049                 would_hit_hwbug = 1;
8050
8051         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8052                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8053                             mss, vlan)) {
8054                 would_hit_hwbug = 1;
8055         } else if (skb_shinfo(skb)->nr_frags > 0) {
8056                 u32 tmp_mss = mss;
8057
8058                 if (!tg3_flag(tp, HW_TSO_1) &&
8059                     !tg3_flag(tp, HW_TSO_2) &&
8060                     !tg3_flag(tp, HW_TSO_3))
8061                         tmp_mss = 0;
8062
8063                 /* Now loop through additional data
8064                  * fragments, and queue them.
8065                  */
8066                 last = skb_shinfo(skb)->nr_frags - 1;
8067                 for (i = 0; i <= last; i++) {
8068                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8069
8070                         len = skb_frag_size(frag);
8071                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8072                                                    len, DMA_TO_DEVICE);
8073
8074                         tnapi->tx_buffers[entry].skb = NULL;
8075                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8076                                            mapping);
8077                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8078                                 goto dma_error;
8079
8080                         if (!budget ||
8081                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8082                                             len, base_flags |
8083                                             ((i == last) ? TXD_FLAG_END : 0),
8084                                             tmp_mss, vlan)) {
8085                                 would_hit_hwbug = 1;
8086                                 break;
8087                         }
8088                 }
8089         }
8090
8091         if (would_hit_hwbug) {
8092                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8093
8094                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8095                         /* If it's a TSO packet, do GSO instead of
8096                          * allocating and copying to a large linear SKB
8097                          */
8098                         if (ip_tot_len) {
8099                                 iph->check = ip_csum;
8100                                 iph->tot_len = ip_tot_len;
8101                         }
8102                         tcph->check = tcp_csum;
8103                         return tg3_tso_bug(tp, tnapi, txq, skb);
8104                 }
8105
8106                 /* If the workaround fails due to memory/mapping
8107                  * failure, silently drop this packet.
8108                  */
8109                 entry = tnapi->tx_prod;
8110                 budget = tg3_tx_avail(tnapi);
8111                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8112                                                 base_flags, mss, vlan))
8113                         goto drop_nofree;
8114         }
8115
8116         skb_tx_timestamp(skb);
8117         netdev_tx_sent_queue(txq, skb->len);
8118
8119         /* Sync BD data before updating mailbox */
8120         wmb();
8121
8122         tnapi->tx_prod = entry;
8123         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8124                 netif_tx_stop_queue(txq);
8125
8126                 /* netif_tx_stop_queue() must be done before checking
8127                  * the tx index in tg3_tx_avail() below, because in
8128                  * tg3_tx(), we update tx index before checking for
8129                  * netif_tx_queue_stopped().
8130                  */
8131                 smp_mb();
8132                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8133                         netif_tx_wake_queue(txq);
8134         }
8135
8136         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8137                 /* Packets are ready, update Tx producer idx on card. */
8138                 tw32_tx_mbox(tnapi->prodmbox, entry);
8139                 mmiowb();
8140         }
8141
8142         return NETDEV_TX_OK;
8143
8144 dma_error:
8145         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8146         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8147 drop:
8148         dev_kfree_skb_any(skb);
8149 drop_nofree:
8150         tp->tx_dropped++;
8151         return NETDEV_TX_OK;
8152 }
8153
8154 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8155 {
8156         if (enable) {
8157                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8158                                   MAC_MODE_PORT_MODE_MASK);
8159
8160                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8161
8162                 if (!tg3_flag(tp, 5705_PLUS))
8163                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8164
8165                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8166                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8167                 else
8168                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8169         } else {
8170                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8171
8172                 if (tg3_flag(tp, 5705_PLUS) ||
8173                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8174                     tg3_asic_rev(tp) == ASIC_REV_5700)
8175                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8176         }
8177
8178         tw32(MAC_MODE, tp->mac_mode);
8179         udelay(40);
8180 }
8181
8182 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8183 {
8184         u32 val, bmcr, mac_mode, ptest = 0;
8185
8186         tg3_phy_toggle_apd(tp, false);
8187         tg3_phy_toggle_automdix(tp, false);
8188
8189         if (extlpbk && tg3_phy_set_extloopbk(tp))
8190                 return -EIO;
8191
8192         bmcr = BMCR_FULLDPLX;
8193         switch (speed) {
8194         case SPEED_10:
8195                 break;
8196         case SPEED_100:
8197                 bmcr |= BMCR_SPEED100;
8198                 break;
8199         case SPEED_1000:
8200         default:
8201                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8202                         speed = SPEED_100;
8203                         bmcr |= BMCR_SPEED100;
8204                 } else {
8205                         speed = SPEED_1000;
8206                         bmcr |= BMCR_SPEED1000;
8207                 }
8208         }
8209
8210         if (extlpbk) {
8211                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8212                         tg3_readphy(tp, MII_CTRL1000, &val);
8213                         val |= CTL1000_AS_MASTER |
8214                                CTL1000_ENABLE_MASTER;
8215                         tg3_writephy(tp, MII_CTRL1000, val);
8216                 } else {
8217                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8218                                 MII_TG3_FET_PTEST_TRIM_2;
8219                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8220                 }
8221         } else
8222                 bmcr |= BMCR_LOOPBACK;
8223
8224         tg3_writephy(tp, MII_BMCR, bmcr);
8225
8226         /* The write needs to be flushed for the FETs */
8227         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8228                 tg3_readphy(tp, MII_BMCR, &bmcr);
8229
8230         udelay(40);
8231
8232         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8233             tg3_asic_rev(tp) == ASIC_REV_5785) {
8234                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8235                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8236                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8237
8238                 /* The write needs to be flushed for the AC131 */
8239                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8240         }
8241
8242         /* Reset to prevent intermittently losing the 1st rx packet */
8243         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8244             tg3_flag(tp, 5780_CLASS)) {
8245                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8246                 udelay(10);
8247                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8248         }
8249
8250         mac_mode = tp->mac_mode &
8251                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8252         if (speed == SPEED_1000)
8253                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8254         else
8255                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8256
8257         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8258                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8259
8260                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8261                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8262                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8263                         mac_mode |= MAC_MODE_LINK_POLARITY;
8264
8265                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8266                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8267         }
8268
8269         tw32(MAC_MODE, mac_mode);
8270         udelay(40);
8271
8272         return 0;
8273 }
8274
8275 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8276 {
8277         struct tg3 *tp = netdev_priv(dev);
8278
8279         if (features & NETIF_F_LOOPBACK) {
8280                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8281                         return;
8282
8283                 spin_lock_bh(&tp->lock);
8284                 tg3_mac_loopback(tp, true);
8285                 netif_carrier_on(tp->dev);
8286                 spin_unlock_bh(&tp->lock);
8287                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8288         } else {
8289                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8290                         return;
8291
8292                 spin_lock_bh(&tp->lock);
8293                 tg3_mac_loopback(tp, false);
8294                 /* Force link status check */
8295                 tg3_setup_phy(tp, true);
8296                 spin_unlock_bh(&tp->lock);
8297                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8298         }
8299 }
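/* Userspace sketch (illustrative; "eth0" is a hypothetical interface
 * name): the NETIF_F_LOOPBACK toggle handled above is normally driven
 * through ethtool, e.g.:
 *
 *	ethtool -K eth0 loopback on
 *	ethtool -K eth0 loopback off
 */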
8300
8301 static netdev_features_t tg3_fix_features(struct net_device *dev,
8302         netdev_features_t features)
8303 {
8304         struct tg3 *tp = netdev_priv(dev);
8305
8306         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8307                 features &= ~NETIF_F_ALL_TSO;
8308
8309         return features;
8310 }
8311
8312 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8313 {
8314         netdev_features_t changed = dev->features ^ features;
8315
8316         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8317                 tg3_set_loopback(dev, features);
8318
8319         return 0;
8320 }
8321
8322 static void tg3_rx_prodring_free(struct tg3 *tp,
8323                                  struct tg3_rx_prodring_set *tpr)
8324 {
8325         int i;
8326
8327         if (tpr != &tp->napi[0].prodring) {
8328                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8329                      i = (i + 1) & tp->rx_std_ring_mask)
8330                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8331                                         tp->rx_pkt_map_sz);
8332
8333                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8334                         for (i = tpr->rx_jmb_cons_idx;
8335                              i != tpr->rx_jmb_prod_idx;
8336                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8337                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8338                                                 TG3_RX_JMB_MAP_SZ);
8339                         }
8340                 }
8341
8342                 return;
8343         }
8344
8345         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8346                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8347                                 tp->rx_pkt_map_sz);
8348
8349         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8350                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8351                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8352                                         TG3_RX_JMB_MAP_SZ);
8353         }
8354 }
8355
8356 /* Initialize rx rings for packet processing.
8357  *
8358  * The chip has been shut down and the driver detached from
8359  * the networking stack, so no interrupts or new tx packets will
8360  * end up in the driver.  tp->{tx,}lock are held and thus
8361  * we may not sleep.
8362  */
8363 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8364                                  struct tg3_rx_prodring_set *tpr)
8365 {
8366         u32 i, rx_pkt_dma_sz;
8367
8368         tpr->rx_std_cons_idx = 0;
8369         tpr->rx_std_prod_idx = 0;
8370         tpr->rx_jmb_cons_idx = 0;
8371         tpr->rx_jmb_prod_idx = 0;
8372
8373         if (tpr != &tp->napi[0].prodring) {
8374                 memset(&tpr->rx_std_buffers[0], 0,
8375                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8376                 if (tpr->rx_jmb_buffers)
8377                         memset(&tpr->rx_jmb_buffers[0], 0,
8378                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8379                 goto done;
8380         }
8381
8382         /* Zero out all descriptors. */
8383         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8384
8385         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8386         if (tg3_flag(tp, 5780_CLASS) &&
8387             tp->dev->mtu > ETH_DATA_LEN)
8388                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8389         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8390
8391         /* Initialize invariants of the rings; we only set this
8392          * stuff once.  This works because the card does not
8393          * write into the rx buffer posting rings.
8394          */
8395         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8396                 struct tg3_rx_buffer_desc *rxd;
8397
8398                 rxd = &tpr->rx_std[i];
8399                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8400                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8401                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8402                                (i << RXD_OPAQUE_INDEX_SHIFT));
8403         }
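        /* The opaque field round-trips through the hardware unmodified;
         * on completion the rx path recovers the ring type and buffer
         * index from it rather than from the descriptor address.
         */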
8404
8405         /* Now allocate fresh SKBs for each rx ring. */
8406         for (i = 0; i < tp->rx_pending; i++) {
8407                 unsigned int frag_size;
8408
8409                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8410                                       &frag_size) < 0) {
8411                         netdev_warn(tp->dev,
8412                                     "Using a smaller RX standard ring. Only "
8413                                     "%d out of %d buffers were allocated "
8414                                     "successfully\n", i, tp->rx_pending);
8415                         if (i == 0)
8416                                 goto initfail;
8417                         tp->rx_pending = i;
8418                         break;
8419                 }
8420         }
8421
8422         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8423                 goto done;
8424
8425         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8426
8427         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8428                 goto done;
8429
8430         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8431                 struct tg3_rx_buffer_desc *rxd;
8432
8433                 rxd = &tpr->rx_jmb[i].std;
8434                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8435                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8436                                   RXD_FLAG_JUMBO;
8437                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8438                        (i << RXD_OPAQUE_INDEX_SHIFT));
8439         }
8440
8441         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8442                 unsigned int frag_size;
8443
8444                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8445                                       &frag_size) < 0) {
8446                         netdev_warn(tp->dev,
8447                                     "Using a smaller RX jumbo ring. Only %d "
8448                                     "out of %d buffers were allocated "
8449                                     "successfully\n", i, tp->rx_jumbo_pending);
8450                         if (i == 0)
8451                                 goto initfail;
8452                         tp->rx_jumbo_pending = i;
8453                         break;
8454                 }
8455         }
8456
8457 done:
8458         return 0;
8459
8460 initfail:
8461         tg3_rx_prodring_free(tp, tpr);
8462         return -ENOMEM;
8463 }
8464
8465 static void tg3_rx_prodring_fini(struct tg3 *tp,
8466                                  struct tg3_rx_prodring_set *tpr)
8467 {
8468         kfree(tpr->rx_std_buffers);
8469         tpr->rx_std_buffers = NULL;
8470         kfree(tpr->rx_jmb_buffers);
8471         tpr->rx_jmb_buffers = NULL;
8472         if (tpr->rx_std) {
8473                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8474                                   tpr->rx_std, tpr->rx_std_mapping);
8475                 tpr->rx_std = NULL;
8476         }
8477         if (tpr->rx_jmb) {
8478                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8479                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8480                 tpr->rx_jmb = NULL;
8481         }
8482 }
8483
8484 static int tg3_rx_prodring_init(struct tg3 *tp,
8485                                 struct tg3_rx_prodring_set *tpr)
8486 {
8487         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8488                                       GFP_KERNEL);
8489         if (!tpr->rx_std_buffers)
8490                 return -ENOMEM;
8491
8492         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8493                                          TG3_RX_STD_RING_BYTES(tp),
8494                                          &tpr->rx_std_mapping,
8495                                          GFP_KERNEL);
8496         if (!tpr->rx_std)
8497                 goto err_out;
8498
8499         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8500                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8501                                               GFP_KERNEL);
8502                 if (!tpr->rx_jmb_buffers)
8503                         goto err_out;
8504
8505                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8506                                                  TG3_RX_JMB_RING_BYTES(tp),
8507                                                  &tpr->rx_jmb_mapping,
8508                                                  GFP_KERNEL);
8509                 if (!tpr->rx_jmb)
8510                         goto err_out;
8511         }
8512
8513         return 0;
8514
8515 err_out:
8516         tg3_rx_prodring_fini(tp, tpr);
8517         return -ENOMEM;
8518 }
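/* Pairing sketch (illustrative): every successful tg3_rx_prodring_init()
 * must eventually be matched by tg3_rx_prodring_fini().  Since fini
 * checks each pointer before freeing, it is also safe on a partially
 * initialized set, which is why the err_out path above simply calls it.
 */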
8519
8520 /* Free up pending packets in all rx/tx rings.
8521  *
8522  * The chip has been shut down and the driver detached from
8523  * the network stack, so no interrupts or new tx packets will
8524  * end up in the driver.  tp->{tx,}lock is not held and we are not
8525  * in an interrupt context and thus may sleep.
8526  */
8527 static void tg3_free_rings(struct tg3 *tp)
8528 {
8529         int i, j;
8530
8531         for (j = 0; j < tp->irq_cnt; j++) {
8532                 struct tg3_napi *tnapi = &tp->napi[j];
8533
8534                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8535
8536                 if (!tnapi->tx_buffers)
8537                         continue;
8538
8539                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8540                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8541
8542                         if (!skb)
8543                                 continue;
8544
8545                         tg3_tx_skb_unmap(tnapi, i,
8546                                          skb_shinfo(skb)->nr_frags - 1);
8547
8548                         dev_consume_skb_any(skb);
8549                 }
8550                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8551         }
8552 }
8553
8554 /* Initialize tx/rx rings for packet processing.
8555  *
8556  * The chip has been shut down and the driver detached from
8557  * the network stack, so no interrupts or new tx packets will
8558  * end up in the driver.  tp->{tx,}lock are held and thus
8559  * we may not sleep.
8560  */
8561 static int tg3_init_rings(struct tg3 *tp)
8562 {
8563         int i;
8564
8565         /* Free up all the SKBs. */
8566         tg3_free_rings(tp);
8567
8568         for (i = 0; i < tp->irq_cnt; i++) {
8569                 struct tg3_napi *tnapi = &tp->napi[i];
8570
8571                 tnapi->last_tag = 0;
8572                 tnapi->last_irq_tag = 0;
8575                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8576
8577                 tnapi->tx_prod = 0;
8578                 tnapi->tx_cons = 0;
8579                 if (tnapi->tx_ring)
8580                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8581
8582                 tnapi->rx_rcb_ptr = 0;
8583                 if (tnapi->rx_rcb)
8584                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8585
8586                 if (tnapi->prodring.rx_std &&
8587                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8588                         tg3_free_rings(tp);
8589                         return -ENOMEM;
8590                 }
8591         }
8592
8593         return 0;
8594 }
8595
8596 static void tg3_mem_tx_release(struct tg3 *tp)
8597 {
8598         int i;
8599
8600         for (i = 0; i < tp->irq_max; i++) {
8601                 struct tg3_napi *tnapi = &tp->napi[i];
8602
8603                 if (tnapi->tx_ring) {
8604                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8605                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8606                         tnapi->tx_ring = NULL;
8607                 }
8608
8609                 kfree(tnapi->tx_buffers);
8610                 tnapi->tx_buffers = NULL;
8611         }
8612 }
8613
8614 static int tg3_mem_tx_acquire(struct tg3 *tp)
8615 {
8616         int i;
8617         struct tg3_napi *tnapi = &tp->napi[0];
8618
8619         /* If multivector TSS is enabled, vector 0 does not handle
8620          * tx interrupts.  Don't allocate any resources for it.
8621          */
8622         if (tg3_flag(tp, ENABLE_TSS))
8623                 tnapi++;
8624
8625         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8626                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8627                                             sizeof(struct tg3_tx_ring_info), GFP_KERNEL);
8628                 if (!tnapi->tx_buffers)
8629                         goto err_out;
8630
8631                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8632                                                     TG3_TX_RING_BYTES,
8633                                                     &tnapi->tx_desc_mapping,
8634                                                     GFP_KERNEL);
8635                 if (!tnapi->tx_ring)
8636                         goto err_out;
8637         }
8638
8639         return 0;
8640
8641 err_out:
8642         tg3_mem_tx_release(tp);
8643         return -ENOMEM;
8644 }
8645
8646 static void tg3_mem_rx_release(struct tg3 *tp)
8647 {
8648         int i;
8649
8650         for (i = 0; i < tp->irq_max; i++) {
8651                 struct tg3_napi *tnapi = &tp->napi[i];
8652
8653                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8654
8655                 if (!tnapi->rx_rcb)
8656                         continue;
8657
8658                 dma_free_coherent(&tp->pdev->dev,
8659                                   TG3_RX_RCB_RING_BYTES(tp),
8660                                   tnapi->rx_rcb,
8661                                   tnapi->rx_rcb_mapping);
8662                 tnapi->rx_rcb = NULL;
8663         }
8664 }
8665
8666 static int tg3_mem_rx_acquire(struct tg3 *tp)
8667 {
8668         unsigned int i, limit;
8669
8670         limit = tp->rxq_cnt;
8671
8672         /* When RSS is enabled, vector zero still needs a producer
8673          * ring set, even with no rx return ring of its own: this
8674          * is the true hw prodring the hardware refills from. */
8675         if (tg3_flag(tp, ENABLE_RSS))
8676                 limit++;
8677
8678         for (i = 0; i < limit; i++) {
8679                 struct tg3_napi *tnapi = &tp->napi[i];
8680
8681                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8682                         goto err_out;
8683
8684                 /* If multivector RSS is enabled, vector 0
8685                  * does not handle rx or tx interrupts.
8686                  * Don't allocate any resources for it.
8687                  */
8688                 if (!i && tg3_flag(tp, ENABLE_RSS))
8689                         continue;
8690
8691                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8692                                                     TG3_RX_RCB_RING_BYTES(tp),
8693                                                     &tnapi->rx_rcb_mapping,
8694                                                     GFP_KERNEL);
8695                 if (!tnapi->rx_rcb)
8696                         goto err_out;
8697         }
8698
8699         return 0;
8700
8701 err_out:
8702         tg3_mem_rx_release(tp);
8703         return -ENOMEM;
8704 }
8705
8706 /*
8707  * Must only be invoked with interrupt sources disabled and
8708  * the hardware shut down.
8709  */
8710 static void tg3_free_consistent(struct tg3 *tp)
8711 {
8712         int i;
8713
8714         for (i = 0; i < tp->irq_cnt; i++) {
8715                 struct tg3_napi *tnapi = &tp->napi[i];
8716
8717                 if (tnapi->hw_status) {
8718                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8719                                           tnapi->hw_status,
8720                                           tnapi->status_mapping);
8721                         tnapi->hw_status = NULL;
8722                 }
8723         }
8724
8725         tg3_mem_rx_release(tp);
8726         tg3_mem_tx_release(tp);
8727
8728         /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8729         tg3_full_lock(tp, 0);
8730         if (tp->hw_stats) {
8731                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8732                                   tp->hw_stats, tp->stats_mapping);
8733                 tp->hw_stats = NULL;
8734         }
8735         tg3_full_unlock(tp);
8736 }
8737
8738 /*
8739  * Must only be invoked with interrupt sources disabled and
8740  * the hardware shut down.  Can sleep.
8741  */
8742 static int tg3_alloc_consistent(struct tg3 *tp)
8743 {
8744         int i;
8745
8746         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8747                                            sizeof(struct tg3_hw_stats),
8748                                            &tp->stats_mapping, GFP_KERNEL);
8749         if (!tp->hw_stats)
8750                 goto err_out;
8751
8752         for (i = 0; i < tp->irq_cnt; i++) {
8753                 struct tg3_napi *tnapi = &tp->napi[i];
8754                 struct tg3_hw_status *sblk;
8755
8756                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8757                                                        TG3_HW_STATUS_SIZE,
8758                                                        &tnapi->status_mapping,
8759                                                        GFP_KERNEL);
8760                 if (!tnapi->hw_status)
8761                         goto err_out;
8762
8763                 sblk = tnapi->hw_status;
8764
8765                 if (tg3_flag(tp, ENABLE_RSS)) {
8766                         u16 *prodptr = NULL;
8767
8768                         /*
8769                          * When RSS is enabled, the status block format changes
8770                          * slightly.  The "rx_jumbo_consumer", "reserved",
8771                          * and "rx_mini_consumer" members get mapped to the
8772                          * other three rx return ring producer indexes.
8773                          */
8774                         switch (i) {
8775                         case 1:
8776                                 prodptr = &sblk->idx[0].rx_producer;
8777                                 break;
8778                         case 2:
8779                                 prodptr = &sblk->rx_jumbo_consumer;
8780                                 break;
8781                         case 3:
8782                                 prodptr = &sblk->reserved;
8783                                 break;
8784                         case 4:
8785                                 prodptr = &sblk->rx_mini_consumer;
8786                                 break;
8787                         }
8788                         tnapi->rx_rcb_prod_idx = prodptr;
8789                 } else {
8790                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8791                 }
8792         }
8793
8794         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8795                 goto err_out;
8796
8797         return 0;
8798
8799 err_out:
8800         tg3_free_consistent(tp);
8801         return -ENOMEM;
8802 }
8803
8804 #define MAX_WAIT_CNT 1000
8805
8806 /* To stop a block, clear the enable bit and poll till it
8807  * clears.  tp->lock is held.
8808  */
8809 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8810 {
8811         unsigned int i;
8812         u32 val;
8813
8814         if (tg3_flag(tp, 5705_PLUS)) {
8815                 switch (ofs) {
8816                 case RCVLSC_MODE:
8817                 case DMAC_MODE:
8818                 case MBFREE_MODE:
8819                 case BUFMGR_MODE:
8820                 case MEMARB_MODE:
8821                         /* These bits can't be enabled/disabled on the
8822                          * 5705/5750, so just report success.
8823                          */
8824                         return 0;
8825
8826                 default:
8827                         break;
8828                 }
8829         }
8830
8831         val = tr32(ofs);
8832         val &= ~enable_bit;
8833         tw32_f(ofs, val);
8834
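        /* Poll for the enable bit to clear: up to MAX_WAIT_CNT
         * iterations of 100us each, i.e. roughly 100ms in total.
         */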
8835         for (i = 0; i < MAX_WAIT_CNT; i++) {
8836                 if (pci_channel_offline(tp->pdev)) {
8837                         dev_err(&tp->pdev->dev,
8838                                 "tg3_stop_block device offline, "
8839                                 "ofs=%lx enable_bit=%x\n",
8840                                 ofs, enable_bit);
8841                         return -ENODEV;
8842                 }
8843
8844                 udelay(100);
8845                 val = tr32(ofs);
8846                 if ((val & enable_bit) == 0)
8847                         break;
8848         }
8849
8850         if (i == MAX_WAIT_CNT && !silent) {
8851                 dev_err(&tp->pdev->dev,
8852                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8853                         ofs, enable_bit);
8854                 return -ENODEV;
8855         }
8856
8857         return 0;
8858 }
8859
8860 /* tp->lock is held. */
8861 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8862 {
8863         int i, err;
8864
8865         tg3_disable_ints(tp);
8866
8867         if (pci_channel_offline(tp->pdev)) {
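                /* The device has dropped off the bus and its registers
                 * are unreachable; just bring the software shadow copies
                 * of the mode registers in line with the disabled state.
                 */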
8868                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8869                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8870                 err = -ENODEV;
8871                 goto err_no_dev;
8872         }
8873
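        /* Quiesce the chip in dependency order: the receive MAC and
         * receive engines first, then the send engines and read DMA,
         * then host coalescing and write DMA, and finally the buffer
         * manager and memory arbiter that everything else depends on.
         */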
8874         tp->rx_mode &= ~RX_MODE_ENABLE;
8875         tw32_f(MAC_RX_MODE, tp->rx_mode);
8876         udelay(10);
8877
8878         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8883         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8884
8885         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8890         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8891         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8892
8893         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8894         tw32_f(MAC_MODE, tp->mac_mode);
8895         udelay(40);
8896
8897         tp->tx_mode &= ~TX_MODE_ENABLE;
8898         tw32_f(MAC_TX_MODE, tp->tx_mode);
8899
8900         for (i = 0; i < MAX_WAIT_CNT; i++) {
8901                 udelay(100);
8902                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8903                         break;
8904         }
8905         if (i >= MAX_WAIT_CNT) {
8906                 dev_err(&tp->pdev->dev,
8907                         "%s timed out, TX_MODE_ENABLE will not clear "
8908                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8909                 err |= -ENODEV;
8910         }
8911
8912         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8913         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8914         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8915
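        /* Pulse a reset through all of the chip's flow-through queues:
         * assert every FTQ reset bit, then release them all.
         */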
8916         tw32(FTQ_RESET, 0xffffffff);
8917         tw32(FTQ_RESET, 0x00000000);
8918
8919         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8920         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8921
8922 err_no_dev:
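        /* Clear each vector's host status block so no stale hardware
         * status is consumed once the device is reinitialized.
         */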
8923         for (i = 0; i < tp->irq_cnt; i++) {
8924                 struct tg3_napi *tnapi = &tp->napi[i];
8925                 if (tnapi->hw_status)
8926                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8927         }
8928
8929         return err;
8930 }
8931
8932 /* Save PCI command register before chip reset */
8933 static void tg3_save_pci_state(struct tg3 *tp)
8934 {
8935         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8936 }
8937
8938 /* Restore PCI state after chip reset */
8939 static void tg3_restore_pci_state(struct tg3 *tp)
8940 {
8941         u32 val;
8942
8943         /* Re-enable indirect register accesses. */
8944         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8945                                tp->misc_host_ctrl);
8946
8947         /* Set MAX PCI retry to zero. */
8948         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8949         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8950             tg3_flag(tp, PCIX_MODE))
8951                 val |= PCISTATE_RETRY_SAME_DMA;
8952         /* Allow reads and writes to the APE register and memory space. */
8953         if (tg3_flag(tp, ENABLE_APE))
8954                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8955                        PCISTATE_ALLOW_APE_SHMEM_WR |
8956                        PCISTATE_ALLOW_APE_PSPACE_WR;
8957         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8958
8959         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8960
8961         if (!tg3_flag(tp, PCI_EXPRESS)) {
8962                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8963                                       tp->pci_cacheline_sz);
8964                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8965                                       tp->pci_lat_timer);
8966         }
8967
8968         /* Make sure PCI-X relaxed ordering bit is clear. */
8969         if (tg3_flag(tp, PCIX_MODE)) {
8970                 u16 pcix_cmd;
8971
8972                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8973                                      &pcix_cmd);
8974                 pcix_cmd &= ~PCI_X_CMD_ERO;
8975                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8976                                       pcix_cmd);
8977         }
8978
8979         if (tg3_flag(tp, 5780_CLASS)) {
8980
8981                 /* Chip reset on 5780 will reset MSI enable bit,
8982                  * so we need to restore it.
8983                  */
8984                 if (tg3_flag(tp, USING_MSI)) {
8985                         u16 ctrl;
8986
8987                         pci_read_config_word(tp->pdev,
8988                                              tp->msi_cap + PCI_MSI_FLAGS,
8989                                              &ctrl);
8990                         pci_write_config_word(tp->pdev,
8991                                               tp->msi_cap + PCI_MSI_FLAGS,
8992                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8993                         val = tr32(MSGINT_MODE);
8994                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8995                 }
8996         }
8997 }
8998
8999 static void tg3_override_clk(struct tg3 *tp)
9000 {
9001         u32 val;
9002
9003         switch (tg3_asic_rev(tp)) {
9004         case ASIC_REV_5717:
9005                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9006                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9007                      TG3_CPMU_MAC_ORIDE_ENABLE);
9008                 break;
9009
9010         case ASIC_REV_5719:
9011         case ASIC_REV_5720:
9012                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9013                 break;
9014
9015         default:
9016                 return;
9017         }
9018 }
9019
9020 static void tg3_restore_clk(struct tg3 *tp)
9021 {
9022         u32 val;
9023
9024         switch (tg3_asic_rev(tp)) {
9025         case ASIC_REV_5717:
9026                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9027                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9028                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9029                 break;
9030
9031         case ASIC_REV_5719:
9032         case ASIC_REV_5720:
9033                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9034                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9035                 break;
9036
9037         default:
9038                 return;
9039         }
9040 }
9041
9042 /* tp->lock is held. */
9043 static int tg3_chip_reset(struct tg3 *tp)
9044         __releases(tp->lock)
9045         __acquires(tp->lock)
9046 {
9047         u32 val;
9048         void (*write_op)(struct tg3 *, u32, u32);
9049         int i, err;
9050
9051         if (!pci_device_is_present(tp->pdev))
9052                 return -ENODEV;
9053
9054         tg3_nvram_lock(tp);
9055
9056         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9057
9058         /* No matching tg3_nvram_unlock() after this because
9059          * chip reset below will undo the nvram lock.
9060          */
9061         tp->nvram_lock_cnt = 0;
9062
9063         /* GRC_MISC_CFG core clock reset will clear the memory
9064          * enable bit in PCI register 4 and the MSI enable bit
9065          * on some chips, so we save relevant registers here.
9066          */
9067         tg3_save_pci_state(tp);
9068
9069         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9070             tg3_flag(tp, 5755_PLUS))
9071                 tw32(GRC_FASTBOOT_PC, 0);
9072
9073         /*
9074          * We must avoid the readl() that normally takes place.
9075          * It locks machines, causes machine checks, and other
9076          * fun things.  So, temporarily disable the 5701
9077          * hardware workaround, while we do the reset.
9078          */
9079         write_op = tp->write32;
9080         if (write_op == tg3_write_flush_reg32)
9081                 tp->write32 = tg3_write32;
9082
9083         /* Prevent the irq handler from reading or writing PCI registers
9084          * during chip reset when the memory enable bit in the PCI command
9085          * register may be cleared.  The chip does not generate interrupts
9086          * at this time, but the irq handler may still be called due to irq
9087          * sharing or irqpoll.
9088          */
9089         tg3_flag_set(tp, CHIP_RESETTING);
9090         for (i = 0; i < tp->irq_cnt; i++) {
9091                 struct tg3_napi *tnapi = &tp->napi[i];
9092                 if (tnapi->hw_status) {
9093                         tnapi->hw_status->status = 0;
9094                         tnapi->hw_status->status_tag = 0;
9095                 }
9096                 tnapi->last_tag = 0;
9097                 tnapi->last_irq_tag = 0;
9098         }
9099         smp_mb();
9100
9101         tg3_full_unlock(tp);
9102
9103         for (i = 0; i < tp->irq_cnt; i++)
9104                 synchronize_irq(tp->napi[i].irq_vec);
9105
9106         tg3_full_lock(tp, 0);
9107
9108         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9109                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9110                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9111         }
9112
9113         /* do the reset */
9114         val = GRC_MISC_CFG_CORECLK_RESET;
9115
9116         if (tg3_flag(tp, PCI_EXPRESS)) {
9117                 /* Force PCIe 1.0a mode */
9118                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9119                     !tg3_flag(tp, 57765_PLUS) &&
9120                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9121                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9122                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9123
9124                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9125                         tw32(GRC_MISC_CFG, (1 << 29));
9126                         val |= (1 << 29);
9127                 }
9128         }
9129
9130         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9131                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9132                 tw32(GRC_VCPU_EXT_CTRL,
9133                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9134         }
9135
9136         /* Set the clock to the highest frequency to avoid timeouts. With link
9137          * aware mode, the clock speed could be slow and the bootcode does not
9138          * complete within the expected time. Override the clock to allow the
9139          * bootcode to finish sooner and then restore it.
9140          */
9141         tg3_override_clk(tp);
9142
9143         /* Manage GPHY power for all CPMU-absent PCIe devices. */
9144         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9145                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9146
9147         tw32(GRC_MISC_CFG, val);
9148
9149         /* restore 5701 hardware bug workaround write method */
9150         tp->write32 = write_op;
9151
9152         /* Unfortunately, we have to delay before the PCI read back.
9153          * Some 575X chips will not even respond to a PCI cfg access
9154          * when the reset command is given to the chip.
9155          *
9156          * How do these hardware designers expect things to work
9157          * properly if the PCI write is posted for a long period
9158          * of time?  It is always necessary to have some method by
9159          * which a register read back can occur to push the write
9160          * out which does the reset.
9161          *
9162          * For most tg3 variants the trick below was working.
9163          * Ho hum...
9164          */
9165         udelay(120);
9166
9167         /* Flush PCI posted writes.  The normal MMIO registers
9168          * are inaccessible at this time so this is the only
9169          * way to do this reliably (actually, this is no longer
9170          * the case, see above).  I tried to use indirect
9171          * register read/write but this upset some 5701 variants.
9172          */
9173         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9174
9175         udelay(120);
9176
9177         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9178                 u16 val16;
9179
9180                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9181                         int j;
9182                         u32 cfg_val;
9183
9184                         /* Wait for link training to complete.  */
9185                         for (j = 0; j < 5000; j++)
9186                                 udelay(100);
9187
9188                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9189                         pci_write_config_dword(tp->pdev, 0xc4,
9190                                                cfg_val | (1 << 15));
9191                 }
9192
9193                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9194                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9195                 /*
9196                  * Older PCIe devices only support the 128 byte
9197                  * MPS setting.  Enforce the restriction.
9198                  */
9199                 if (!tg3_flag(tp, CPMU_PRESENT))
9200                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9201                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9202
9203                 /* Clear error status */
9204                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9205                                       PCI_EXP_DEVSTA_CED |
9206                                       PCI_EXP_DEVSTA_NFED |
9207                                       PCI_EXP_DEVSTA_FED |
9208                                       PCI_EXP_DEVSTA_URD);
9209         }
9210
9211         tg3_restore_pci_state(tp);
9212
9213         tg3_flag_clear(tp, CHIP_RESETTING);
9214         tg3_flag_clear(tp, ERROR_PROCESSED);
9215
9216         val = 0;
9217         if (tg3_flag(tp, 5780_CLASS))
9218                 val = tr32(MEMARB_MODE);
9219         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9220
9221         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9222                 tg3_stop_fw(tp);
9223                 tw32(0x5000, 0x400);
9224         }
9225
9226         if (tg3_flag(tp, IS_SSB_CORE)) {
9227                 /*
9228                  * BCM4785: The internal ROM may be defective and the
9229                  * Rx RISC CPU is not required for operation, so stop
9230                  * the CPU to avoid any repercussions from using it.
9231                  */
9232                 tg3_stop_fw(tp);
9233                 tg3_halt_cpu(tp, RX_CPU_BASE);
9234         }
9235
9236         err = tg3_poll_fw(tp);
9237         if (err)
9238                 return err;
9239
9240         tw32(GRC_MODE, tp->grc_mode);
9241
9242         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9243                 val = tr32(0xc4);
9244
9245                 tw32(0xc4, val | (1 << 15));
9246         }
9247
9248         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9249             tg3_asic_rev(tp) == ASIC_REV_5705) {
9250                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9251                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9252                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9253                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9254         }
9255
9256         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9257                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9258                 val = tp->mac_mode;
9259         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9260                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9261                 val = tp->mac_mode;
9262         } else
9263                 val = 0;
9264
9265         tw32_f(MAC_MODE, val);
9266         udelay(40);
9267
9268         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9269
9270         tg3_mdio_start(tp);
9271
9272         if (tg3_flag(tp, PCI_EXPRESS) &&
9273             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9274             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9275             !tg3_flag(tp, 57765_PLUS)) {
9276                 val = tr32(0x7c00);
9277
9278                 tw32(0x7c00, val | (1 << 25));
9279         }
9280
9281         tg3_restore_clk(tp);
9282
9283         /* Reprobe ASF enable state.  */
9284         tg3_flag_clear(tp, ENABLE_ASF);
9285         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9286                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9287
9288         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9289         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9290         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9291                 u32 nic_cfg;
9292
9293                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9294                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9295                         tg3_flag_set(tp, ENABLE_ASF);
9296                         tp->last_event_jiffies = jiffies;
9297                         if (tg3_flag(tp, 5750_PLUS))
9298                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9299
9300                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9301                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9302                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9303                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9304                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9305                 }
9306         }
9307
9308         return 0;
9309 }
9310
9311 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9312 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9313 static void __tg3_set_rx_mode(struct net_device *);
9314
9315 /* tp->lock is held. */
9316 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9317 {
9318         int err;
9319
9320         tg3_stop_fw(tp);
9321
9322         tg3_write_sig_pre_reset(tp, kind);
9323
9324         tg3_abort_hw(tp, silent);
9325         err = tg3_chip_reset(tp);
9326
9327         __tg3_set_mac_addr(tp, false);
9328
9329         tg3_write_sig_legacy(tp, kind);
9330         tg3_write_sig_post_reset(tp, kind);
9331
9332         if (tp->hw_stats) {
9333                 /* Save the stats across chip resets... */
9334                 tg3_get_nstats(tp, &tp->net_stats_prev);
9335                 tg3_get_estats(tp, &tp->estats_prev);
9336
9337                 /* And make sure the next sample is new data */
9338                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9339         }
9340
9341         return err;
9342 }
9343
9344 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9345 {
9346         struct tg3 *tp = netdev_priv(dev);
9347         struct sockaddr *addr = p;
9348         int err = 0;
9349         bool skip_mac_1 = false;
9350
9351         if (!is_valid_ether_addr(addr->sa_data))
9352                 return -EADDRNOTAVAIL;
9353
9354         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9355
9356         if (!netif_running(dev))
9357                 return 0;
9358
9359         if (tg3_flag(tp, ENABLE_ASF)) {
9360                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9361
9362                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9363                 addr0_low = tr32(MAC_ADDR_0_LOW);
9364                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9365                 addr1_low = tr32(MAC_ADDR_1_LOW);
9366
9367                 /* Skip MAC addr 1 if ASF is using it. */
9368                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9369                     !(addr1_high == 0 && addr1_low == 0))
9370                         skip_mac_1 = true;
9371         }
9372         spin_lock_bh(&tp->lock);
9373         __tg3_set_mac_addr(tp, skip_mac_1);
9374         __tg3_set_rx_mode(dev);
9375         spin_unlock_bh(&tp->lock);
9376
9377         return err;
9378 }
9379
9380 /* tp->lock is held. */
9381 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9382                            dma_addr_t mapping, u32 maxlen_flags,
9383                            u32 nic_addr)
9384 {
9385         tg3_write_mem(tp,
9386                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9387                       ((u64) mapping >> 32));
9388         tg3_write_mem(tp,
9389                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9390                       ((u64) mapping & 0xffffffff));
9391         tg3_write_mem(tp,
9392                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9393                        maxlen_flags);
9394
9395         if (!tg3_flag(tp, 5705_PLUS))
9396                 tg3_write_mem(tp,
9397                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9398                               nic_addr);
9399 }
9400
9401
9402 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9403 {
9404         int i = 0;
9405
9406         if (!tg3_flag(tp, ENABLE_TSS)) {
9407                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9408                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9409                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9410         } else {
9411                 tw32(HOSTCC_TXCOL_TICKS, 0);
9412                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9413                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9414
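                /* Per-vector coalescing registers are laid out in
                 * 0x18-byte blocks starting at the VEC1 offsets; with
                 * TSS the global registers above are left zeroed.
                 */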
9415                 for (; i < tp->txq_cnt; i++) {
9416                         u32 reg;
9417
9418                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_coalesce_usecs);
9420                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9421                         tw32(reg, ec->tx_max_coalesced_frames);
9422                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9423                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9424                 }
9425         }
9426
9427         for (; i < tp->irq_max - 1; i++) {
9428                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9429                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9430                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9431         }
9432 }
9433
9434 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9435 {
9436         int i = 0;
9437         u32 limit = tp->rxq_cnt;
9438
9439         if (!tg3_flag(tp, ENABLE_RSS)) {
9440                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9441                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9442                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9443                 limit--;
9444         } else {
9445                 tw32(HOSTCC_RXCOL_TICKS, 0);
9446                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9447                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9448         }
9449
9450         for (; i < limit; i++) {
9451                 u32 reg;
9452
9453                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_coalesce_usecs);
9455                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9456                 tw32(reg, ec->rx_max_coalesced_frames);
9457                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9458                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9459         }
9460
9461         for (; i < tp->irq_max - 1; i++) {
9462                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9463                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9464                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9465         }
9466 }
9467
9468 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9469 {
9470         tg3_coal_tx_init(tp, ec);
9471         tg3_coal_rx_init(tp, ec);
9472
9473         if (!tg3_flag(tp, 5705_PLUS)) {
9474                 u32 val = ec->stats_block_coalesce_usecs;
9475
9476                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9477                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9478
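                /* A zero tick count stops the periodic statistics
                 * block DMA; nothing worth sampling arrives while
                 * the link is down.
                 */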
9479                 if (!tp->link_up)
9480                         val = 0;
9481
9482                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9483         }
9484 }
9485
9486 /* tp->lock is held. */
9487 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9488 {
9489         u32 txrcb, limit;
9490
9491         /* Disable all transmit rings but the first. */
9492         if (!tg3_flag(tp, 5705_PLUS))
9493                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9494         else if (tg3_flag(tp, 5717_PLUS))
9495                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9496         else if (tg3_flag(tp, 57765_CLASS) ||
9497                  tg3_asic_rev(tp) == ASIC_REV_5762)
9498                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9499         else
9500                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501
9502         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9503              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9504                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9505                               BDINFO_FLAGS_DISABLED);
9506 }
9507
9508 /* tp->lock is held. */
9509 static void tg3_tx_rcbs_init(struct tg3 *tp)
9510 {
9511         int i = 0;
9512         u32 txrcb = NIC_SRAM_SEND_RCB;
9513
9514         if (tg3_flag(tp, ENABLE_TSS))
9515                 i++;
9516
9517         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9518                 struct tg3_napi *tnapi = &tp->napi[i];
9519
9520                 if (!tnapi->tx_ring)
9521                         continue;
9522
9523                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9524                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9525                                NIC_SRAM_TX_BUFFER_DESC);
9526         }
9527 }
9528
9529 /* tp->lock is held. */
9530 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9531 {
9532         u32 rxrcb, limit;
9533
9534         /* Disable all receive return rings but the first. */
9535         if (tg3_flag(tp, 5717_PLUS))
9536                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9537         else if (!tg3_flag(tp, 5705_PLUS))
9538                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9539         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9540                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9541                  tg3_flag(tp, 57765_CLASS))
9542                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9543         else
9544                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545
9546         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9547              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9548                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9549                               BDINFO_FLAGS_DISABLED);
9550 }
9551
9552 /* tp->lock is held. */
9553 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9554 {
9555         int i = 0;
9556         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9557
9558         if (tg3_flag(tp, ENABLE_RSS))
9559                 i++;
9560
9561         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9562                 struct tg3_napi *tnapi = &tp->napi[i];
9563
9564                 if (!tnapi->rx_rcb)
9565                         continue;
9566
9567                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9568                                (tp->rx_ret_ring_mask + 1) <<
9569                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9570         }
9571 }
9572
9573 /* tp->lock is held. */
9574 static void tg3_rings_reset(struct tg3 *tp)
9575 {
9576         int i;
9577         u32 stblk;
9578         struct tg3_napi *tnapi = &tp->napi[0];
9579
9580         tg3_tx_rcbs_disable(tp);
9581
9582         tg3_rx_ret_rcbs_disable(tp);
9583
9584         /* Disable interrupts */
9585         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9586         tp->napi[0].chk_msi_cnt = 0;
9587         tp->napi[0].last_rx_cons = 0;
9588         tp->napi[0].last_tx_cons = 0;
9589
9590         /* Zero mailbox registers. */
9591         if (tg3_flag(tp, SUPPORT_MSIX)) {
9592                 for (i = 1; i < tp->irq_max; i++) {
9593                         tp->napi[i].tx_prod = 0;
9594                         tp->napi[i].tx_cons = 0;
9595                         if (tg3_flag(tp, ENABLE_TSS))
9596                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9597                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9598                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9599                         tp->napi[i].chk_msi_cnt = 0;
9600                         tp->napi[i].last_rx_cons = 0;
9601                         tp->napi[i].last_tx_cons = 0;
9602                 }
9603                 if (!tg3_flag(tp, ENABLE_TSS))
9604                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9605         } else {
9606                 tp->napi[0].tx_prod = 0;
9607                 tp->napi[0].tx_cons = 0;
9608                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9609                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9610         }
9611
9612         /* Make sure the NIC-based send BD rings are disabled. */
9613         if (!tg3_flag(tp, 5705_PLUS)) {
9614                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9615                 for (i = 0; i < 16; i++)
9616                         tw32_tx_mbox(mbox + i * 8, 0);
9617         }
9618
9619         /* Clear the status block in RAM. */
9620         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9621
9622         /* Set status block DMA address */
9623         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9624              ((u64) tnapi->status_mapping >> 32));
9625         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9626              ((u64) tnapi->status_mapping & 0xffffffff));
9627
9628         stblk = HOSTCC_STATBLCK_RING1;
9629
9630         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
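        /* Each remaining vector's status block address goes into the
         * next consecutive 8-byte high/low register pair after
         * HOSTCC_STATBLCK_RING1.
         */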
9631                 u64 mapping = (u64)tnapi->status_mapping;
9632                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9633                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9634                 stblk += 8;
9635
9636                 /* Clear the status block in RAM. */
9637                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9638         }
9639
9640         tg3_tx_rcbs_init(tp);
9641         tg3_rx_ret_rcbs_init(tp);
9642 }
9643
9644 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9645 {
9646         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9647
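        /* Pick the standard-ring BD cache size for this ASIC family.
         * The replenish threshold programmed below is the smallest of
         * half that cache, the chip's maximum post count, and one
         * eighth of the configured ring (with a floor of one BD).
         */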
9648         if (!tg3_flag(tp, 5750_PLUS) ||
9649             tg3_flag(tp, 5780_CLASS) ||
9650             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9651             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9652             tg3_flag(tp, 57765_PLUS))
9653                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9654         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9655                  tg3_asic_rev(tp) == ASIC_REV_5787)
9656                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9657         else
9658                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9659
9660         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9661         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9662
9663         val = min(nic_rep_thresh, host_rep_thresh);
9664         tw32(RCVBDI_STD_THRESH, val);
9665
9666         if (tg3_flag(tp, 57765_PLUS))
9667                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9668
9669         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9670                 return;
9671
9672         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9673
9674         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9675
9676         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9677         tw32(RCVBDI_JUMBO_THRESH, val);
9678
9679         if (tg3_flag(tp, 57765_PLUS))
9680                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9681 }
9682
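/* Standard bit-reflected Ethernet CRC-32: polynomial 0xedb88320,
 * LSB-first, seeded with all ones and complemented on return.  The
 * multicast hash filter below indexes its 128 bits with the low
 * seven bits of the inverted result.
 */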
9683 static inline u32 calc_crc(unsigned char *buf, int len)
9684 {
9685         u32 reg;
9686         u32 tmp;
9687         int j, k;
9688
9689         reg = 0xffffffff;
9690
9691         for (j = 0; j < len; j++) {
9692                 reg ^= buf[j];
9693
9694                 for (k = 0; k < 8; k++) {
9695                         tmp = reg & 0x01;
9696
9697                         reg >>= 1;
9698
9699                         if (tmp)
9700                                 reg ^= 0xedb88320;
9701                 }
9702         }
9703
9704         return ~reg;
9705 }
9706
9707 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9708 {
9709         /* accept or reject all multicast frames */
9710         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9711         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9712         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9713         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9714 }
9715
9716 static void __tg3_set_rx_mode(struct net_device *dev)
9717 {
9718         struct tg3 *tp = netdev_priv(dev);
9719         u32 rx_mode;
9720
9721         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9722                                   RX_MODE_KEEP_VLAN_TAG);
9723
9724 #if !IS_ENABLED(CONFIG_VLAN_8021Q)
9725         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9726          * flag clear.
9727          */
9728         if (!tg3_flag(tp, ENABLE_ASF))
9729                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9730 #endif
9731
9732         if (dev->flags & IFF_PROMISC) {
9733                 /* Promiscuous mode. */
9734                 rx_mode |= RX_MODE_PROMISC;
9735         } else if (dev->flags & IFF_ALLMULTI) {
9736                 /* Accept all multicast. */
9737                 tg3_set_multi(tp, 1);
9738         } else if (netdev_mc_empty(dev)) {
9739                 /* Reject all multicast. */
9740                 tg3_set_multi(tp, 0);
9741         } else {
9742                 /* Accept one or more multicast(s). */
9743                 struct netdev_hw_addr *ha;
9744                 u32 mc_filter[4] = { 0, };
9745                 u32 regidx;
9746                 u32 bit;
9747                 u32 crc;
9748
9749                 netdev_for_each_mc_addr(ha, dev) {
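                        /* Select one of the 128 hash-filter bits: bits
                         * 6:5 of the inverted CRC pick the hash register,
                         * bits 4:0 pick the bit within that register.
                         */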
9750                         crc = calc_crc(ha->addr, ETH_ALEN);
9751                         bit = ~crc & 0x7f;
9752                         regidx = (bit & 0x60) >> 5;
9753                         bit &= 0x1f;
9754                         mc_filter[regidx] |= (1 << bit);
9755                 }
9756
9757                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9758                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9759                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9760                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9761         }
9762
9763         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9764                 rx_mode |= RX_MODE_PROMISC;
9765         } else if (!(dev->flags & IFF_PROMISC)) {
9766                 /* Add all entries to the MAC address filter list */
9767                 int i = 0;
9768                 struct netdev_hw_addr *ha;
9769
9770                 netdev_for_each_uc_addr(ha, dev) {
9771                         __tg3_set_one_mac_addr(tp, ha->addr,
9772                                                i + TG3_UCAST_ADDR_IDX(tp));
9773                         i++;
9774                 }
9775         }
9776
9777         if (rx_mode != tp->rx_mode) {
9778                 tp->rx_mode = rx_mode;
9779                 tw32_f(MAC_RX_MODE, rx_mode);
9780                 udelay(10);
9781         }
9782 }
9783
9784 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9785 {
9786         int i;
9787
9788         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9789                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9790 }
9791
9792 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9793 {
9794         int i;
9795
9796         if (!tg3_flag(tp, SUPPORT_MSIX))
9797                 return;
9798
9799         if (tp->rxq_cnt == 1) {
9800                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9801                 return;
9802         }
9803
9804         /* Validate table against current IRQ count */
9805         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9806                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9807                         break;
9808         }
9809
9810         if (i != TG3_RSS_INDIR_TBL_SIZE)
9811                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9812 }
9813
9814 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9815 {
9816         int i = 0;
9817         u32 reg = MAC_RSS_INDIR_TBL_0;
9818
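        /* Each 32-bit indirection register packs eight 4-bit queue
         * indices, first entry in the most significant nibble, so the
         * 128-entry table fills sixteen consecutive registers.
         */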
9819         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9820                 u32 val = tp->rss_ind_tbl[i];
9821                 i++;
9822                 for (; i % 8; i++) {
9823                         val <<= 4;
9824                         val |= tp->rss_ind_tbl[i];
9825                 }
9826                 tw32(reg, val);
9827                 reg += 4;
9828         }
9829 }
9830
9831 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9832 {
9833         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9834                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9835         else
9836                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9837 }
9838
9839 /* tp->lock is held. */
9840 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9841 {
9842         u32 val, rdmac_mode;
9843         int i, err, limit;
9844         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9845
9846         tg3_disable_ints(tp);
9847
9848         tg3_stop_fw(tp);
9849
9850         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9851
9852         if (tg3_flag(tp, INIT_COMPLETE))
9853                 tg3_abort_hw(tp, true);
9854
9855         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9856             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9857                 tg3_phy_pull_config(tp);
9858                 tg3_eee_pull_config(tp, NULL);
9859                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9860         }
9861
9862         /* Enable MAC control of LPI */
9863         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9864                 tg3_setup_eee(tp);
9865
9866         if (reset_phy)
9867                 tg3_phy_reset(tp);
9868
9869         err = tg3_chip_reset(tp);
9870         if (err)
9871                 return err;
9872
9873         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9874
9875         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9876                 val = tr32(TG3_CPMU_CTRL);
9877                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9878                 tw32(TG3_CPMU_CTRL, val);
9879
9880                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9881                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9882                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9883                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9884
9885                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9886                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9887                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9888                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9889
9890                 val = tr32(TG3_CPMU_HST_ACC);
9891                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9892                 val |= CPMU_HST_ACC_MACCLK_6_25;
9893                 tw32(TG3_CPMU_HST_ACC, val);
9894         }
9895
9896         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9897                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9898                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9899                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9900                 tw32(PCIE_PWR_MGMT_THRESH, val);
9901
9902                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9903                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9904
9905                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9906
9907                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9908                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9909         }
9910
9911         if (tg3_flag(tp, L1PLLPD_EN)) {
9912                 u32 grc_mode = tr32(GRC_MODE);
9913
9914                 /* Access the lower 1K of PL PCIE block registers. */
9915                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9916                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9917
9918                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9919                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9920                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9921
9922                 tw32(GRC_MODE, grc_mode);
9923         }
9924
9925         if (tg3_flag(tp, 57765_CLASS)) {
9926                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9927                         u32 grc_mode = tr32(GRC_MODE);
9928
9929                         /* Access the lower 1K of PL PCIE block registers. */
9930                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9931                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9932
9933                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9934                                    TG3_PCIE_PL_LO_PHYCTL5);
9935                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9936                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9937
9938                         tw32(GRC_MODE, grc_mode);
9939                 }
9940
9941                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9942                         u32 grc_mode;
9943
9944                         /* Fix transmit hangs */
9945                         val = tr32(TG3_CPMU_PADRNG_CTL);
9946                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9947                         tw32(TG3_CPMU_PADRNG_CTL, val);
9948
9949                         grc_mode = tr32(GRC_MODE);
9950
9951                         /* Access the lower 1K of DL PCIE block registers. */
9952                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9953                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9954
9955                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9956                                    TG3_PCIE_DL_LO_FTSMAX);
9957                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9958                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9959                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9960
9961                         tw32(GRC_MODE, grc_mode);
9962                 }
9963
9964                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9965                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9966                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9967                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9968         }
9969
9970         /* This works around an issue with Athlon chipsets on
9971          * B3 tigon3 silicon.  This bit has no effect on any
9972          * other revision.  But do not set this on PCI Express
9973          * chips and don't even touch the clocks if the CPMU is present.
9974          */
9975         if (!tg3_flag(tp, CPMU_PRESENT)) {
9976                 if (!tg3_flag(tp, PCI_EXPRESS))
9977                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9978                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9979         }
9980
9981         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9982             tg3_flag(tp, PCIX_MODE)) {
9983                 val = tr32(TG3PCI_PCISTATE);
9984                 val |= PCISTATE_RETRY_SAME_DMA;
9985                 tw32(TG3PCI_PCISTATE, val);
9986         }
9987
9988         if (tg3_flag(tp, ENABLE_APE)) {
9989                 /* Allow reads and writes to the
9990                  * APE register and memory space.
9991                  */
9992                 val = tr32(TG3PCI_PCISTATE);
9993                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9994                        PCISTATE_ALLOW_APE_SHMEM_WR |
9995                        PCISTATE_ALLOW_APE_PSPACE_WR;
9996                 tw32(TG3PCI_PCISTATE, val);
9997         }
9998
9999         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10000                 /* Enable some hw fixes.  */
10001                 val = tr32(TG3PCI_MSI_DATA);
10002                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10003                 tw32(TG3PCI_MSI_DATA, val);
10004         }
10005
10006         /* Descriptor ring init may make accesses to the
10007          * NIC SRAM area to set up the TX descriptors, so we
10008          * can only do this after the hardware has been
10009          * successfully reset.
10010          */
10011         err = tg3_init_rings(tp);
10012         if (err)
10013                 return err;
10014
10015         if (tg3_flag(tp, 57765_PLUS)) {
10016                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10017                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10018                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10019                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10020                 if (!tg3_flag(tp, 57765_CLASS) &&
10021                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10022                     tg3_asic_rev(tp) != ASIC_REV_5762)
10023                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10024                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10025         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10026                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10027                 /* This value is determined during the probe-time DMA
10028                  * engine test, tg3_test_dma().
10029                  */
10030                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10031         }
10032
10033         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10034                           GRC_MODE_4X_NIC_SEND_RINGS |
10035                           GRC_MODE_NO_TX_PHDR_CSUM |
10036                           GRC_MODE_NO_RX_PHDR_CSUM);
10037         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10038
10039         /* Pseudo-header checksum is done by hardware logic and not
10040          * the offload processors, so make the chip do the pseudo-
10041          * header checksums on receive.  For transmit it is more
10042          * convenient to do the pseudo-header checksum in software
10043          * as Linux does that on transmit for us in all cases.
10044          */
10045         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10046
10047         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10048         if (tp->rxptpctl)
10049                 tw32(TG3_RX_PTP_CTL,
10050                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10051
10052         if (tg3_flag(tp, PTP_CAPABLE))
10053                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10054
10055         tw32(GRC_MODE, tp->grc_mode | val);
10056
10057         /* On one of the AMD platforms, MRRS is restricted to 4000 because
10058          * of a south bridge limitation.  As a workaround, the driver sets
10059          * MRRS to 2048 instead of the default 4096.
10060          */
10061         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10062             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10063                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10064                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10065         }
10066
10067         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
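        /* A prescaler value of 65 presumably divides the 66 MHz clock by
         * 65 + 1, for a ~1 us timer tick.
         */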
10068         val = tr32(GRC_MISC_CFG);
10069         val &= ~0xff;
10070         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10071         tw32(GRC_MISC_CFG, val);
10072
10073         /* Initialize MBUF/DESC pool. */
10074         if (tg3_flag(tp, 5750_PLUS)) {
10075                 /* Do nothing.  */
10076         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10077                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10078                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10079                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10080                 else
10081                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10082                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10083                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10084         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10085                 int fw_len;
10086
10087                 fw_len = tp->fw_len;
10088                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10089                 tw32(BUFMGR_MB_POOL_ADDR,
10090                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10091                 tw32(BUFMGR_MB_POOL_SIZE,
10092                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10093         }
10094
10095         if (tp->dev->mtu <= ETH_DATA_LEN) {
10096                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10097                      tp->bufmgr_config.mbuf_read_dma_low_water);
10098                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10099                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10100                 tw32(BUFMGR_MB_HIGH_WATER,
10101                      tp->bufmgr_config.mbuf_high_water);
10102         } else {
10103                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10104                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10105                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10106                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10107                 tw32(BUFMGR_MB_HIGH_WATER,
10108                      tp->bufmgr_config.mbuf_high_water_jumbo);
10109         }
10110         tw32(BUFMGR_DMA_LOW_WATER,
10111              tp->bufmgr_config.dma_low_water);
10112         tw32(BUFMGR_DMA_HIGH_WATER,
10113              tp->bufmgr_config.dma_high_water);
10114
10115         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10116         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10117                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10118         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10119             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10120             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10121             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10122                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10123         tw32(BUFMGR_MODE, val);
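        /* Poll up to ~20 ms (2000 * 10 us) for the buffer manager to
         * report itself enabled; give up with -ENODEV if it never does.
         */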
10124         for (i = 0; i < 2000; i++) {
10125                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10126                         break;
10127                 udelay(10);
10128         }
10129         if (i >= 2000) {
10130                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10131                 return -ENODEV;
10132         }
10133
10134         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10135                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10136
10137         tg3_setup_rxbd_thresholds(tp);
10138
10139         /* Initialize TG3_BDINFO's at:
10140          *  RCVDBDI_STD_BD:     standard eth size rx ring
10141          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10142          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10143          *
10144          * like so:
10145          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10146          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10147          *                              ring attribute flags
10148          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10149          *
10150          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10151          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10152          *
10153          * The size of each ring is fixed in the firmware, but the location is
10154          * configurable.
10155          */
10156         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10157              ((u64) tpr->rx_std_mapping >> 32));
10158         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10159              ((u64) tpr->rx_std_mapping & 0xffffffff));
10160         if (!tg3_flag(tp, 5717_PLUS))
10161                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10162                      NIC_SRAM_RX_BUFFER_DESC);
10163
10164         /* Disable the mini ring */
10165         if (!tg3_flag(tp, 5705_PLUS))
10166                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10167                      BDINFO_FLAGS_DISABLED);
10168
10169         /* Program the jumbo buffer descriptor ring control
10170          * blocks on those devices that have them.
10171          */
10172         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10173             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10174
10175                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10176                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10177                              ((u64) tpr->rx_jmb_mapping >> 32));
10178                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10179                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10180                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10181                               BDINFO_FLAGS_MAXLEN_SHIFT;
10182                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10183                              val | BDINFO_FLAGS_USE_EXT_RECV);
10184                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10185                             tg3_flag(tp, 57765_CLASS) ||
10186                             tg3_asic_rev(tp) == ASIC_REV_5762)
10187                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10188                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10189                 } else {
10190                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191                              BDINFO_FLAGS_DISABLED);
10192                 }
10193
10194                 if (tg3_flag(tp, 57765_PLUS)) {
10195                         val = TG3_RX_STD_RING_SIZE(tp);
10196                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10197                         val |= (TG3_RX_STD_DMA_SZ << 2);
10198                 } else
10199                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10200         } else
10201                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10202
10203         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10204
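        /* Publish the initial producer indices so the chip can start
         * filling the standard (and, if enabled, jumbo) rx rings.
         */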
10205         tpr->rx_std_prod_idx = tp->rx_pending;
10206         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10207
10208         tpr->rx_jmb_prod_idx =
10209                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10210         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10211
10212         tg3_rings_reset(tp);
10213
10214         /* Initialize MAC address and backoff seed. */
10215         __tg3_set_mac_addr(tp, false);
10216
10217         /* MTU + ethernet header + FCS + optional VLAN tag */
10218         tw32(MAC_RX_MTU_SIZE,
10219              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10220
10221         /* The slot time is changed by tg3_setup_phy if we
10222          * run at gigabit with half duplex.
10223          */
10224         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10225               (6 << TX_LENGTHS_IPG_SHIFT) |
10226               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10227
10228         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10229             tg3_asic_rev(tp) == ASIC_REV_5762)
10230                 val |= tr32(MAC_TX_LENGTHS) &
10231                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10232                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10233
10234         tw32(MAC_TX_LENGTHS, val);
10235
10236         /* Receive rules. */
10237         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10238         tw32(RCVLPC_CONFIG, 0x0181);
10239
10240         /* Calculate the RDMAC_MODE setting early; we need it to determine
10241          * the RCVLPC_STATE_ENABLE mask.
10242          */
10243         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10244                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10245                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10246                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10247                       RDMAC_MODE_LNGREAD_ENAB);
10248
10249         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10250                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10251
10252         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10253             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10254             tg3_asic_rev(tp) == ASIC_REV_57780)
10255                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10256                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10257                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10258
10259         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10260             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10261                 if (tg3_flag(tp, TSO_CAPABLE) &&
10262                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10263                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10264                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10265                            !tg3_flag(tp, IS_5788)) {
10266                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10267                 }
10268         }
10269
10270         if (tg3_flag(tp, PCI_EXPRESS))
10271                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10272
10273         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10274                 tp->dma_limit = 0;
10275                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10276                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10277                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10278                 }
10279         }
10280
10281         if (tg3_flag(tp, HW_TSO_1) ||
10282             tg3_flag(tp, HW_TSO_2) ||
10283             tg3_flag(tp, HW_TSO_3))
10284                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10285
10286         if (tg3_flag(tp, 57765_PLUS) ||
10287             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10288             tg3_asic_rev(tp) == ASIC_REV_57780)
10289                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10290
10291         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10292             tg3_asic_rev(tp) == ASIC_REV_5762)
10293                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10294
10295         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10296             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10297             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10298             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10299             tg3_flag(tp, 57765_PLUS)) {
10300                 u32 tgtreg;
10301
10302                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10303                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10304                 else
10305                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10306
10307                 val = tr32(tgtreg);
10308                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10309                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10310                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10311                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10312                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10313                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10314                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10315                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10316                 }
10317                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10318         }
10319
10320         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10321             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10322             tg3_asic_rev(tp) == ASIC_REV_5762) {
10323                 u32 tgtreg;
10324
10325                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10326                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10327                 else
10328                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10329
10330                 val = tr32(tgtreg);
10331                 tw32(tgtreg, val |
10332                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10333                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10334         }
10335
10336         /* Receive/send statistics. */
10337         if (tg3_flag(tp, 5750_PLUS)) {
10338                 val = tr32(RCVLPC_STATS_ENABLE);
10339                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10340                 tw32(RCVLPC_STATS_ENABLE, val);
10341         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10342                    tg3_flag(tp, TSO_CAPABLE)) {
10343                 val = tr32(RCVLPC_STATS_ENABLE);
10344                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10345                 tw32(RCVLPC_STATS_ENABLE, val);
10346         } else {
10347                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10348         }
10349         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10350         tw32(SNDDATAI_STATSENAB, 0xffffff);
10351         tw32(SNDDATAI_STATSCTRL,
10352              (SNDDATAI_SCTRL_ENABLE |
10353               SNDDATAI_SCTRL_FASTUPD));
10354
10355         /* Set up the host coalescing engine. */
10356         tw32(HOSTCC_MODE, 0);
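        /* Wait up to ~20 ms for the engine to quiesce before programming
         * new coalescing parameters.
         */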
10357         for (i = 0; i < 2000; i++) {
10358                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10359                         break;
10360                 udelay(10);
10361         }
10362
10363         __tg3_set_coalesce(tp, &tp->coal);
10364
10365         if (!tg3_flag(tp, 5705_PLUS)) {
10366                 /* Status/statistics block address.  See tg3_timer,
10367                  * the tg3_periodic_fetch_stats call there, and
10368                  * tg3_get_stats to see how this works for 5705/5750 chips.
10369                  */
10370                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10371                      ((u64) tp->stats_mapping >> 32));
10372                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10373                      ((u64) tp->stats_mapping & 0xffffffff));
10374                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10375
10376                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10377
10378                 /* Clear statistics and status block memory areas */
10379                 for (i = NIC_SRAM_STATS_BLK;
10380                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10381                      i += sizeof(u32)) {
10382                         tg3_write_mem(tp, i, 0);
10383                         udelay(40);
10384                 }
10385         }
10386
10387         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10388
10389         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10390         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10391         if (!tg3_flag(tp, 5705_PLUS))
10392                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10393
10394         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10395                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10396                 /* Reset to prevent intermittently losing the first rx packet */
10397                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10398                 udelay(10);
10399         }
10400
10401         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10402                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10403                         MAC_MODE_FHDE_ENABLE;
10404         if (tg3_flag(tp, ENABLE_APE))
10405                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10406         if (!tg3_flag(tp, 5705_PLUS) &&
10407             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10408             tg3_asic_rev(tp) != ASIC_REV_5700)
10409                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10410         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10411         udelay(40);
10412
10413         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10414          * If TG3_FLAG_IS_NIC is zero, we should read the
10415          * register to preserve the GPIO settings for LOMs. The GPIOs,
10416          * whether used as inputs or outputs, are set by boot code after
10417          * reset.
10418          */
10419         if (!tg3_flag(tp, IS_NIC)) {
10420                 u32 gpio_mask;
10421
10422                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10423                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10424                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10425
10426                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10427                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10428                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10429
10430                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10431                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10432
10433                 tp->grc_local_ctrl &= ~gpio_mask;
10434                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10435
10436                 /* GPIO1 must be driven high for eeprom write protect */
10437                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10438                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10439                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10440         }
10441         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10442         udelay(100);
10443
10444         if (tg3_flag(tp, USING_MSIX)) {
10445                 val = tr32(MSGINT_MODE);
10446                 val |= MSGINT_MODE_ENABLE;
10447                 if (tp->irq_cnt > 1)
10448                         val |= MSGINT_MODE_MULTIVEC_EN;
10449                 if (!tg3_flag(tp, 1SHOT_MSI))
10450                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10451                 tw32(MSGINT_MODE, val);
10452         }
10453
10454         if (!tg3_flag(tp, 5705_PLUS)) {
10455                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10456                 udelay(40);
10457         }
10458
10459         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10460                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10461                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10462                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10463                WDMAC_MODE_LNGREAD_ENAB);
10464
10465         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10466             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10467                 if (tg3_flag(tp, TSO_CAPABLE) &&
10468                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10469                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10470                         /* nothing */
10471                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10472                            !tg3_flag(tp, IS_5788)) {
10473                         val |= WDMAC_MODE_RX_ACCEL;
10474                 }
10475         }
10476
10477         /* Enable host coalescing bug fix */
10478         if (tg3_flag(tp, 5755_PLUS))
10479                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10480
10481         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10482                 val |= WDMAC_MODE_BURST_ALL_DATA;
10483
10484         tw32_f(WDMAC_MODE, val);
10485         udelay(40);
10486
10487         if (tg3_flag(tp, PCIX_MODE)) {
10488                 u16 pcix_cmd;
10489
10490                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10491                                      &pcix_cmd);
10492                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10493                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10494                         pcix_cmd |= PCI_X_CMD_READ_2K;
10495                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10496                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10497                         pcix_cmd |= PCI_X_CMD_READ_2K;
10498                 }
10499                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10500                                       pcix_cmd);
10501         }
10502
10503         tw32_f(RDMAC_MODE, rdmac_mode);
10504         udelay(40);
10505
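        /* 5719/5720: if any read DMA channel reports a length above the
         * MTU, set the LSO read DMA workaround bit.  It is cleared again
         * in tg3_periodic_fetch_stats() once enough frames have been sent.
         */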
10506         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10507             tg3_asic_rev(tp) == ASIC_REV_5720) {
10508                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10509                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10510                                 break;
10511                 }
10512                 if (i < TG3_NUM_RDMA_CHANNELS) {
10513                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10514                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10515                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10516                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10517                 }
10518         }
10519
10520         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10521         if (!tg3_flag(tp, 5705_PLUS))
10522                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10523
10524         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10525                 tw32(SNDDATAC_MODE,
10526                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10527         else
10528                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10529
10530         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10531         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10532         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10533         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10534                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10535         tw32(RCVDBDI_MODE, val);
10536         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10537         if (tg3_flag(tp, HW_TSO_1) ||
10538             tg3_flag(tp, HW_TSO_2) ||
10539             tg3_flag(tp, HW_TSO_3))
10540                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10541         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10542         if (tg3_flag(tp, ENABLE_TSS))
10543                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10544         tw32(SNDBDI_MODE, val);
10545         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10546
10547         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10548                 err = tg3_load_5701_a0_firmware_fix(tp);
10549                 if (err)
10550                         return err;
10551         }
10552
10553         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10554                 /* Ignore any errors from the firmware download.  If the
10555                  * download fails, the device will operate with EEE disabled.
10556                  */
10557                 tg3_load_57766_firmware(tp);
10558         }
10559
10560         if (tg3_flag(tp, TSO_CAPABLE)) {
10561                 err = tg3_load_tso_firmware(tp);
10562                 if (err)
10563                         return err;
10564         }
10565
10566         tp->tx_mode = TX_MODE_ENABLE;
10567
10568         if (tg3_flag(tp, 5755_PLUS) ||
10569             tg3_asic_rev(tp) == ASIC_REV_5906)
10570                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10571
10572         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10573             tg3_asic_rev(tp) == ASIC_REV_5762) {
10574                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10575                 tp->tx_mode &= ~val;
10576                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10577         }
10578
10579         tw32_f(MAC_TX_MODE, tp->tx_mode);
10580         udelay(100);
10581
10582         if (tg3_flag(tp, ENABLE_RSS)) {
10583                 u32 rss_key[10];
10584
10585                 tg3_rss_write_indir_tbl(tp);
10586
10587                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10588
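                /* Program the 40-byte RSS hash key, one 32-bit word per
                 * register.
                 */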
10589                 for (i = 0; i < 10; i++)
10590                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10591         }
10592
10593         tp->rx_mode = RX_MODE_ENABLE;
10594         if (tg3_flag(tp, 5755_PLUS))
10595                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10596
10597         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10598                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10599
10600         if (tg3_flag(tp, ENABLE_RSS))
10601                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10602                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10603                                RX_MODE_RSS_IPV6_HASH_EN |
10604                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10605                                RX_MODE_RSS_IPV4_HASH_EN |
10606                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10607
10608         tw32_f(MAC_RX_MODE, tp->rx_mode);
10609         udelay(10);
10610
10611         tw32(MAC_LED_CTRL, tp->led_ctrl);
10612
10613         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10614         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10615                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10616                 udelay(10);
10617         }
10618         tw32_f(MAC_RX_MODE, tp->rx_mode);
10619         udelay(10);
10620
10621         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10622                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10623                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10624                         /* Set drive transmission level to 1.2V  */
10625                         /* only if the signal pre-emphasis bit is not set  */
10626                         val = tr32(MAC_SERDES_CFG);
10627                         val &= 0xfffff000;
10628                         val |= 0x880;
10629                         tw32(MAC_SERDES_CFG, val);
10630                 }
10631                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10632                         tw32(MAC_SERDES_CFG, 0x616000);
10633         }
10634
10635         /* Prevent chip from dropping frames when flow control
10636          * is enabled.
10637          */
10638         if (tg3_flag(tp, 57765_CLASS))
10639                 val = 1;
10640         else
10641                 val = 2;
10642         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10643
10644         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10645             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10646                 /* Use hardware link auto-negotiation */
10647                 tg3_flag_set(tp, HW_AUTONEG);
10648         }
10649
10650         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10651             tg3_asic_rev(tp) == ASIC_REV_5714) {
10652                 u32 tmp;
10653
10654                 tmp = tr32(SERDES_RX_CTRL);
10655                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10656                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10657                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10658                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10659         }
10660
10661         if (!tg3_flag(tp, USE_PHYLIB)) {
10662                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10663                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10664
10665                 err = tg3_setup_phy(tp, false);
10666                 if (err)
10667                         return err;
10668
10669                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10670                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10671                         u32 tmp;
10672
10673                         /* Clear CRC stats. */
10674                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10675                                 tg3_writephy(tp, MII_TG3_TEST1,
10676                                              tmp | MII_TG3_TEST1_CRC_EN);
10677                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10678                         }
10679                 }
10680         }
10681
10682         __tg3_set_rx_mode(tp->dev);
10683
10684         /* Initialize receive rules. */
10685         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10686         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10687         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10688         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10689
10690         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10691                 limit = 8;
10692         else
10693                 limit = 16;
10694         if (tg3_flag(tp, ENABLE_ASF))
10695                 limit -= 4;
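        /* Each case intentionally falls through, so every rule from
         * limit - 1 down to rule 4 is cleared.  Rules 0 and 1 were
         * programmed above; clearing rules 2 and 3 is deliberately
         * commented out.
         */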
10696         switch (limit) {
10697         case 16:
10698                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10699         case 15:
10700                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10701         case 14:
10702                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10703         case 13:
10704                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10705         case 12:
10706                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10707         case 11:
10708                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10709         case 10:
10710                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10711         case 9:
10712                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10713         case 8:
10714                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10715         case 7:
10716                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10717         case 6:
10718                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10719         case 5:
10720                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10721         case 4:
10722                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10723         case 3:
10724                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10725         case 2:
10726         case 1:
10727
10728         default:
10729                 break;
10730         }
10731
10732         if (tg3_flag(tp, ENABLE_APE))
10733                 /* Write our heartbeat update interval to APE. */
10734                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10735                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10736
10737         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10738
10739         return 0;
10740 }
10741
10742 /* Called at device open time to get the chip ready for
10743  * packet processing.  Invoked with tp->lock held.
10744  */
10745 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10746 {
10747         /* Chip may have been just powered on. If so, the boot code may still
10748          * be running initialization. Wait for it to finish to avoid races in
10749          * accessing the hardware.
10750          */
10751         tg3_enable_register_access(tp);
10752         tg3_poll_fw(tp);
10753
10754         tg3_switch_clocks(tp);
10755
10756         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10757
10758         return tg3_reset_hw(tp, reset_phy);
10759 }
10760
10761 #ifdef CONFIG_TIGON3_HWMON
10762 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10763 {
10764         int i;
10765
10766         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10767                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10768
10769                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10770                 off += len;
10771
10772                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10773                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10774                         memset(ocir, 0, TG3_OCIR_LEN);
10775         }
10776 }
10777
10778 /* sysfs attributes for hwmon */
10779 static ssize_t tg3_show_temp(struct device *dev,
10780                              struct device_attribute *devattr, char *buf)
10781 {
10782         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10783         struct tg3 *tp = dev_get_drvdata(dev);
10784         u32 temperature;
10785
10786         spin_lock_bh(&tp->lock);
10787         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10788                                 sizeof(temperature));
10789         spin_unlock_bh(&tp->lock);
10790         return sprintf(buf, "%u\n", temperature * 1000);
10791 }
10792
10793
10794 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10795                           TG3_TEMP_SENSOR_OFFSET);
10796 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10797                           TG3_TEMP_CAUTION_OFFSET);
10798 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10799                           TG3_TEMP_MAX_OFFSET);
10800
10801 static struct attribute *tg3_attrs[] = {
10802         &sensor_dev_attr_temp1_input.dev_attr.attr,
10803         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10804         &sensor_dev_attr_temp1_max.dev_attr.attr,
10805         NULL
10806 };
10807 ATTRIBUTE_GROUPS(tg3);
10808
10809 static void tg3_hwmon_close(struct tg3 *tp)
10810 {
10811         if (tp->hwmon_dev) {
10812                 hwmon_device_unregister(tp->hwmon_dev);
10813                 tp->hwmon_dev = NULL;
10814         }
10815 }
10816
10817 static void tg3_hwmon_open(struct tg3 *tp)
10818 {
10819         int i;
10820         u32 size = 0;
10821         struct pci_dev *pdev = tp->pdev;
10822         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10823
10824         tg3_sd_scan_scratchpad(tp, ocirs);
10825
10826         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10827                 if (!ocirs[i].src_data_length)
10828                         continue;
10829
10830                 size += ocirs[i].src_hdr_length;
10831                 size += ocirs[i].src_data_length;
10832         }
10833
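        /* Only register the hwmon device if the APE scratchpad advertised
         * at least one record carrying sensor data.
         */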
10834         if (!size)
10835                 return;
10836
10837         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10838                                                           tp, tg3_groups);
10839         if (IS_ERR(tp->hwmon_dev)) {
10840                 tp->hwmon_dev = NULL;
10841                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10842         }
10843 }
10844 #else
10845 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10846 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10847 #endif /* CONFIG_TIGON3_HWMON */
10848
10849
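/* Accumulate a 32-bit hardware counter into a 64-bit {high, low} stat,
 * carrying into the high word when the low word wraps.
 */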
10850 #define TG3_STAT_ADD32(PSTAT, REG) \
10851 do {    u32 __val = tr32(REG); \
10852         (PSTAT)->low += __val; \
10853         if ((PSTAT)->low < __val) \
10854                 (PSTAT)->high += 1; \
10855 } while (0)
10856
10857 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10858 {
10859         struct tg3_hw_stats *sp = tp->hw_stats;
10860
10861         if (!tp->link_up)
10862                 return;
10863
10864         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10865         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10866         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10867         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10868         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10869         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10870         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10871         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10872         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10873         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10874         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10875         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10876         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
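        /* Once more frames than RDMA channels have been sent, the
         * 5719/5720 RDMA workaround bit can safely be cleared again.
         */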
10877         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10878                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10879                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10880                 u32 val;
10881
10882                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10883                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10884                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10885                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10886         }
10887
10888         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10889         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10890         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10891         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10892         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10893         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10894         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10895         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10896         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10897         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10898         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10899         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10900         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10901         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10902
10903         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10904         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10905             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10906             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10907             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10908                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10909         } else {
10910                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10911                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10912                 if (val) {
10913                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10914                         sp->rx_discards.low += val;
10915                         if (sp->rx_discards.low < val)
10916                                 sp->rx_discards.high += 1;
10917                 }
10918                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10919         }
10920         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10921 }
10922
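/* Detect a missed MSI: if a vector has work pending but its consumer
 * indices have not moved since the last check, give it one timer tick
 * of grace and then run the MSI handler by hand.
 */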
10923 static void tg3_chk_missed_msi(struct tg3 *tp)
10924 {
10925         u32 i;
10926
10927         for (i = 0; i < tp->irq_cnt; i++) {
10928                 struct tg3_napi *tnapi = &tp->napi[i];
10929
10930                 if (tg3_has_work(tnapi)) {
10931                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10932                             tnapi->last_tx_cons == tnapi->tx_cons) {
10933                                 if (tnapi->chk_msi_cnt < 1) {
10934                                         tnapi->chk_msi_cnt++;
10935                                         return;
10936                                 }
10937                                 tg3_msi(0, tnapi);
10938                         }
10939                 }
10940                 tnapi->chk_msi_cnt = 0;
10941                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10942                 tnapi->last_tx_cons = tnapi->tx_cons;
10943         }
10944 }
10945
10946 static void tg3_timer(struct timer_list *t)
10947 {
10948         struct tg3 *tp = from_timer(tp, t, timer);
10949
10950         spin_lock(&tp->lock);
10951
10952         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10953                 spin_unlock(&tp->lock);
10954                 goto restart_timer;
10955         }
10956
10957         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10958             tg3_flag(tp, 57765_CLASS))
10959                 tg3_chk_missed_msi(tp);
10960
10961         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10962                 /* BCM4785: Flush posted writes from GbE to host memory. */
10963                 tr32(HOSTCC_MODE);
10964         }
10965
10966         if (!tg3_flag(tp, TAGGED_STATUS)) {
10967                 /* All of this garbage is needed because, when using
10968                  * non-tagged IRQ status, the mailbox/status_block protocol
10969                  * the chip uses with the CPU is race prone.
10970                  */
10971                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10972                         tw32(GRC_LOCAL_CTRL,
10973                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10974                 } else {
10975                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10976                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10977                 }
10978
10979                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10980                         spin_unlock(&tp->lock);
10981                         tg3_reset_task_schedule(tp);
10982                         goto restart_timer;
10983                 }
10984         }
10985
10986         /* This part only runs once per second. */
10987         if (!--tp->timer_counter) {
10988                 if (tg3_flag(tp, 5705_PLUS))
10989                         tg3_periodic_fetch_stats(tp);
10990
10991                 if (tp->setlpicnt && !--tp->setlpicnt)
10992                         tg3_phy_eee_enable(tp);
10993
10994                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10995                         u32 mac_stat;
10996                         int phy_event;
10997
10998                         mac_stat = tr32(MAC_STATUS);
10999
11000                         phy_event = 0;
11001                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11002                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11003                                         phy_event = 1;
11004                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11005                                 phy_event = 1;
11006
11007                         if (phy_event)
11008                                 tg3_setup_phy(tp, false);
11009                 } else if (tg3_flag(tp, POLL_SERDES)) {
11010                         u32 mac_stat = tr32(MAC_STATUS);
11011                         int need_setup = 0;
11012
11013                         if (tp->link_up &&
11014                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11015                                 need_setup = 1;
11016                         }
11017                         if (!tp->link_up &&
11018                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11019                                          MAC_STATUS_SIGNAL_DET))) {
11020                                 need_setup = 1;
11021                         }
11022                         if (need_setup) {
11023                                 if (!tp->serdes_counter) {
11024                                         tw32_f(MAC_MODE,
11025                                              (tp->mac_mode &
11026                                               ~MAC_MODE_PORT_MODE_MASK));
11027                                         udelay(40);
11028                                         tw32_f(MAC_MODE, tp->mac_mode);
11029                                         udelay(40);
11030                                 }
11031                                 tg3_setup_phy(tp, false);
11032                         }
11033                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11034                            tg3_flag(tp, 5780_CLASS)) {
11035                         tg3_serdes_parallel_detect(tp);
11036                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11037                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11038                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11039                                          TG3_CPMU_STATUS_LINK_MASK);
11040
11041                         if (link_up != tp->link_up)
11042                                 tg3_setup_phy(tp, false);
11043                 }
11044
11045                 tp->timer_counter = tp->timer_multiplier;
11046         }
11047
11048         /* Heartbeat is only sent once every 2 seconds.
11049          *
11050          * The heartbeat is to tell the ASF firmware that the host
11051          * driver is still alive.  In the event that the OS crashes,
11052          * ASF needs to reset the hardware to free up the FIFO space
11053          * that may be filled with rx packets destined for the host.
11054          * If the FIFO is full, ASF will no longer function properly.
11055          *
11056          * Unintended resets have been reported on real time kernels
11057          * where the timer doesn't run on time.  Netpoll will also have
11058          * same problem.
11059          *
11060          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11061          * to check the ring condition when the heartbeat is expiring
11062          * before doing the reset.  This will prevent most unintended
11063          * resets.
11064          */
11065         if (!--tp->asf_counter) {
11066                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11067                         tg3_wait_for_event_ack(tp);
11068
11069                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11070                                       FWCMD_NICDRV_ALIVE3);
11071                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11072                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11073                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11074
11075                         tg3_generate_fw_event(tp);
11076                 }
11077                 tp->asf_counter = tp->asf_multiplier;
11078         }
11079
11080         spin_unlock(&tp->lock);
11081
11082 restart_timer:
11083         tp->timer.expires = jiffies + tp->timer_offset;
11084         add_timer(&tp->timer);
11085 }
11086
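/* Tagged-status chips can get by with a 1 second service timer; all
 * others (and the 5717/57765 parts, which need the missed-MSI check)
 * are polled ten times per second.
 */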
11087 static void tg3_timer_init(struct tg3 *tp)
11088 {
11089         if (tg3_flag(tp, TAGGED_STATUS) &&
11090             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11091             !tg3_flag(tp, 57765_CLASS))
11092                 tp->timer_offset = HZ;
11093         else
11094                 tp->timer_offset = HZ / 10;
11095
11096         BUG_ON(tp->timer_offset > HZ);
11097
11098         tp->timer_multiplier = (HZ / tp->timer_offset);
11099         tp->asf_multiplier = (HZ / tp->timer_offset) *
11100                              TG3_FW_UPDATE_FREQ_SEC;
11101
11102         timer_setup(&tp->timer, tg3_timer, 0);
11103 }
11104
11105 static void tg3_timer_start(struct tg3 *tp)
11106 {
11107         tp->asf_counter   = tp->asf_multiplier;
11108         tp->timer_counter = tp->timer_multiplier;
11109
11110         tp->timer.expires = jiffies + tp->timer_offset;
11111         add_timer(&tp->timer);
11112 }
11113
11114 static void tg3_timer_stop(struct tg3 *tp)
11115 {
11116         del_timer_sync(&tp->timer);
11117 }
11118
11119 /* Restart hardware after configuration changes, self-test, etc.
11120  * Invoked with tp->lock held.
11121  */
11122 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11123         __releases(tp->lock)
11124         __acquires(tp->lock)
11125 {
11126         int err;
11127
11128         err = tg3_init_hw(tp, reset_phy);
11129         if (err) {
11130                 netdev_err(tp->dev,
11131                            "Failed to re-initialize device, aborting\n");
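                /* Tear the device down.  dev_close() must run with
                 * tp->lock dropped and NAPI re-enabled, hence the
                 * unwinding before it and the relock after.
                 */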
11132                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11133                 tg3_full_unlock(tp);
11134                 tg3_timer_stop(tp);
11135                 tp->irq_sync = 0;
11136                 tg3_napi_enable(tp);
11137                 dev_close(tp->dev);
11138                 tg3_full_lock(tp, 0);
11139         }
11140         return err;
11141 }
11142
11143 static void tg3_reset_task(struct work_struct *work)
11144 {
11145         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11146         int err;
11147
11148         rtnl_lock();
11149         tg3_full_lock(tp, 0);
11150
11151         if (!netif_running(tp->dev)) {
11152                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11153                 tg3_full_unlock(tp);
11154                 rtnl_unlock();
11155                 return;
11156         }
11157
11158         tg3_full_unlock(tp);
11159
11160         tg3_phy_stop(tp);
11161
11162         tg3_netif_stop(tp);
11163
11164         tg3_full_lock(tp, 1);
11165
11166         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11167                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11168                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11169                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11170                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11171         }
11172
11173         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11174         err = tg3_init_hw(tp, true);
11175         if (err)
11176                 goto out;
11177
11178         tg3_netif_start(tp);
11179
11180 out:
11181         tg3_full_unlock(tp);
11182
11183         if (!err)
11184                 tg3_phy_start(tp);
11185
11186         tg3_flag_clear(tp, RESET_TASK_PENDING);
11187         rtnl_unlock();
11188 }
11189
11190 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11191 {
11192         irq_handler_t fn;
11193         unsigned long flags;
11194         char *name;
11195         struct tg3_napi *tnapi = &tp->napi[irq_num];
11196
11197         if (tp->irq_cnt == 1)
11198                 name = tp->dev->name;
11199         else {
11200                 name = &tnapi->irq_lbl[0];
11201                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11202                         snprintf(name, IFNAMSIZ,
11203                                  "%s-txrx-%d", tp->dev->name, irq_num);
11204                 else if (tnapi->tx_buffers)
11205                         snprintf(name, IFNAMSIZ,
11206                                  "%s-tx-%d", tp->dev->name, irq_num);
11207                 else if (tnapi->rx_rcb)
11208                         snprintf(name, IFNAMSIZ,
11209                                  "%s-rx-%d", tp->dev->name, irq_num);
11210                 else
11211                         snprintf(name, IFNAMSIZ,
11212                                  "%s-%d", tp->dev->name, irq_num);
11213                 name[IFNAMSIZ-1] = 0;
11214         }
11215
11216         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11217                 fn = tg3_msi;
11218                 if (tg3_flag(tp, 1SHOT_MSI))
11219                         fn = tg3_msi_1shot;
11220                 flags = 0;
11221         } else {
11222                 fn = tg3_interrupt;
11223                 if (tg3_flag(tp, TAGGED_STATUS))
11224                         fn = tg3_interrupt_tagged;
11225                 flags = IRQF_SHARED;
11226         }
11227
11228         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11229 }
11230
11231 static int tg3_test_interrupt(struct tg3 *tp)
11232 {
11233         struct tg3_napi *tnapi = &tp->napi[0];
11234         struct net_device *dev = tp->dev;
11235         int err, i, intr_ok = 0;
11236         u32 val;
11237
11238         if (!netif_running(dev))
11239                 return -ENODEV;
11240
11241         tg3_disable_ints(tp);
11242
11243         free_irq(tnapi->irq_vec, tnapi);
11244
11245         /*
11246          * Turn off MSI one shot mode.  Otherwise this test has no
11247          * observable way to know whether the interrupt was delivered.
11248          */
11249         if (tg3_flag(tp, 57765_PLUS)) {
11250                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11251                 tw32(MSGINT_MODE, val);
11252         }
11253
11254         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11255                           IRQF_SHARED, dev->name, tnapi);
11256         if (err)
11257                 return err;
11258
11259         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11260         tg3_enable_ints(tp);
11261
11262         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11263                tnapi->coal_now);
11264
11265         for (i = 0; i < 5; i++) {
11266                 u32 int_mbox, misc_host_ctrl;
11267
11268                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11269                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11270
11271                 if ((int_mbox != 0) ||
11272                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11273                         intr_ok = 1;
11274                         break;
11275                 }
11276
11277                 if (tg3_flag(tp, 57765_PLUS) &&
11278                     tnapi->hw_status->status_tag != tnapi->last_tag)
11279                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11280
11281                 msleep(10);
11282         }
11283
11284         tg3_disable_ints(tp);
11285
11286         free_irq(tnapi->irq_vec, tnapi);
11287
11288         err = tg3_request_irq(tp, 0);
11289
11290         if (err)
11291                 return err;
11292
11293         if (intr_ok) {
11294                 /* Reenable MSI one shot mode. */
11295                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11296                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11297                         tw32(MSGINT_MODE, val);
11298                 }
11299                 return 0;
11300         }
11301
11302         return -EIO;
11303 }
11304
11305 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11306  * INTx mode is successfully restored.
11307  */
11308 static int tg3_test_msi(struct tg3 *tp)
11309 {
11310         int err;
11311         u16 pci_cmd;
11312
11313         if (!tg3_flag(tp, USING_MSI))
11314                 return 0;
11315
11316         /* Turn off SERR reporting in case MSI terminates with Master
11317          * Abort.
11318          */
11319         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11320         pci_write_config_word(tp->pdev, PCI_COMMAND,
11321                               pci_cmd & ~PCI_COMMAND_SERR);
11322
11323         err = tg3_test_interrupt(tp);
11324
11325         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11326
11327         if (!err)
11328                 return 0;
11329
11330         /* other failures */
11331         if (err != -EIO)
11332                 return err;
11333
11334         /* MSI test failed, go back to INTx mode */
11335         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11336                     "to INTx mode. Please report this failure to the PCI "
11337                     "maintainer and include system chipset information\n");
11338
11339         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11340
11341         pci_disable_msi(tp->pdev);
11342
11343         tg3_flag_clear(tp, USING_MSI);
11344         tp->napi[0].irq_vec = tp->pdev->irq;
11345
11346         err = tg3_request_irq(tp, 0);
11347         if (err)
11348                 return err;
11349
11350         /* Need to reset the chip because the MSI cycle may have terminated
11351          * with Master Abort.
11352          */
11353         tg3_full_lock(tp, 1);
11354
11355         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11356         err = tg3_init_hw(tp, true);
11357
11358         tg3_full_unlock(tp);
11359
11360         if (err)
11361                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11362
11363         return err;
11364 }
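/* The PCI_COMMAND save/modify/restore pattern used above, shown in
 * isolation (sketch; example_mask_serr() is a hypothetical helper, not
 * part of the driver):
 */
#if 0
static void example_mask_serr(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_SERR);
	/* ... run the operation that may end in a Master Abort ... */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);	/* restore SERR */
}
#endif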
11365
11366 static int tg3_request_firmware(struct tg3 *tp)
11367 {
11368         const struct tg3_firmware_hdr *fw_hdr;
11369
11370         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11371                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11372                            tp->fw_needed);
11373                 return -ENOENT;
11374         }
11375
11376         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11377
11378         /* Firmware blob starts with version numbers, followed by
11379          * start address and _full_ length including BSS sections
11380          * (which must be at least as long as the actual data).
11381          */
11382
11383         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11384         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11385                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11386                            tp->fw_len, tp->fw_needed);
11387                 release_firmware(tp->fw);
11388                 tp->fw = NULL;
11389                 return -EINVAL;
11390         }
11391
11392         /* We no longer need firmware; we have it. */
11393         tp->fw_needed = NULL;
11394         return 0;
11395 }
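/* For reference, the canonical request_firmware() lifecycle this
 * function follows (sketch; "example.bin" is a placeholder blob name,
 * not one the driver actually requests):
 */
#if 0
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example.bin", dev);
	if (err)
		return err;	/* blob not found under /lib/firmware */

	/* ... validate fw->data / fw->size, copy out what is needed ... */

	release_firmware(fw);	/* tg3 instead parks the blob in tp->fw */
	return 0;
}
#endif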
11396
11397 static u32 tg3_irq_count(struct tg3 *tp)
11398 {
11399         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11400
11401         if (irq_cnt > 1) {
11402                 /* We want as many rx rings enabled as there are cpus.
11403                  * In multiqueue MSI-X mode, the first MSI-X vector
11404                  * only deals with link interrupts, etc, so we add
11405                  * one to the number of vectors we are requesting.
11406                  */
11407                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11408         }
11409
11410         return irq_cnt;
11411 }
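/* Worked example: rxq_cnt = 4 and txq_cnt = 1 yield
 * irq_cnt = min(max(4, 1) + 1, tp->irq_max) = 5 (assuming irq_max >= 5):
 * four ring vectors plus the dedicated vector 0 for link and error
 * events.  With a single queue the "+ 1" branch is skipped and one
 * vector carries everything.
 */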
11412
11413 static bool tg3_enable_msix(struct tg3 *tp)
11414 {
11415         int i, rc;
11416         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11417
11418         tp->txq_cnt = tp->txq_req;
11419         tp->rxq_cnt = tp->rxq_req;
11420         if (!tp->rxq_cnt)
11421                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11422         if (tp->rxq_cnt > tp->rxq_max)
11423                 tp->rxq_cnt = tp->rxq_max;
11424
11425         /* Disable multiple TX rings by default.  Simple round-robin hardware
11426          * scheduling of the TX rings can cause starvation of rings with
11427          * small packets when other rings have TSO or jumbo packets.
11428          */
11429         if (!tp->txq_req)
11430                 tp->txq_cnt = 1;
11431
11432         tp->irq_cnt = tg3_irq_count(tp);
11433
11434         for (i = 0; i < tp->irq_max; i++) {
11435                 msix_ent[i].entry  = i;
11436                 msix_ent[i].vector = 0;
11437         }
11438
11439         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11440         if (rc < 0) {
11441                 return false;
11442         } else if (rc < tp->irq_cnt) {
11443                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11444                               tp->irq_cnt, rc);
11445                 tp->irq_cnt = rc;
11446                 tp->rxq_cnt = max(rc - 1, 1);
11447                 if (tp->txq_cnt)
11448                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11449         }
11450
11451         for (i = 0; i < tp->irq_max; i++)
11452                 tp->napi[i].irq_vec = msix_ent[i].vector;
11453
11454         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11455                 pci_disable_msix(tp->pdev);
11456                 return false;
11457         }
11458
11459         if (tp->irq_cnt == 1)
11460                 return true;
11461
11462         tg3_flag_set(tp, ENABLE_RSS);
11463
11464         if (tp->txq_cnt > 1)
11465                 tg3_flag_set(tp, ENABLE_TSS);
11466
11467         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11468
11469         return true;
11470 }
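/* pci_enable_msix_range(pdev, entries, minvec, maxvec) returns the
 * number of vectors actually granted (anywhere in [minvec, maxvec]) or
 * a negative errno.  The partial-grant branch above therefore shrinks
 * irq_cnt to what was given, keeps vector 0 for link events (hence the
 * "rc - 1" rx queues), and caps txq_cnt to match.
 */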
11471
11472 static void tg3_ints_init(struct tg3 *tp)
11473 {
11474         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11475             !tg3_flag(tp, TAGGED_STATUS)) {
11476                 /* All MSI-supporting chips should support tagged
11477                  * status.  Warn and fall back to INTx if not.
11478                  */
11479                 netdev_warn(tp->dev,
11480                             "MSI without TAGGED_STATUS? Not using MSI\n");
11481                 goto defcfg;
11482         }
11483
11484         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11485                 tg3_flag_set(tp, USING_MSIX);
11486         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11487                 tg3_flag_set(tp, USING_MSI);
11488
11489         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11490                 u32 msi_mode = tr32(MSGINT_MODE);
11491                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11492                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11493                 if (!tg3_flag(tp, 1SHOT_MSI))
11494                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11495                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11496         }
11497 defcfg:
11498         if (!tg3_flag(tp, USING_MSIX)) {
11499                 tp->irq_cnt = 1;
11500                 tp->napi[0].irq_vec = tp->pdev->irq;
11501         }
11502
11503         if (tp->irq_cnt == 1) {
11504                 tp->txq_cnt = 1;
11505                 tp->rxq_cnt = 1;
11506                 netif_set_real_num_tx_queues(tp->dev, 1);
11507                 netif_set_real_num_rx_queues(tp->dev, 1);
11508         }
11509 }
11510
11511 static void tg3_ints_fini(struct tg3 *tp)
11512 {
11513         if (tg3_flag(tp, USING_MSIX))
11514                 pci_disable_msix(tp->pdev);
11515         else if (tg3_flag(tp, USING_MSI))
11516                 pci_disable_msi(tp->pdev);
11517         tg3_flag_clear(tp, USING_MSI);
11518         tg3_flag_clear(tp, USING_MSIX);
11519         tg3_flag_clear(tp, ENABLE_RSS);
11520         tg3_flag_clear(tp, ENABLE_TSS);
11521 }
11522
11523 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11524                      bool init)
11525 {
11526         struct net_device *dev = tp->dev;
11527         int i, err;
11528
11529         /*
11530          * Set up interrupts first so we know how
11531          * many NAPI resources to allocate.
11532          */
11533         tg3_ints_init(tp);
11534
11535         tg3_rss_check_indir_tbl(tp);
11536
11537         /* The placement of this call is tied
11538          * to the setup and use of Host TX descriptors.
11539          */
11540         err = tg3_alloc_consistent(tp);
11541         if (err)
11542                 goto out_ints_fini;
11543
11544         tg3_napi_init(tp);
11545
11546         tg3_napi_enable(tp);
11547
11548         for (i = 0; i < tp->irq_cnt; i++) {
11549                 err = tg3_request_irq(tp, i);
11550                 if (err) {
11551                         for (i--; i >= 0; i--) {
11552                                 struct tg3_napi *tnapi = &tp->napi[i];
11553
11554                                 free_irq(tnapi->irq_vec, tnapi);
11555                         }
11556                         goto out_napi_fini;
11557                 }
11558         }
11559
11560         tg3_full_lock(tp, 0);
11561
11562         if (init)
11563                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11564
11565         err = tg3_init_hw(tp, reset_phy);
11566         if (err) {
11567                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11568                 tg3_free_rings(tp);
11569         }
11570
11571         tg3_full_unlock(tp);
11572
11573         if (err)
11574                 goto out_free_irq;
11575
11576         if (test_irq && tg3_flag(tp, USING_MSI)) {
11577                 err = tg3_test_msi(tp);
11578
11579                 if (err) {
11580                         tg3_full_lock(tp, 0);
11581                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11582                         tg3_free_rings(tp);
11583                         tg3_full_unlock(tp);
11584
11585                         goto out_napi_fini;
11586                 }
11587
11588                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11589                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11590
11591                         tw32(PCIE_TRANSACTION_CFG,
11592                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11593                 }
11594         }
11595
11596         tg3_phy_start(tp);
11597
11598         tg3_hwmon_open(tp);
11599
11600         tg3_full_lock(tp, 0);
11601
11602         tg3_timer_start(tp);
11603         tg3_flag_set(tp, INIT_COMPLETE);
11604         tg3_enable_ints(tp);
11605
11606         tg3_ptp_resume(tp);
11607
11608         tg3_full_unlock(tp);
11609
11610         netif_tx_start_all_queues(dev);
11611
11612         /*
11613          * If the loopback feature was turned on while the device was
11614          * down, reset it now to make sure it is installed properly.
11615          */
11616         if (dev->features & NETIF_F_LOOPBACK)
11617                 tg3_set_loopback(dev, dev->features);
11618
11619         return 0;
11620
11621 out_free_irq:
11622         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11623                 struct tg3_napi *tnapi = &tp->napi[i];
11624                 free_irq(tnapi->irq_vec, tnapi);
11625         }
11626
11627 out_napi_fini:
11628         tg3_napi_disable(tp);
11629         tg3_napi_fini(tp);
11630         tg3_free_consistent(tp);
11631
11632 out_ints_fini:
11633         tg3_ints_fini(tp);
11634
11635         return err;
11636 }
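/* The error labels above unwind strictly in reverse order of setup:
 * IRQs, then NAPI and DMA-consistent memory, then the MSI/MSI-X state.
 * This is the kernel's standard goto-ladder idiom, reduced to its shape
 * below (sketch with hypothetical step_a()/step_b() helpers):
 */
#if 0
int example_setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;	/* later failures undo earlier steps */
	return 0;

undo_a:
	undo_step_a();
out:
	return err;
}
#endif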
11637
11638 static void tg3_stop(struct tg3 *tp)
11639 {
11640         int i;
11641
11642         tg3_reset_task_cancel(tp);
11643         tg3_netif_stop(tp);
11644
11645         tg3_timer_stop(tp);
11646
11647         tg3_hwmon_close(tp);
11648
11649         tg3_phy_stop(tp);
11650
11651         tg3_full_lock(tp, 1);
11652
11653         tg3_disable_ints(tp);
11654
11655         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11656         tg3_free_rings(tp);
11657         tg3_flag_clear(tp, INIT_COMPLETE);
11658
11659         tg3_full_unlock(tp);
11660
11661         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11662                 struct tg3_napi *tnapi = &tp->napi[i];
11663                 free_irq(tnapi->irq_vec, tnapi);
11664         }
11665
11666         tg3_ints_fini(tp);
11667
11668         tg3_napi_fini(tp);
11669
11670         tg3_free_consistent(tp);
11671 }
11672
11673 static int tg3_open(struct net_device *dev)
11674 {
11675         struct tg3 *tp = netdev_priv(dev);
11676         int err;
11677
11678         if (tp->pcierr_recovery) {
11679                 netdev_err(dev, "Failed to open device. PCI error recovery "
11680                            "in progress\n");
11681                 return -EAGAIN;
11682         }
11683
11684         if (tp->fw_needed) {
11685                 err = tg3_request_firmware(tp);
11686                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11687                         if (err) {
11688                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11689                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11690                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11691                                 netdev_warn(tp->dev, "EEE capability restored\n");
11692                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11693                         }
11694                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11695                         if (err)
11696                                 return err;
11697                 } else if (err) {
11698                         netdev_warn(tp->dev, "TSO capability disabled\n");
11699                         tg3_flag_clear(tp, TSO_CAPABLE);
11700                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11701                         netdev_notice(tp->dev, "TSO capability restored\n");
11702                         tg3_flag_set(tp, TSO_CAPABLE);
11703                 }
11704         }
11705
11706         tg3_carrier_off(tp);
11707
11708         err = tg3_power_up(tp);
11709         if (err)
11710                 return err;
11711
11712         tg3_full_lock(tp, 0);
11713
11714         tg3_disable_ints(tp);
11715         tg3_flag_clear(tp, INIT_COMPLETE);
11716
11717         tg3_full_unlock(tp);
11718
11719         err = tg3_start(tp,
11720                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11721                         true, true);
11722         if (err) {
11723                 tg3_frob_aux_power(tp, false);
11724                 pci_set_power_state(tp->pdev, PCI_D3hot);
11725         }
11726
11727         return err;
11728 }
11729
11730 static int tg3_close(struct net_device *dev)
11731 {
11732         struct tg3 *tp = netdev_priv(dev);
11733
11734         if (tp->pcierr_recovery) {
11735                 netdev_err(dev, "Failed to close device. PCI error recovery "
11736                            "in progress\n");
11737                 return -EAGAIN;
11738         }
11739
11740         tg3_stop(tp);
11741
11742         if (pci_device_is_present(tp->pdev)) {
11743                 tg3_power_down_prepare(tp);
11744
11745                 tg3_carrier_off(tp);
11746         }
11747         return 0;
11748 }
11749
11750 static inline u64 get_stat64(tg3_stat64_t *val)
11751 {
11752         return ((u64)val->high << 32) | ((u64)val->low);
11753 }
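/* Example: high = 0x1 and low = 0xffffffff combine to 0x1ffffffff.  The
 * hardware exports every statistics counter as two 32-bit halves; this
 * helper stitches them back into a single u64 (the cast on "high" must
 * happen before the shift, or the top bits would be lost).
 */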
11754
11755 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11756 {
11757         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11758
11759         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11760             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11761              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11762                 u32 val;
11763
11764                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11765                         tg3_writephy(tp, MII_TG3_TEST1,
11766                                      val | MII_TG3_TEST1_CRC_EN);
11767                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11768                 } else
11769                         val = 0;
11770
11771                 tp->phy_crc_errors += val;
11772
11773                 return tp->phy_crc_errors;
11774         }
11775
11776         return get_stat64(&hw_stats->rx_fcs_errors);
11777 }
11778
11779 #define ESTAT_ADD(member) \
11780         estats->member =        old_estats->member + \
11781                                 get_stat64(&hw_stats->member)
11782
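/* ESTAT_ADD(rx_octets), for instance, expands to
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 *
 * i.e. every ethtool counter reported below is the pre-reset total saved
 * in estats_prev plus whatever the hardware has accumulated since the
 * last chip reset.
 */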
11783 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11784 {
11785         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11786         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11787
11788         ESTAT_ADD(rx_octets);
11789         ESTAT_ADD(rx_fragments);
11790         ESTAT_ADD(rx_ucast_packets);
11791         ESTAT_ADD(rx_mcast_packets);
11792         ESTAT_ADD(rx_bcast_packets);
11793         ESTAT_ADD(rx_fcs_errors);
11794         ESTAT_ADD(rx_align_errors);
11795         ESTAT_ADD(rx_xon_pause_rcvd);
11796         ESTAT_ADD(rx_xoff_pause_rcvd);
11797         ESTAT_ADD(rx_mac_ctrl_rcvd);
11798         ESTAT_ADD(rx_xoff_entered);
11799         ESTAT_ADD(rx_frame_too_long_errors);
11800         ESTAT_ADD(rx_jabbers);
11801         ESTAT_ADD(rx_undersize_packets);
11802         ESTAT_ADD(rx_in_length_errors);
11803         ESTAT_ADD(rx_out_length_errors);
11804         ESTAT_ADD(rx_64_or_less_octet_packets);
11805         ESTAT_ADD(rx_65_to_127_octet_packets);
11806         ESTAT_ADD(rx_128_to_255_octet_packets);
11807         ESTAT_ADD(rx_256_to_511_octet_packets);
11808         ESTAT_ADD(rx_512_to_1023_octet_packets);
11809         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11810         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11811         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11812         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11813         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11814
11815         ESTAT_ADD(tx_octets);
11816         ESTAT_ADD(tx_collisions);
11817         ESTAT_ADD(tx_xon_sent);
11818         ESTAT_ADD(tx_xoff_sent);
11819         ESTAT_ADD(tx_flow_control);
11820         ESTAT_ADD(tx_mac_errors);
11821         ESTAT_ADD(tx_single_collisions);
11822         ESTAT_ADD(tx_mult_collisions);
11823         ESTAT_ADD(tx_deferred);
11824         ESTAT_ADD(tx_excessive_collisions);
11825         ESTAT_ADD(tx_late_collisions);
11826         ESTAT_ADD(tx_collide_2times);
11827         ESTAT_ADD(tx_collide_3times);
11828         ESTAT_ADD(tx_collide_4times);
11829         ESTAT_ADD(tx_collide_5times);
11830         ESTAT_ADD(tx_collide_6times);
11831         ESTAT_ADD(tx_collide_7times);
11832         ESTAT_ADD(tx_collide_8times);
11833         ESTAT_ADD(tx_collide_9times);
11834         ESTAT_ADD(tx_collide_10times);
11835         ESTAT_ADD(tx_collide_11times);
11836         ESTAT_ADD(tx_collide_12times);
11837         ESTAT_ADD(tx_collide_13times);
11838         ESTAT_ADD(tx_collide_14times);
11839         ESTAT_ADD(tx_collide_15times);
11840         ESTAT_ADD(tx_ucast_packets);
11841         ESTAT_ADD(tx_mcast_packets);
11842         ESTAT_ADD(tx_bcast_packets);
11843         ESTAT_ADD(tx_carrier_sense_errors);
11844         ESTAT_ADD(tx_discards);
11845         ESTAT_ADD(tx_errors);
11846
11847         ESTAT_ADD(dma_writeq_full);
11848         ESTAT_ADD(dma_write_prioq_full);
11849         ESTAT_ADD(rxbds_empty);
11850         ESTAT_ADD(rx_discards);
11851         ESTAT_ADD(rx_errors);
11852         ESTAT_ADD(rx_threshold_hit);
11853
11854         ESTAT_ADD(dma_readq_full);
11855         ESTAT_ADD(dma_read_prioq_full);
11856         ESTAT_ADD(tx_comp_queue_full);
11857
11858         ESTAT_ADD(ring_set_send_prod_index);
11859         ESTAT_ADD(ring_status_update);
11860         ESTAT_ADD(nic_irqs);
11861         ESTAT_ADD(nic_avoided_irqs);
11862         ESTAT_ADD(nic_tx_threshold_hit);
11863
11864         ESTAT_ADD(mbuf_lwm_thresh_hit);
11865 }
11866
11867 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11868 {
11869         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11870         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11871
11872         stats->rx_packets = old_stats->rx_packets +
11873                 get_stat64(&hw_stats->rx_ucast_packets) +
11874                 get_stat64(&hw_stats->rx_mcast_packets) +
11875                 get_stat64(&hw_stats->rx_bcast_packets);
11876
11877         stats->tx_packets = old_stats->tx_packets +
11878                 get_stat64(&hw_stats->tx_ucast_packets) +
11879                 get_stat64(&hw_stats->tx_mcast_packets) +
11880                 get_stat64(&hw_stats->tx_bcast_packets);
11881
11882         stats->rx_bytes = old_stats->rx_bytes +
11883                 get_stat64(&hw_stats->rx_octets);
11884         stats->tx_bytes = old_stats->tx_bytes +
11885                 get_stat64(&hw_stats->tx_octets);
11886
11887         stats->rx_errors = old_stats->rx_errors +
11888                 get_stat64(&hw_stats->rx_errors);
11889         stats->tx_errors = old_stats->tx_errors +
11890                 get_stat64(&hw_stats->tx_errors) +
11891                 get_stat64(&hw_stats->tx_mac_errors) +
11892                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11893                 get_stat64(&hw_stats->tx_discards);
11894
11895         stats->multicast = old_stats->multicast +
11896                 get_stat64(&hw_stats->rx_mcast_packets);
11897         stats->collisions = old_stats->collisions +
11898                 get_stat64(&hw_stats->tx_collisions);
11899
11900         stats->rx_length_errors = old_stats->rx_length_errors +
11901                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11902                 get_stat64(&hw_stats->rx_undersize_packets);
11903
11904         stats->rx_frame_errors = old_stats->rx_frame_errors +
11905                 get_stat64(&hw_stats->rx_align_errors);
11906         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11907                 get_stat64(&hw_stats->tx_discards);
11908         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11909                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11910
11911         stats->rx_crc_errors = old_stats->rx_crc_errors +
11912                 tg3_calc_crc_errors(tp);
11913
11914         stats->rx_missed_errors = old_stats->rx_missed_errors +
11915                 get_stat64(&hw_stats->rx_discards);
11916
11917         stats->rx_dropped = tp->rx_dropped;
11918         stats->tx_dropped = tp->tx_dropped;
11919 }
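/* Note the asymmetry at the end of tg3_get_nstats(): rx_dropped and
 * tx_dropped come straight from software counters rather than being
 * accumulated on top of net_stats_prev, since they count drops made by
 * the driver itself and live in struct tg3, not in the hardware
 * statistics block.
 */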
11920
11921 static int tg3_get_regs_len(struct net_device *dev)
11922 {
11923         return TG3_REG_BLK_SIZE;
11924 }
11925
11926 static void tg3_get_regs(struct net_device *dev,
11927                 struct ethtool_regs *regs, void *_p)
11928 {
11929         struct tg3 *tp = netdev_priv(dev);
11930
11931         regs->version = 0;
11932
11933         memset(_p, 0, TG3_REG_BLK_SIZE);
11934
11935         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11936                 return;
11937
11938         tg3_full_lock(tp, 0);
11939
11940         tg3_dump_legacy_regs(tp, (u32 *)_p);
11941
11942         tg3_full_unlock(tp);
11943 }
11944
11945 static int tg3_get_eeprom_len(struct net_device *dev)
11946 {
11947         struct tg3 *tp = netdev_priv(dev);
11948
11949         return tp->nvram_size;
11950 }
11951
11952 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11953 {
11954         struct tg3 *tp = netdev_priv(dev);
11955         int ret, cpmu_restore = 0;
11956         u8  *pd;
11957         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11958         __be32 val;
11959
11960         if (tg3_flag(tp, NO_NVRAM))
11961                 return -EINVAL;
11962
11963         offset = eeprom->offset;
11964         len = eeprom->len;
11965         eeprom->len = 0;
11966
11967         eeprom->magic = TG3_EEPROM_MAGIC;
11968
11969         /* Override clock, link aware and link idle modes */
11970         if (tg3_flag(tp, CPMU_PRESENT)) {
11971                 cpmu_val = tr32(TG3_CPMU_CTRL);
11972                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11973                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11974                         tw32(TG3_CPMU_CTRL, cpmu_val &
11975                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11976                                              CPMU_CTRL_LINK_IDLE_MODE));
11977                         cpmu_restore = 1;
11978                 }
11979         }
11980         tg3_override_clk(tp);
11981
11982         if (offset & 3) {
11983                 /* adjustments to start on required 4 byte boundary */
11984                 b_offset = offset & 3;
11985                 b_count = 4 - b_offset;
11986                 if (b_count > len) {
11987                         /* i.e. offset=1 len=2 */
11988                         b_count = len;
11989                 }
11990                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11991                 if (ret)
11992                         goto eeprom_done;
11993                 memcpy(data, ((char *)&val) + b_offset, b_count);
11994                 len -= b_count;
11995                 offset += b_count;
11996                 eeprom->len += b_count;
11997         }
11998
11999         /* read bytes up to the last 4 byte boundary */
12000         pd = &data[eeprom->len];
12001         for (i = 0; i < (len - (len & 3)); i += 4) {
12002                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12003                 if (ret) {
12004                         if (i)
12005                                 i -= 4;
12006                         eeprom->len += i;
12007                         goto eeprom_done;
12008                 }
12009                 memcpy(pd + i, &val, 4);
12010                 if (need_resched()) {
12011                         if (signal_pending(current)) {
12012                                 eeprom->len += i;
12013                                 ret = -EINTR;
12014                                 goto eeprom_done;
12015                         }
12016                         cond_resched();
12017                 }
12018         }
12019         eeprom->len += i;
12020
12021         if (len & 3) {
12022                 /* read last bytes not ending on 4 byte boundary */
12023                 pd = &data[eeprom->len];
12024                 b_count = len & 3;
12025                 b_offset = offset + len - b_count;
12026                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12027                 if (ret)
12028                         goto eeprom_done;
12029                 memcpy(pd, &val, b_count);
12030                 eeprom->len += b_count;
12031         }
12032         ret = 0;
12033
12034 eeprom_done:
12035         /* Restore clock, link aware and link idle modes */
12036         tg3_restore_clk(tp);
12037         if (cpmu_restore)
12038                 tw32(TG3_CPMU_CTRL, cpmu_val);
12039
12040         return ret;
12041 }
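/* Worked example for the unaligned head handled above: offset = 5 and
 * len = 10 give b_offset = 5 & 3 = 1 and b_count = 4 - 1 = 3, so one
 * word is read at offset 4 and its bytes 1..3 are copied out.  The main
 * loop then proceeds word-aligned at offset 8 (one full word), and the
 * trailing "len & 3" block picks up the final 3 bytes at offsets 12..14.
 */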
12042
12043 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12044 {
12045         struct tg3 *tp = netdev_priv(dev);
12046         int ret;
12047         u32 offset, len, b_offset, odd_len;
12048         u8 *buf;
12049         __be32 start = 0, end;
12050
12051         if (tg3_flag(tp, NO_NVRAM) ||
12052             eeprom->magic != TG3_EEPROM_MAGIC)
12053                 return -EINVAL;
12054
12055         offset = eeprom->offset;
12056         len = eeprom->len;
12057
12058         if ((b_offset = (offset & 3))) {
12059                 /* adjustments to start on required 4 byte boundary */
12060                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12061                 if (ret)
12062                         return ret;
12063                 len += b_offset;
12064                 offset &= ~3;
12065                 if (len < 4)
12066                         len = 4;
12067         }
12068
12069         odd_len = 0;
12070         if (len & 3) {
12071                 /* adjustments to end on required 4 byte boundary */
12072                 odd_len = 1;
12073                 len = (len + 3) & ~3;
12074                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12075                 if (ret)
12076                         return ret;
12077         }
12078
12079         buf = data;
12080         if (b_offset || odd_len) {
12081                 buf = kmalloc(len, GFP_KERNEL);
12082                 if (!buf)
12083                         return -ENOMEM;
12084                 if (b_offset)
12085                         memcpy(buf, &start, 4);
12086                 if (odd_len)
12087                         memcpy(buf+len-4, &end, 4);
12088                 memcpy(buf + b_offset, data, eeprom->len);
12089         }
12090
12091         ret = tg3_nvram_write_block(tp, offset, len, buf);
12092
12093         if (buf != data)
12094                 kfree(buf);
12095
12096         return ret;
12097 }
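/* The write side widens unaligned requests to whole 32-bit words via
 * read-modify-write.  Example: offset = 6 and len = 3 pull in the word
 * at offset 4 ("start") and the word at offset 8 ("end"), build an
 * 8-byte bounce buffer covering offsets 4..11, overlay the caller's
 * 3 bytes at buffer offset b_offset = 2, and write the whole aligned
 * block back.
 */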
12098
12099 static int tg3_get_link_ksettings(struct net_device *dev,
12100                                   struct ethtool_link_ksettings *cmd)
12101 {
12102         struct tg3 *tp = netdev_priv(dev);
12103         u32 supported, advertising;
12104
12105         if (tg3_flag(tp, USE_PHYLIB)) {
12106                 struct phy_device *phydev;
12107                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12108                         return -EAGAIN;
12109                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12110                 phy_ethtool_ksettings_get(phydev, cmd);
12111
12112                 return 0;
12113         }
12114
12115         supported = (SUPPORTED_Autoneg);
12116
12117         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12118                 supported |= (SUPPORTED_1000baseT_Half |
12119                               SUPPORTED_1000baseT_Full);
12120
12121         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12122                 supported |= (SUPPORTED_100baseT_Half |
12123                               SUPPORTED_100baseT_Full |
12124                               SUPPORTED_10baseT_Half |
12125                               SUPPORTED_10baseT_Full |
12126                               SUPPORTED_TP);
12127                 cmd->base.port = PORT_TP;
12128         } else {
12129                 supported |= SUPPORTED_FIBRE;
12130                 cmd->base.port = PORT_FIBRE;
12131         }
12132         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12133                                                 supported);
12134
12135         advertising = tp->link_config.advertising;
12136         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12137                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12138                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12139                                 advertising |= ADVERTISED_Pause;
12140                         } else {
12141                                 advertising |= ADVERTISED_Pause |
12142                                         ADVERTISED_Asym_Pause;
12143                         }
12144                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12145                         advertising |= ADVERTISED_Asym_Pause;
12146                 }
12147         }
12148         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12149                                                 advertising);
12150
12151         if (netif_running(dev) && tp->link_up) {
12152                 cmd->base.speed = tp->link_config.active_speed;
12153                 cmd->base.duplex = tp->link_config.active_duplex;
12154                 ethtool_convert_legacy_u32_to_link_mode(
12155                         cmd->link_modes.lp_advertising,
12156                         tp->link_config.rmt_adv);
12157
12158                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12160                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12161                         else
12162                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12163                 }
12164         } else {
12165                 cmd->base.speed = SPEED_UNKNOWN;
12166                 cmd->base.duplex = DUPLEX_UNKNOWN;
12167                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12168         }
12169         cmd->base.phy_address = tp->phy_addr;
12170         cmd->base.autoneg = tp->link_config.autoneg;
12171         return 0;
12172 }
12173
12174 static int tg3_set_link_ksettings(struct net_device *dev,
12175                                   const struct ethtool_link_ksettings *cmd)
12176 {
12177         struct tg3 *tp = netdev_priv(dev);
12178         u32 speed = cmd->base.speed;
12179         u32 advertising;
12180
12181         if (tg3_flag(tp, USE_PHYLIB)) {
12182                 struct phy_device *phydev;
12183                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12184                         return -EAGAIN;
12185                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12186                 return phy_ethtool_ksettings_set(phydev, cmd);
12187         }
12188
12189         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12190             cmd->base.autoneg != AUTONEG_DISABLE)
12191                 return -EINVAL;
12192
12193         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12194             cmd->base.duplex != DUPLEX_FULL &&
12195             cmd->base.duplex != DUPLEX_HALF)
12196                 return -EINVAL;
12197
12198         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12199                                                 cmd->link_modes.advertising);
12200
12201         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12202                 u32 mask = ADVERTISED_Autoneg |
12203                            ADVERTISED_Pause |
12204                            ADVERTISED_Asym_Pause;
12205
12206                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12207                         mask |= ADVERTISED_1000baseT_Half |
12208                                 ADVERTISED_1000baseT_Full;
12209
12210                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12211                         mask |= ADVERTISED_100baseT_Half |
12212                                 ADVERTISED_100baseT_Full |
12213                                 ADVERTISED_10baseT_Half |
12214                                 ADVERTISED_10baseT_Full |
12215                                 ADVERTISED_TP;
12216                 else
12217                         mask |= ADVERTISED_FIBRE;
12218
12219                 if (advertising & ~mask)
12220                         return -EINVAL;
12221
12222                 mask &= (ADVERTISED_1000baseT_Half |
12223                          ADVERTISED_1000baseT_Full |
12224                          ADVERTISED_100baseT_Half |
12225                          ADVERTISED_100baseT_Full |
12226                          ADVERTISED_10baseT_Half |
12227                          ADVERTISED_10baseT_Full);
12228
12229                 advertising &= mask;
12230         } else {
12231                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12232                         if (speed != SPEED_1000)
12233                                 return -EINVAL;
12234
12235                         if (cmd->base.duplex != DUPLEX_FULL)
12236                                 return -EINVAL;
12237                 } else {
12238                         if (speed != SPEED_100 &&
12239                             speed != SPEED_10)
12240                                 return -EINVAL;
12241                 }
12242         }
12243
12244         tg3_full_lock(tp, 0);
12245
12246         tp->link_config.autoneg = cmd->base.autoneg;
12247         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12248                 tp->link_config.advertising = (advertising |
12249                                               ADVERTISED_Autoneg);
12250                 tp->link_config.speed = SPEED_UNKNOWN;
12251                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12252         } else {
12253                 tp->link_config.advertising = 0;
12254                 tp->link_config.speed = speed;
12255                 tp->link_config.duplex = cmd->base.duplex;
12256         }
12257
12258         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12259
12260         tg3_warn_mgmt_link_flap(tp);
12261
12262         if (netif_running(dev))
12263                 tg3_setup_phy(tp, true);
12264
12265         tg3_full_unlock(tp);
12266
12267         return 0;
12268 }
12269
12270 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12271 {
12272         struct tg3 *tp = netdev_priv(dev);
12273
12274         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12275         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12276         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12277         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12278 }
12279
12280 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12281 {
12282         struct tg3 *tp = netdev_priv(dev);
12283
12284         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12285                 wol->supported = WAKE_MAGIC;
12286         else
12287                 wol->supported = 0;
12288         wol->wolopts = 0;
12289         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12290                 wol->wolopts = WAKE_MAGIC;
12291         memset(&wol->sopass, 0, sizeof(wol->sopass));
12292 }
12293
12294 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12295 {
12296         struct tg3 *tp = netdev_priv(dev);
12297         struct device *dp = &tp->pdev->dev;
12298
12299         if (wol->wolopts & ~WAKE_MAGIC)
12300                 return -EINVAL;
12301         if ((wol->wolopts & WAKE_MAGIC) &&
12302             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12303                 return -EINVAL;
12304
12305         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12306
12307         if (device_may_wakeup(dp))
12308                 tg3_flag_set(tp, WOL_ENABLE);
12309         else
12310                 tg3_flag_clear(tp, WOL_ENABLE);
12311
12312         return 0;
12313 }
12314
12315 static u32 tg3_get_msglevel(struct net_device *dev)
12316 {
12317         struct tg3 *tp = netdev_priv(dev);
12318         return tp->msg_enable;
12319 }
12320
12321 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12322 {
12323         struct tg3 *tp = netdev_priv(dev);
12324         tp->msg_enable = value;
12325 }
12326
12327 static int tg3_nway_reset(struct net_device *dev)
12328 {
12329         struct tg3 *tp = netdev_priv(dev);
12330         int r;
12331
12332         if (!netif_running(dev))
12333                 return -EAGAIN;
12334
12335         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12336                 return -EINVAL;
12337
12338         tg3_warn_mgmt_link_flap(tp);
12339
12340         if (tg3_flag(tp, USE_PHYLIB)) {
12341                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12342                         return -EAGAIN;
12343                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12344         } else {
12345                 u32 bmcr;
12346
12347                 spin_lock_bh(&tp->lock);
12348                 r = -EINVAL;
12349                 tg3_readphy(tp, MII_BMCR, &bmcr);
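                /* double read: the first pass is presumably a dummy to
                 * flush a stale BMCR value before the result is trusted */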
12350                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12351                     ((bmcr & BMCR_ANENABLE) ||
12352                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12353                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12354                                                    BMCR_ANENABLE);
12355                         r = 0;
12356                 }
12357                 spin_unlock_bh(&tp->lock);
12358         }
12359
12360         return r;
12361 }
12362
12363 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12364 {
12365         struct tg3 *tp = netdev_priv(dev);
12366
12367         ering->rx_max_pending = tp->rx_std_ring_mask;
12368         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12369                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12370         else
12371                 ering->rx_jumbo_max_pending = 0;
12372
12373         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12374
12375         ering->rx_pending = tp->rx_pending;
12376         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12377                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12378         else
12379                 ering->rx_jumbo_pending = 0;
12380
12381         ering->tx_pending = tp->napi[0].tx_pending;
12382 }
12383
12384 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12385 {
12386         struct tg3 *tp = netdev_priv(dev);
12387         int i, irq_sync = 0, err = 0;
12388
12389         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12390             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12391             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12392             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12393             (tg3_flag(tp, TSO_BUG) &&
12394              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12395                 return -EINVAL;
12396
12397         if (netif_running(dev)) {
12398                 tg3_phy_stop(tp);
12399                 tg3_netif_stop(tp);
12400                 irq_sync = 1;
12401         }
12402
12403         tg3_full_lock(tp, irq_sync);
12404
12405         tp->rx_pending = ering->rx_pending;
12406
12407         if (tg3_flag(tp, MAX_RXPEND_64) &&
12408             tp->rx_pending > 63)
12409                 tp->rx_pending = 63;
12410
12411         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12412                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12413
12414         for (i = 0; i < tp->irq_max; i++)
12415                 tp->napi[i].tx_pending = ering->tx_pending;
12416
12417         if (netif_running(dev)) {
12418                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12419                 err = tg3_restart_hw(tp, false);
12420                 if (!err)
12421                         tg3_netif_start(tp);
12422         }
12423
12424         tg3_full_unlock(tp);
12425
12426         if (irq_sync && !err)
12427                 tg3_phy_start(tp);
12428
12429         return err;
12430 }
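/* The "tx_pending <= MAX_SKB_FRAGS" bound rejected above guards against
 * a tx ring too small to hold even one maximally fragmented skb; the
 * tripled minimum for TSO_BUG chips presumably leaves headroom for the
 * extra descriptors needed when the driver has to segment packets
 * itself.
 */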
12431
12432 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12433 {
12434         struct tg3 *tp = netdev_priv(dev);
12435
12436         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12437
12438         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12439                 epause->rx_pause = 1;
12440         else
12441                 epause->rx_pause = 0;
12442
12443         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12444                 epause->tx_pause = 1;
12445         else
12446                 epause->tx_pause = 0;
12447 }
12448
12449 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12450 {
12451         struct tg3 *tp = netdev_priv(dev);
12452         int err = 0;
12453
12454         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12455                 tg3_warn_mgmt_link_flap(tp);
12456
12457         if (tg3_flag(tp, USE_PHYLIB)) {
12458                 u32 newadv;
12459                 struct phy_device *phydev;
12460
12461                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12462
12463                 if (!(phydev->supported & SUPPORTED_Pause) ||
12464                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12465                      (epause->rx_pause != epause->tx_pause)))
12466                         return -EINVAL;
12467
12468                 tp->link_config.flowctrl = 0;
12469                 if (epause->rx_pause) {
12470                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12471
12472                         if (epause->tx_pause) {
12473                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12474                                 newadv = ADVERTISED_Pause;
12475                         } else
12476                                 newadv = ADVERTISED_Pause |
12477                                          ADVERTISED_Asym_Pause;
12478                 } else if (epause->tx_pause) {
12479                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12480                         newadv = ADVERTISED_Asym_Pause;
12481                 } else
12482                         newadv = 0;
12483
12484                 if (epause->autoneg)
12485                         tg3_flag_set(tp, PAUSE_AUTONEG);
12486                 else
12487                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12488
12489                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12490                         u32 oldadv = phydev->advertising &
12491                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12492                         if (oldadv != newadv) {
12493                                 phydev->advertising &=
12494                                         ~(ADVERTISED_Pause |
12495                                           ADVERTISED_Asym_Pause);
12496                                 phydev->advertising |= newadv;
12497                                 if (phydev->autoneg) {
12498                                         /*
12499                                          * Always renegotiate the link to
12500                                          * inform our link partner of our
12501                                          * flow control settings, even if the
12502                                          * flow control is forced.  Let
12503                                          * tg3_adjust_link() do the final
12504                                          * flow control setup.
12505                                          */
12506                                         return phy_start_aneg(phydev);
12507                                 }
12508                         }
12509
12510                         if (!epause->autoneg)
12511                                 tg3_setup_flow_control(tp, 0, 0);
12512                 } else {
12513                         tp->link_config.advertising &=
12514                                         ~(ADVERTISED_Pause |
12515                                           ADVERTISED_Asym_Pause);
12516                         tp->link_config.advertising |= newadv;
12517                 }
12518         } else {
12519                 int irq_sync = 0;
12520
12521                 if (netif_running(dev)) {
12522                         tg3_netif_stop(tp);
12523                         irq_sync = 1;
12524                 }
12525
12526                 tg3_full_lock(tp, irq_sync);
12527
12528                 if (epause->autoneg)
12529                         tg3_flag_set(tp, PAUSE_AUTONEG);
12530                 else
12531                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12532                 if (epause->rx_pause)
12533                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12534                 else
12535                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12536                 if (epause->tx_pause)
12537                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12538                 else
12539                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12540
12541                 if (netif_running(dev)) {
12542                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12543                         err = tg3_restart_hw(tp, false);
12544                         if (!err)
12545                                 tg3_netif_start(tp);
12546                 }
12547
12548                 tg3_full_unlock(tp);
12549         }
12550
12551         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12552
12553         return err;
12554 }
12555
12556 static int tg3_get_sset_count(struct net_device *dev, int sset)
12557 {
12558         switch (sset) {
12559         case ETH_SS_TEST:
12560                 return TG3_NUM_TEST;
12561         case ETH_SS_STATS:
12562                 return TG3_NUM_STATS;
12563         default:
12564                 return -EOPNOTSUPP;
12565         }
12566 }
12567
12568 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12569                          u32 *rules __always_unused)
12570 {
12571         struct tg3 *tp = netdev_priv(dev);
12572
12573         if (!tg3_flag(tp, SUPPORT_MSIX))
12574                 return -EOPNOTSUPP;
12575
12576         switch (info->cmd) {
12577         case ETHTOOL_GRXRINGS:
12578                 if (netif_running(tp->dev))
12579                         info->data = tp->rxq_cnt;
12580                 else {
12581                         info->data = num_online_cpus();
12582                         if (info->data > TG3_RSS_MAX_NUM_QS)
12583                                 info->data = TG3_RSS_MAX_NUM_QS;
12584                 }
12585
12586                 return 0;
12587
12588         default:
12589                 return -EOPNOTSUPP;
12590         }
12591 }
12592
12593 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12594 {
12595         u32 size = 0;
12596         struct tg3 *tp = netdev_priv(dev);
12597
12598         if (tg3_flag(tp, SUPPORT_MSIX))
12599                 size = TG3_RSS_INDIR_TBL_SIZE;
12600
12601         return size;
12602 }
12603
12604 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12605 {
12606         struct tg3 *tp = netdev_priv(dev);
12607         int i;
12608
12609         if (hfunc)
12610                 *hfunc = ETH_RSS_HASH_TOP;
12611         if (!indir)
12612                 return 0;
12613
12614         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12615                 indir[i] = tp->rss_ind_tbl[i];
12616
12617         return 0;
12618 }
12619
12620 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12621                         const u8 hfunc)
12622 {
12623         struct tg3 *tp = netdev_priv(dev);
12624         size_t i;
12625
12626         /* Only the indirection table may be changed here; setting a
12627          * hash key or switching the hash function is unsupported.
12628          */
12629         if (key ||
12630             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12631                 return -EOPNOTSUPP;
12632
12633         if (!indir)
12634                 return 0;
12635
12636         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12637                 tp->rss_ind_tbl[i] = indir[i];
12638
12639         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12640                 return 0;
12641
12642         /* It is legal to write the indirection
12643          * table while the device is running.
12644          */
12645         tg3_full_lock(tp, 0);
12646         tg3_rss_write_indir_tbl(tp);
12647         tg3_full_unlock(tp);
12648
12649         return 0;
12650 }
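/* From userspace this path is typically driven by something like
 * "ethtool -X eth0 equal 4", which fills the indirection table so rx
 * flows spread evenly over four rings.  Only the table itself is
 * writable here; hash-key and hash-function changes are rejected with
 * -EOPNOTSUPP above.
 */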
12651
12652 static void tg3_get_channels(struct net_device *dev,
12653                              struct ethtool_channels *channel)
12654 {
12655         struct tg3 *tp = netdev_priv(dev);
12656         u32 deflt_qs = netif_get_num_default_rss_queues();
12657
12658         channel->max_rx = tp->rxq_max;
12659         channel->max_tx = tp->txq_max;
12660
12661         if (netif_running(dev)) {
12662                 channel->rx_count = tp->rxq_cnt;
12663                 channel->tx_count = tp->txq_cnt;
12664         } else {
12665                 if (tp->rxq_req)
12666                         channel->rx_count = tp->rxq_req;
12667                 else
12668                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12669
12670                 if (tp->txq_req)
12671                         channel->tx_count = tp->txq_req;
12672                 else
12673                         channel->tx_count = min(deflt_qs, tp->txq_max);
12674         }
12675 }
12676
12677 static int tg3_set_channels(struct net_device *dev,
12678                             struct ethtool_channels *channel)
12679 {
12680         struct tg3 *tp = netdev_priv(dev);
12681
12682         if (!tg3_flag(tp, SUPPORT_MSIX))
12683                 return -EOPNOTSUPP;
12684
12685         if (channel->rx_count > tp->rxq_max ||
12686             channel->tx_count > tp->txq_max)
12687                 return -EINVAL;
12688
12689         tp->rxq_req = channel->rx_count;
12690         tp->txq_req = channel->tx_count;
12691
12692         if (!netif_running(dev))
12693                 return 0;
12694
12695         tg3_stop(tp);
12696
12697         tg3_carrier_off(tp);
12698
12699         tg3_start(tp, true, false, false);
12700
12701         return 0;
12702 }
12703
12704 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12705 {
12706         switch (stringset) {
12707         case ETH_SS_STATS:
12708                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12709                 break;
12710         case ETH_SS_TEST:
12711                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12712                 break;
12713         default:
12714                 WARN_ON(1);     /* we need a WARN() */
12715                 break;
12716         }
12717 }
12718
12719 static int tg3_set_phys_id(struct net_device *dev,
12720                             enum ethtool_phys_id_state state)
12721 {
12722         struct tg3 *tp = netdev_priv(dev);
12723
12724         if (!netif_running(tp->dev))
12725                 return -EAGAIN;
12726
12727         switch (state) {
12728         case ETHTOOL_ID_ACTIVE:
12729                 return 1;       /* cycle on/off once per second */
12730
12731         case ETHTOOL_ID_ON:
12732                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12733                      LED_CTRL_1000MBPS_ON |
12734                      LED_CTRL_100MBPS_ON |
12735                      LED_CTRL_10MBPS_ON |
12736                      LED_CTRL_TRAFFIC_OVERRIDE |
12737                      LED_CTRL_TRAFFIC_BLINK |
12738                      LED_CTRL_TRAFFIC_LED);
12739                 break;
12740
12741         case ETHTOOL_ID_OFF:
12742                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12743                      LED_CTRL_TRAFFIC_OVERRIDE);
12744                 break;
12745
12746         case ETHTOOL_ID_INACTIVE:
12747                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12748                 break;
12749         }
12750
12751         return 0;
12752 }
12753
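      /* Copy the driver-maintained hardware statistics into the
       * ethtool buffer, or zero it if the stats block has not been
       * allocated yet.
       */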
12754 static void tg3_get_ethtool_stats(struct net_device *dev,
12755                                    struct ethtool_stats *estats, u64 *tmp_stats)
12756 {
12757         struct tg3 *tp = netdev_priv(dev);
12758
12759         if (tp->hw_stats)
12760                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12761         else
12762                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12763 }
12764
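      /* Read the VPD block, either out of NVRAM (preferring an
       * extended-VPD directory entry and falling back to the fixed VPD
       * offset) or, for other NVRAM layouts, through the PCI VPD
       * capability.  Returns a kmalloc'ed buffer the caller must
       * kfree() and stores its length in *vpdlen, or NULL on failure.
       */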
12765 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12766 {
12767         int i;
12768         __be32 *buf;
12769         u32 offset = 0, len = 0;
12770         u32 magic, val;
12771
12772         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12773                 return NULL;
12774
12775         if (magic == TG3_EEPROM_MAGIC) {
12776                 for (offset = TG3_NVM_DIR_START;
12777                      offset < TG3_NVM_DIR_END;
12778                      offset += TG3_NVM_DIRENT_SIZE) {
12779                         if (tg3_nvram_read(tp, offset, &val))
12780                                 return NULL;
12781
12782                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12783                             TG3_NVM_DIRTYPE_EXTVPD)
12784                                 break;
12785                 }
12786
12787                 if (offset != TG3_NVM_DIR_END) {
12788                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12789                         if (tg3_nvram_read(tp, offset + 4, &offset))
12790                                 return NULL;
12791
12792                         offset = tg3_nvram_logical_addr(tp, offset);
12793                 }
12794         }
12795
12796         if (!offset || !len) {
12797                 offset = TG3_NVM_VPD_OFF;
12798                 len = TG3_NVM_VPD_LEN;
12799         }
12800
12801         buf = kmalloc(len, GFP_KERNEL);
12802         if (buf == NULL)
12803                 return NULL;
12804
12805         if (magic == TG3_EEPROM_MAGIC) {
12806                 for (i = 0; i < len; i += 4) {
12807                         /* The data is in little-endian format in NVRAM.
12808                          * Use the big-endian read routines to preserve
12809                          * the byte order as it exists in NVRAM.
12810                          */
12811                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12812                                 goto error;
12813                 }
12814         } else {
12815                 u8 *ptr;
12816                 ssize_t cnt;
12817                 unsigned int pos = 0;
12818
12819                 ptr = (u8 *)&buf[0];
12820                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12821                         cnt = pci_read_vpd(tp->pdev, pos,
12822                                            len - pos, ptr);
12823                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12824                                 cnt = 0;
12825                         else if (cnt < 0)
12826                                 goto error;
12827                 }
12828                 if (pos != len)
12829                         goto error;
12830         }
12831
12832         *vpdlen = len;
12833
12834         return buf;
12835
12836 error:
12837         kfree(buf);
12838         return NULL;
12839 }
12840
12841 #define NVRAM_TEST_SIZE 0x100
12842 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12843 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12844 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12845 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12846 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12847 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12848 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12849 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12850
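      /* NVRAM self-test: read the image back and verify its integrity.
       * Selfboot firmware images are validated with a byte checksum or
       * a per-byte parity check; legacy images are validated with CRCs
       * over the bootstrap and manufacturing blocks plus the VPD
       * checksum keyword, when present.
       */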
12851 static int tg3_test_nvram(struct tg3 *tp)
12852 {
12853         u32 csum, magic, len;
12854         __be32 *buf;
12855         int i, j, k, err = 0, size;
12856
12857         if (tg3_flag(tp, NO_NVRAM))
12858                 return 0;
12859
12860         if (tg3_nvram_read(tp, 0, &magic) != 0)
12861                 return -EIO;
12862
12863         if (magic == TG3_EEPROM_MAGIC)
12864                 size = NVRAM_TEST_SIZE;
12865         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12866                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12867                     TG3_EEPROM_SB_FORMAT_1) {
12868                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12869                         case TG3_EEPROM_SB_REVISION_0:
12870                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12871                                 break;
12872                         case TG3_EEPROM_SB_REVISION_2:
12873                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12874                                 break;
12875                         case TG3_EEPROM_SB_REVISION_3:
12876                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12877                                 break;
12878                         case TG3_EEPROM_SB_REVISION_4:
12879                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12880                                 break;
12881                         case TG3_EEPROM_SB_REVISION_5:
12882                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12883                                 break;
12884                         case TG3_EEPROM_SB_REVISION_6:
12885                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12886                                 break;
12887                         default:
12888                                 return -EIO;
12889                         }
12890                 } else
12891                         return 0;
12892         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12893                 size = NVRAM_SELFBOOT_HW_SIZE;
12894         else
12895                 return -EIO;
12896
12897         buf = kmalloc(size, GFP_KERNEL);
12898         if (buf == NULL)
12899                 return -ENOMEM;
12900
12901         err = -EIO;
12902         for (i = 0, j = 0; i < size; i += 4, j++) {
12903                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12904                 if (err)
12905                         break;
12906         }
12907         if (i < size)
12908                 goto out;
12909
12910         /* Selfboot format */
12911         magic = be32_to_cpu(buf[0]);
12912         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12913             TG3_EEPROM_MAGIC_FW) {
12914                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12915
12916                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12917                     TG3_EEPROM_SB_REVISION_2) {
12918                         /* For rev 2, the csum doesn't include the MBA. */
12919                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12920                                 csum8 += buf8[i];
12921                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12922                                 csum8 += buf8[i];
12923                 } else {
12924                         for (i = 0; i < size; i++)
12925                                 csum8 += buf8[i];
12926                 }
12927
12928                 if (csum8 == 0) {
12929                         err = 0;
12930                         goto out;
12931                 }
12932
12933                 err = -EIO;
12934                 goto out;
12935         }
12936
12937         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12938             TG3_EEPROM_MAGIC_HW) {
12939                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12940                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12941                 u8 *buf8 = (u8 *) buf;
12942
12943                 /* Separate the parity bits and the data bytes.  */
12944                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12945                         if ((i == 0) || (i == 8)) {
12946                                 int l;
12947                                 u8 msk;
12948
12949                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12950                                         parity[k++] = buf8[i] & msk;
12951                                 i++;
12952                         } else if (i == 16) {
12953                                 int l;
12954                                 u8 msk;
12955
12956                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12957                                         parity[k++] = buf8[i] & msk;
12958                                 i++;
12959
12960                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12961                                         parity[k++] = buf8[i] & msk;
12962                                 i++;
12963                         }
12964                         data[j++] = buf8[i];
12965                 }
12966
12967                 err = -EIO;
12968                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12969                         u8 hw8 = hweight8(data[i]);
12970
12971                         if ((hw8 & 0x1) && parity[i])
12972                                 goto out;
12973                         else if (!(hw8 & 0x1) && !parity[i])
12974                                 goto out;
12975                 }
12976                 err = 0;
12977                 goto out;
12978         }
12979
12980         err = -EIO;
12981
12982         /* Bootstrap checksum at offset 0x10 */
12983         csum = calc_crc((unsigned char *) buf, 0x10);
12984         if (csum != le32_to_cpu(buf[0x10/4]))
12985                 goto out;
12986
12987         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12988         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12989         if (csum != le32_to_cpu(buf[0xfc/4]))
12990                 goto out;
12991
12992         kfree(buf);
12993
12994         buf = tg3_vpd_readblock(tp, &len);
12995         if (!buf)
12996                 return -ENOMEM;
12997
12998         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12999         if (i > 0) {
13000                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13001                 if (j < 0)
13002                         goto out;
13003
13004                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13005                         goto out;
13006
13007                 i += PCI_VPD_LRDT_TAG_SIZE;
13008                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13009                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13010                 if (j > 0) {
13011                         u8 csum8 = 0;
13012
13013                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13014
13015                         for (i = 0; i <= j; i++)
13016                                 csum8 += ((u8 *)buf)[i];
13017
13018                         if (csum8)
13019                                 goto out;
13020                 }
13021         }
13022
13023         err = 0;
13024
13025 out:
13026         kfree(buf);
13027         return err;
13028 }
13029
13030 #define TG3_SERDES_TIMEOUT_SEC  2
13031 #define TG3_COPPER_TIMEOUT_SEC  6
13032
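      /* Link self-test: poll the link state once per second, allowing
       * copper PHYs more time than SerDes to finish autonegotiation,
       * and fail if no link comes up before the timeout.
       */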
13033 static int tg3_test_link(struct tg3 *tp)
13034 {
13035         int i, max;
13036
13037         if (!netif_running(tp->dev))
13038                 return -ENODEV;
13039
13040         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13041                 max = TG3_SERDES_TIMEOUT_SEC;
13042         else
13043                 max = TG3_COPPER_TIMEOUT_SEC;
13044
13045         for (i = 0; i < max; i++) {
13046                 if (tp->link_up)
13047                         return 0;
13048
13049                 if (msleep_interruptible(1000))
13050                         break;
13051         }
13052
13053         return -EIO;
13054 }
13055
13056 /* Only test the commonly used registers */
13057 static int tg3_test_registers(struct tg3 *tp)
13058 {
13059         int i, is_5705, is_5750;
13060         u32 offset, read_mask, write_mask, val, save_val, read_val;
13061         static struct {
13062                 u16 offset;
13063                 u16 flags;
13064 #define TG3_FL_5705     0x1
13065 #define TG3_FL_NOT_5705 0x2
13066 #define TG3_FL_NOT_5788 0x4
13067 #define TG3_FL_NOT_5750 0x8
13068                 u32 read_mask;
13069                 u32 write_mask;
13070         } reg_tbl[] = {
13071                 /* MAC Control Registers */
13072                 { MAC_MODE, TG3_FL_NOT_5705,
13073                         0x00000000, 0x00ef6f8c },
13074                 { MAC_MODE, TG3_FL_5705,
13075                         0x00000000, 0x01ef6b8c },
13076                 { MAC_STATUS, TG3_FL_NOT_5705,
13077                         0x03800107, 0x00000000 },
13078                 { MAC_STATUS, TG3_FL_5705,
13079                         0x03800100, 0x00000000 },
13080                 { MAC_ADDR_0_HIGH, 0x0000,
13081                         0x00000000, 0x0000ffff },
13082                 { MAC_ADDR_0_LOW, 0x0000,
13083                         0x00000000, 0xffffffff },
13084                 { MAC_RX_MTU_SIZE, 0x0000,
13085                         0x00000000, 0x0000ffff },
13086                 { MAC_TX_MODE, 0x0000,
13087                         0x00000000, 0x00000070 },
13088                 { MAC_TX_LENGTHS, 0x0000,
13089                         0x00000000, 0x00003fff },
13090                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13091                         0x00000000, 0x000007fc },
13092                 { MAC_RX_MODE, TG3_FL_5705,
13093                         0x00000000, 0x000007dc },
13094                 { MAC_HASH_REG_0, 0x0000,
13095                         0x00000000, 0xffffffff },
13096                 { MAC_HASH_REG_1, 0x0000,
13097                         0x00000000, 0xffffffff },
13098                 { MAC_HASH_REG_2, 0x0000,
13099                         0x00000000, 0xffffffff },
13100                 { MAC_HASH_REG_3, 0x0000,
13101                         0x00000000, 0xffffffff },
13102
13103                 /* Receive Data and Receive BD Initiator Control Registers. */
13104                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13105                         0x00000000, 0xffffffff },
13106                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13107                         0x00000000, 0xffffffff },
13108                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13109                         0x00000000, 0x00000003 },
13110                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13111                         0x00000000, 0xffffffff },
13112                 { RCVDBDI_STD_BD+0, 0x0000,
13113                         0x00000000, 0xffffffff },
13114                 { RCVDBDI_STD_BD+4, 0x0000,
13115                         0x00000000, 0xffffffff },
13116                 { RCVDBDI_STD_BD+8, 0x0000,
13117                         0x00000000, 0xffff0002 },
13118                 { RCVDBDI_STD_BD+0xc, 0x0000,
13119                         0x00000000, 0xffffffff },
13120
13121                 /* Receive BD Initiator Control Registers. */
13122                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13123                         0x00000000, 0xffffffff },
13124                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13125                         0x00000000, 0x000003ff },
13126                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13127                         0x00000000, 0xffffffff },
13128
13129                 /* Host Coalescing Control Registers. */
13130                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13131                         0x00000000, 0x00000004 },
13132                 { HOSTCC_MODE, TG3_FL_5705,
13133                         0x00000000, 0x000000f6 },
13134                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13135                         0x00000000, 0xffffffff },
13136                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13137                         0x00000000, 0x000003ff },
13138                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13139                         0x00000000, 0xffffffff },
13140                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13141                         0x00000000, 0x000003ff },
13142                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13143                         0x00000000, 0xffffffff },
13144                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13145                         0x00000000, 0x000000ff },
13146                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13147                         0x00000000, 0xffffffff },
13148                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13149                         0x00000000, 0x000000ff },
13150                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13151                         0x00000000, 0xffffffff },
13152                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13153                         0x00000000, 0xffffffff },
13154                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13155                         0x00000000, 0xffffffff },
13156                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13157                         0x00000000, 0x000000ff },
13158                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13159                         0x00000000, 0xffffffff },
13160                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13161                         0x00000000, 0x000000ff },
13162                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13163                         0x00000000, 0xffffffff },
13164                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13165                         0x00000000, 0xffffffff },
13166                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13167                         0x00000000, 0xffffffff },
13168                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13169                         0x00000000, 0xffffffff },
13170                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13171                         0x00000000, 0xffffffff },
13172                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13173                         0xffffffff, 0x00000000 },
13174                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13175                         0xffffffff, 0x00000000 },
13176
13177                 /* Buffer Manager Control Registers. */
13178                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13179                         0x00000000, 0x007fff80 },
13180                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13181                         0x00000000, 0x007fffff },
13182                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13183                         0x00000000, 0x0000003f },
13184                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13185                         0x00000000, 0x000001ff },
13186                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13187                         0x00000000, 0x000001ff },
13188                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13189                         0xffffffff, 0x00000000 },
13190                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13191                         0xffffffff, 0x00000000 },
13192
13193                 /* Mailbox Registers */
13194                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13195                         0x00000000, 0x000001ff },
13196                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13197                         0x00000000, 0x000001ff },
13198                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13199                         0x00000000, 0x000007ff },
13200                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13201                         0x00000000, 0x000001ff },
13202
13203                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13204         };
13205
13206         is_5705 = is_5750 = 0;
13207         if (tg3_flag(tp, 5705_PLUS)) {
13208                 is_5705 = 1;
13209                 if (tg3_flag(tp, 5750_PLUS))
13210                         is_5750 = 1;
13211         }
13212
13213         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13214                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13215                         continue;
13216
13217                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13218                         continue;
13219
13220                 if (tg3_flag(tp, IS_5788) &&
13221                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13222                         continue;
13223
13224                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13225                         continue;
13226
13227                 offset = (u32) reg_tbl[i].offset;
13228                 read_mask = reg_tbl[i].read_mask;
13229                 write_mask = reg_tbl[i].write_mask;
13230
13231                 /* Save the original register content */
13232                 save_val = tr32(offset);
13233
13234                 /* Determine the read-only value. */
13235                 read_val = save_val & read_mask;
13236
13237                 /* Write zero to the register, then make sure the read-only bits
13238                  * are not changed and the read/write bits are all zeros.
13239                  */
13240                 tw32(offset, 0);
13241
13242                 val = tr32(offset);
13243
13244                 /* Test the read-only and read/write bits. */
13245                 if (((val & read_mask) != read_val) || (val & write_mask))
13246                         goto out;
13247
13248                 /* Write ones to all the bits defined by RdMask and WrMask, then
13249                  * make sure the read-only bits are not changed and the
13250                  * read/write bits are all ones.
13251                  */
13252                 tw32(offset, read_mask | write_mask);
13253
13254                 val = tr32(offset);
13255
13256                 /* Test the read-only bits. */
13257                 if ((val & read_mask) != read_val)
13258                         goto out;
13259
13260                 /* Test the read/write bits. */
13261                 if ((val & write_mask) != write_mask)
13262                         goto out;
13263
13264                 tw32(offset, save_val);
13265         }
13266
13267         return 0;
13268
13269 out:
13270         if (netif_msg_hw(tp))
13271                 netdev_err(tp->dev,
13272                            "Register test failed at offset %x\n", offset);
13273         tw32(offset, save_val);
13274         return -EIO;
13275 }
13276
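      /* Write each test pattern to every 32-bit word in
       * [offset, offset + len) of NIC-local memory and read it back,
       * failing on the first mismatch.
       */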
13277 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13278 {
13279         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13280         int i;
13281         u32 j;
13282
13283         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13284                 for (j = 0; j < len; j += 4) {
13285                         u32 val;
13286
13287                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13288                         tg3_read_mem(tp, offset + j, &val);
13289                         if (val != test_pattern[i])
13290                                 return -EIO;
13291                 }
13292         }
13293         return 0;
13294 }
13295
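      /* Memory self-test: select the region table matching this ASIC
       * generation and pattern-test each region with tg3_do_mem_test().
       */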
13296 static int tg3_test_memory(struct tg3 *tp)
13297 {
13298         static struct mem_entry {
13299                 u32 offset;
13300                 u32 len;
13301         } mem_tbl_570x[] = {
13302                 { 0x00000000, 0x00b50},
13303                 { 0x00002000, 0x1c000},
13304                 { 0xffffffff, 0x00000}
13305         }, mem_tbl_5705[] = {
13306                 { 0x00000100, 0x0000c},
13307                 { 0x00000200, 0x00008},
13308                 { 0x00004000, 0x00800},
13309                 { 0x00006000, 0x01000},
13310                 { 0x00008000, 0x02000},
13311                 { 0x00010000, 0x0e000},
13312                 { 0xffffffff, 0x00000}
13313         }, mem_tbl_5755[] = {
13314                 { 0x00000200, 0x00008},
13315                 { 0x00004000, 0x00800},
13316                 { 0x00006000, 0x00800},
13317                 { 0x00008000, 0x02000},
13318                 { 0x00010000, 0x0c000},
13319                 { 0xffffffff, 0x00000}
13320         }, mem_tbl_5906[] = {
13321                 { 0x00000200, 0x00008},
13322                 { 0x00004000, 0x00400},
13323                 { 0x00006000, 0x00400},
13324                 { 0x00008000, 0x01000},
13325                 { 0x00010000, 0x01000},
13326                 { 0xffffffff, 0x00000}
13327         }, mem_tbl_5717[] = {
13328                 { 0x00000200, 0x00008},
13329                 { 0x00010000, 0x0a000},
13330                 { 0x00020000, 0x13c00},
13331                 { 0xffffffff, 0x00000}
13332         }, mem_tbl_57765[] = {
13333                 { 0x00000200, 0x00008},
13334                 { 0x00004000, 0x00800},
13335                 { 0x00006000, 0x09800},
13336                 { 0x00010000, 0x0a000},
13337                 { 0xffffffff, 0x00000}
13338         };
13339         struct mem_entry *mem_tbl;
13340         int err = 0;
13341         int i;
13342
13343         if (tg3_flag(tp, 5717_PLUS))
13344                 mem_tbl = mem_tbl_5717;
13345         else if (tg3_flag(tp, 57765_CLASS) ||
13346                  tg3_asic_rev(tp) == ASIC_REV_5762)
13347                 mem_tbl = mem_tbl_57765;
13348         else if (tg3_flag(tp, 5755_PLUS))
13349                 mem_tbl = mem_tbl_5755;
13350         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13351                 mem_tbl = mem_tbl_5906;
13352         else if (tg3_flag(tp, 5705_PLUS))
13353                 mem_tbl = mem_tbl_5705;
13354         else
13355                 mem_tbl = mem_tbl_570x;
13356
13357         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13358                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13359                 if (err)
13360                         break;
13361         }
13362
13363         return err;
13364 }
13365
13366 #define TG3_TSO_MSS             500
13367
13368 #define TG3_TSO_IP_HDR_LEN      20
13369 #define TG3_TSO_TCP_HDR_LEN     20
13370 #define TG3_TSO_TCP_OPT_LEN     12
13371
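      /* Canned loopback test frame: the Ethertype (0x0800) followed by
       * IPv4 and TCP headers, including a 12-byte TCP timestamp option.
       * The IP total length and TCP checksum handling are patched in
       * by tg3_run_loopback() before transmission.
       */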
13372 static const u8 tg3_tso_header[] = {
13373 0x08, 0x00,
13374 0x45, 0x00, 0x00, 0x00,
13375 0x00, 0x00, 0x40, 0x00,
13376 0x40, 0x06, 0x00, 0x00,
13377 0x0a, 0x00, 0x00, 0x01,
13378 0x0a, 0x00, 0x00, 0x02,
13379 0x0d, 0x00, 0xe0, 0x00,
13380 0x00, 0x00, 0x01, 0x00,
13381 0x00, 0x00, 0x02, 0x00,
13382 0x80, 0x10, 0x10, 0x00,
13383 0x14, 0x09, 0x00, 0x00,
13384 0x01, 0x01, 0x08, 0x0a,
13385 0x11, 0x11, 0x11, 0x11,
13386 0x11, 0x11, 0x11, 0x11,
13387 };
13388
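      /* Transmit a single test frame (or, for TSO, one super-frame the
       * hardware segments into multiple packets) through the current
       * loopback path and verify that every payload byte arrives
       * intact on the expected rx ring.
       */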
13389 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13390 {
13391         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13392         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13393         u32 budget;
13394         struct sk_buff *skb;
13395         u8 *tx_data, *rx_data;
13396         dma_addr_t map;
13397         int num_pkts, tx_len, rx_len, i, err;
13398         struct tg3_rx_buffer_desc *desc;
13399         struct tg3_napi *tnapi, *rnapi;
13400         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13401
13402         tnapi = &tp->napi[0];
13403         rnapi = &tp->napi[0];
13404         if (tp->irq_cnt > 1) {
13405                 if (tg3_flag(tp, ENABLE_RSS))
13406                         rnapi = &tp->napi[1];
13407                 if (tg3_flag(tp, ENABLE_TSS))
13408                         tnapi = &tp->napi[1];
13409         }
13410         coal_now = tnapi->coal_now | rnapi->coal_now;
13411
13412         err = -EIO;
13413
13414         tx_len = pktsz;
13415         skb = netdev_alloc_skb(tp->dev, tx_len);
13416         if (!skb)
13417                 return -ENOMEM;
13418
13419         tx_data = skb_put(skb, tx_len);
13420         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13421         memset(tx_data + ETH_ALEN, 0x0, 8);
13422
13423         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13424
13425         if (tso_loopback) {
13426                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13427
13428                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13429                               TG3_TSO_TCP_OPT_LEN;
13430
13431                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13432                        sizeof(tg3_tso_header));
13433                 mss = TG3_TSO_MSS;
13434
13435                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13436                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13437
13438                 /* Set the total length field in the IP header */
13439                 iph->tot_len = htons((u16)(mss + hdr_len));
13440
13441                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13442                               TXD_FLAG_CPU_POST_DMA);
13443
13444                 if (tg3_flag(tp, HW_TSO_1) ||
13445                     tg3_flag(tp, HW_TSO_2) ||
13446                     tg3_flag(tp, HW_TSO_3)) {
13447                         struct tcphdr *th;
13448                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13449                         th = (struct tcphdr *)&tx_data[val];
13450                         th->check = 0;
13451                 } else
13452                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13453
13454                 if (tg3_flag(tp, HW_TSO_3)) {
13455                         mss |= (hdr_len & 0xc) << 12;
13456                         if (hdr_len & 0x10)
13457                                 base_flags |= 0x00000010;
13458                         base_flags |= (hdr_len & 0x3e0) << 5;
13459                 } else if (tg3_flag(tp, HW_TSO_2))
13460                         mss |= hdr_len << 9;
13461                 else if (tg3_flag(tp, HW_TSO_1) ||
13462                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13463                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13464                 } else {
13465                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13466                 }
13467
13468                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13469         } else {
13470                 num_pkts = 1;
13471                 data_off = ETH_HLEN;
13472
13473                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13474                     tx_len > VLAN_ETH_FRAME_LEN)
13475                         base_flags |= TXD_FLAG_JMB_PKT;
13476         }
13477
13478         for (i = data_off; i < tx_len; i++)
13479                 tx_data[i] = (u8) (i & 0xff);
13480
13481         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13482         if (pci_dma_mapping_error(tp->pdev, map)) {
13483                 dev_kfree_skb(skb);
13484                 return -EIO;
13485         }
13486
13487         val = tnapi->tx_prod;
13488         tnapi->tx_buffers[val].skb = skb;
13489         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13490
13491         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13492                rnapi->coal_now);
13493
13494         udelay(10);
13495
13496         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13497
13498         budget = tg3_tx_avail(tnapi);
13499         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13500                             base_flags | TXD_FLAG_END, mss, 0)) {
13501                 tnapi->tx_buffers[val].skb = NULL;
13502                 dev_kfree_skb(skb);
13503                 return -EIO;
13504         }
13505
13506         tnapi->tx_prod++;
13507
13508         /* Sync BD data before updating mailbox */
13509         wmb();
13510
13511         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13512         tr32_mailbox(tnapi->prodmbox);
13513
13514         udelay(10);
13515
13516         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13517         for (i = 0; i < 35; i++) {
13518                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13519                        coal_now);
13520
13521                 udelay(10);
13522
13523                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13524                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13525                 if ((tx_idx == tnapi->tx_prod) &&
13526                     (rx_idx == (rx_start_idx + num_pkts)))
13527                         break;
13528         }
13529
13530         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13531         dev_kfree_skb(skb);
13532
13533         if (tx_idx != tnapi->tx_prod)
13534                 goto out;
13535
13536         if (rx_idx != rx_start_idx + num_pkts)
13537                 goto out;
13538
13539         val = data_off;
13540         while (rx_idx != rx_start_idx) {
13541                 desc = &rnapi->rx_rcb[rx_start_idx++];
13542                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13543                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13544
13545                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13546                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13547                         goto out;
13548
13549                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13550                          - ETH_FCS_LEN;
13551
13552                 if (!tso_loopback) {
13553                         if (rx_len != tx_len)
13554                                 goto out;
13555
13556                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13557                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13558                                         goto out;
13559                         } else {
13560                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13561                                         goto out;
13562                         }
13563                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13564                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13565                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13566                         goto out;
13567                 }
13568
13569                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13570                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13571                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13572                                              mapping);
13573                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13574                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13575                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13576                                              mapping);
13577                 } else
13578                         goto out;
13579
13580                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13581                                             PCI_DMA_FROMDEVICE);
13582
13583                 rx_data += TG3_RX_OFFSET(tp);
13584                 for (i = data_off; i < rx_len; i++, val++) {
13585                         if (*(rx_data + i) != (u8) (val & 0xff))
13586                                 goto out;
13587                 }
13588         }
13589
13590         err = 0;
13591
13592         /* tg3_free_rings will unmap and free the rx_data */
13593 out:
13594         return err;
13595 }
13596
13597 #define TG3_STD_LOOPBACK_FAILED         1
13598 #define TG3_JMB_LOOPBACK_FAILED         2
13599 #define TG3_TSO_LOOPBACK_FAILED         4
13600 #define TG3_LOOPBACK_FAILED \
13601         (TG3_STD_LOOPBACK_FAILED | \
13602          TG3_JMB_LOOPBACK_FAILED | \
13603          TG3_TSO_LOOPBACK_FAILED)
13604
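      /* Run the standard, TSO and jumbo loopback tests in MAC, PHY
       * and (optionally) external loopback modes, accumulating the
       * per-mode failure flags in data[].  The EEE capability flag is
       * masked off for the duration of the tests and restored on exit.
       */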
13605 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13606 {
13607         int err = -EIO;
13608         u32 eee_cap;
13609         u32 jmb_pkt_sz = 9000;
13610
13611         if (tp->dma_limit)
13612                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13613
13614         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13615         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13616
13617         if (!netif_running(tp->dev)) {
13618                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13619                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13620                 if (do_extlpbk)
13621                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13622                 goto done;
13623         }
13624
13625         err = tg3_reset_hw(tp, true);
13626         if (err) {
13627                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13628                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13629                 if (do_extlpbk)
13630                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13631                 goto done;
13632         }
13633
13634         if (tg3_flag(tp, ENABLE_RSS)) {
13635                 int i;
13636
13637                 /* Reroute all rx packets to the 1st queue */
13638                 for (i = MAC_RSS_INDIR_TBL_0;
13639                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13640                         tw32(i, 0x0);
13641         }
13642
13643         /* HW erratum - MAC loopback fails in some cases on the 5780.
13644          * Normal traffic and PHY loopback are not affected by the
13645          * erratum.  Also, the MAC loopback test is deprecated for
13646          * all newer ASIC revisions.
13647          */
13648         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13649             !tg3_flag(tp, CPMU_PRESENT)) {
13650                 tg3_mac_loopback(tp, true);
13651
13652                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13653                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13654
13655                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13656                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13657                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13658
13659                 tg3_mac_loopback(tp, false);
13660         }
13661
13662         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13663             !tg3_flag(tp, USE_PHYLIB)) {
13664                 int i;
13665
13666                 tg3_phy_lpbk_set(tp, 0, false);
13667
13668                 /* Wait for link */
13669                 for (i = 0; i < 100; i++) {
13670                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13671                                 break;
13672                         mdelay(1);
13673                 }
13674
13675                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13676                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13677                 if (tg3_flag(tp, TSO_CAPABLE) &&
13678                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13679                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13680                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13681                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13682                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13683
13684                 if (do_extlpbk) {
13685                         tg3_phy_lpbk_set(tp, 0, true);
13686
13687                         /* All link indications report up, but the hardware
13688                          * isn't really ready for about 20 msec.  Double it
13689                          * to be sure.
13690                          */
13691                         mdelay(40);
13692
13693                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13694                                 data[TG3_EXT_LOOPB_TEST] |=
13695                                                         TG3_STD_LOOPBACK_FAILED;
13696                         if (tg3_flag(tp, TSO_CAPABLE) &&
13697                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13698                                 data[TG3_EXT_LOOPB_TEST] |=
13699                                                         TG3_TSO_LOOPBACK_FAILED;
13700                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13701                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13702                                 data[TG3_EXT_LOOPB_TEST] |=
13703                                                         TG3_JMB_LOOPBACK_FAILED;
13704                 }
13705
13706                 /* Re-enable gphy autopowerdown. */
13707                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13708                         tg3_phy_toggle_apd(tp, true);
13709         }
13710
13711         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13712                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13713
13714 done:
13715         tp->phy_flags |= eee_cap;
13716
13717         return err;
13718 }
13719
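      /* ethtool self-test entry point ("ethtool -t").  The NVRAM and
       * link tests always run; offline testing additionally halts the
       * device for the register, memory, loopback and interrupt tests
       * and then restarts it.
       */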
13720 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13721                           u64 *data)
13722 {
13723         struct tg3 *tp = netdev_priv(dev);
13724         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13725
13726         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13727                 if (tg3_power_up(tp)) {
13728                         etest->flags |= ETH_TEST_FL_FAILED;
13729                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13730                         return;
13731                 }
13732                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13733         }
13734
13735         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13736
13737         if (tg3_test_nvram(tp) != 0) {
13738                 etest->flags |= ETH_TEST_FL_FAILED;
13739                 data[TG3_NVRAM_TEST] = 1;
13740         }
13741         if (!doextlpbk && tg3_test_link(tp)) {
13742                 etest->flags |= ETH_TEST_FL_FAILED;
13743                 data[TG3_LINK_TEST] = 1;
13744         }
13745         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13746                 int err, err2 = 0, irq_sync = 0;
13747
13748                 if (netif_running(dev)) {
13749                         tg3_phy_stop(tp);
13750                         tg3_netif_stop(tp);
13751                         irq_sync = 1;
13752                 }
13753
13754                 tg3_full_lock(tp, irq_sync);
13755                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13756                 err = tg3_nvram_lock(tp);
13757                 tg3_halt_cpu(tp, RX_CPU_BASE);
13758                 if (!tg3_flag(tp, 5705_PLUS))
13759                         tg3_halt_cpu(tp, TX_CPU_BASE);
13760                 if (!err)
13761                         tg3_nvram_unlock(tp);
13762
13763                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13764                         tg3_phy_reset(tp);
13765
13766                 if (tg3_test_registers(tp) != 0) {
13767                         etest->flags |= ETH_TEST_FL_FAILED;
13768                         data[TG3_REGISTER_TEST] = 1;
13769                 }
13770
13771                 if (tg3_test_memory(tp) != 0) {
13772                         etest->flags |= ETH_TEST_FL_FAILED;
13773                         data[TG3_MEMORY_TEST] = 1;
13774                 }
13775
13776                 if (doextlpbk)
13777                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13778
13779                 if (tg3_test_loopback(tp, data, doextlpbk))
13780                         etest->flags |= ETH_TEST_FL_FAILED;
13781
13782                 tg3_full_unlock(tp);
13783
13784                 if (tg3_test_interrupt(tp) != 0) {
13785                         etest->flags |= ETH_TEST_FL_FAILED;
13786                         data[TG3_INTERRUPT_TEST] = 1;
13787                 }
13788
13789                 tg3_full_lock(tp, 0);
13790
13791                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13792                 if (netif_running(dev)) {
13793                         tg3_flag_set(tp, INIT_COMPLETE);
13794                         err2 = tg3_restart_hw(tp, true);
13795                         if (!err2)
13796                                 tg3_netif_start(tp);
13797                 }
13798
13799                 tg3_full_unlock(tp);
13800
13801                 if (irq_sync && !err2)
13802                         tg3_phy_start(tp);
13803         }
13804         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13805                 tg3_power_down_prepare(tp);
13806
13807 }
13808
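      /* SIOCSHWTSTAMP handler: map the requested hwtstamp_config rx
       * filter onto the chip's PTP receive-filter control bits and
       * enable or disable tx timestamping accordingly.
       */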
13809 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13810 {
13811         struct tg3 *tp = netdev_priv(dev);
13812         struct hwtstamp_config stmpconf;
13813
13814         if (!tg3_flag(tp, PTP_CAPABLE))
13815                 return -EOPNOTSUPP;
13816
13817         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13818                 return -EFAULT;
13819
13820         if (stmpconf.flags)
13821                 return -EINVAL;
13822
13823         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13824             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13825                 return -ERANGE;
13826
13827         switch (stmpconf.rx_filter) {
13828         case HWTSTAMP_FILTER_NONE:
13829                 tp->rxptpctl = 0;
13830                 break;
13831         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13832                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13833                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13834                 break;
13835         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13836                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13837                                TG3_RX_PTP_CTL_SYNC_EVNT;
13838                 break;
13839         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13840                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13841                                TG3_RX_PTP_CTL_DELAY_REQ;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13845                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13849                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13853                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857                                TG3_RX_PTP_CTL_SYNC_EVNT;
13858                 break;
13859         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861                                TG3_RX_PTP_CTL_SYNC_EVNT;
13862                 break;
13863         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865                                TG3_RX_PTP_CTL_SYNC_EVNT;
13866                 break;
13867         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869                                TG3_RX_PTP_CTL_DELAY_REQ;
13870                 break;
13871         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873                                TG3_RX_PTP_CTL_DELAY_REQ;
13874                 break;
13875         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877                                TG3_RX_PTP_CTL_DELAY_REQ;
13878                 break;
13879         default:
13880                 return -ERANGE;
13881         }
13882
13883         if (netif_running(dev) && tp->rxptpctl)
13884                 tw32(TG3_RX_PTP_CTL,
13885                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13886
13887         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13888                 tg3_flag_set(tp, TX_TSTAMP_EN);
13889         else
13890                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13891
13892         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13893                 -EFAULT : 0;
13894 }
13895
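      /* SIOCGHWTSTAMP handler: reconstruct the current hwtstamp_config
       * from the cached rx PTP filter bits and the TX_TSTAMP_EN flag.
       */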
13896 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13897 {
13898         struct tg3 *tp = netdev_priv(dev);
13899         struct hwtstamp_config stmpconf;
13900
13901         if (!tg3_flag(tp, PTP_CAPABLE))
13902                 return -EOPNOTSUPP;
13903
13904         stmpconf.flags = 0;
13905         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13906                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13907
13908         switch (tp->rxptpctl) {
13909         case 0:
13910                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13911                 break;
13912         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13913                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13914                 break;
13915         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13916                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13917                 break;
13918         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13919                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13920                 break;
13921         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13922                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13923                 break;
13924         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13925                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13926                 break;
13927         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13935                 break;
13936         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13937                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13938                 break;
13939         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13940                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13941                 break;
13942         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13943                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13944                 break;
13945         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13947                 break;
13948         default:
13949                 WARN_ON_ONCE(1);
13950                 return -ERANGE;
13951         }
13952
13953         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13954                 -EFAULT : 0;
13955 }
13956
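      /* MII and timestamping ioctl handler.  With phylib attached the
       * request is forwarded to the PHY layer; otherwise MII register
       * accesses go through the internal PHY helpers under tp->lock.
       */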
13957 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13958 {
13959         struct mii_ioctl_data *data = if_mii(ifr);
13960         struct tg3 *tp = netdev_priv(dev);
13961         int err;
13962
13963         if (tg3_flag(tp, USE_PHYLIB)) {
13964                 struct phy_device *phydev;
13965                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13966                         return -EAGAIN;
13967                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13968                 return phy_mii_ioctl(phydev, ifr, cmd);
13969         }
13970
13971         switch (cmd) {
13972         case SIOCGMIIPHY:
13973                 data->phy_id = tp->phy_addr;
13974
13975                 /* fallthru */
13976         case SIOCGMIIREG: {
13977                 u32 mii_regval;
13978
13979                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13980                         break;                  /* We have no PHY */
13981
13982                 if (!netif_running(dev))
13983                         return -EAGAIN;
13984
13985                 spin_lock_bh(&tp->lock);
13986                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13987                                     data->reg_num & 0x1f, &mii_regval);
13988                 spin_unlock_bh(&tp->lock);
13989
13990                 data->val_out = mii_regval;
13991
13992                 return err;
13993         }
13994
13995         case SIOCSMIIREG:
13996                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13997                         break;                  /* We have no PHY */
13998
13999                 if (!netif_running(dev))
14000                         return -EAGAIN;
14001
14002                 spin_lock_bh(&tp->lock);
14003                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14004                                      data->reg_num & 0x1f, data->val_in);
14005                 spin_unlock_bh(&tp->lock);
14006
14007                 return err;
14008
14009         case SIOCSHWTSTAMP:
14010                 return tg3_hwtstamp_set(dev, ifr);
14011
14012         case SIOCGHWTSTAMP:
14013                 return tg3_hwtstamp_get(dev, ifr);
14014
14015         default:
14016                 /* do nothing */
14017                 break;
14018         }
14019         return -EOPNOTSUPP;
14020 }
14021
14022 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14023 {
14024         struct tg3 *tp = netdev_priv(dev);
14025
14026         memcpy(ec, &tp->coal, sizeof(*ec));
14027         return 0;
14028 }
14029
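      /* Validate and apply interrupt coalescing parameters
       * ("ethtool -C").  Only pre-5705 devices support the per-irq
       * tick and statistics-block intervals; on newer chips those
       * limits stay zero, so any nonzero request is rejected.
       */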
14030 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14031 {
14032         struct tg3 *tp = netdev_priv(dev);
14033         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14034         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14035
14036         if (!tg3_flag(tp, 5705_PLUS)) {
14037                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14038                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14039                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14040                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14041         }
14042
14043         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14044             (!ec->rx_coalesce_usecs) ||
14045             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14046             (!ec->tx_coalesce_usecs) ||
14047             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14048             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14049             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14050             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14051             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14052             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14053             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14054             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14055                 return -EINVAL;
14056
14057         /* Only copy relevant parameters, ignore all others. */
14058         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14059         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14060         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14061         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14062         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14063         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14064         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14065         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14066         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14067
14068         if (netif_running(dev)) {
14069                 tg3_full_lock(tp, 0);
14070                 __tg3_set_coalesce(tp, &tp->coal);
14071                 tg3_full_unlock(tp);
14072         }
14073         return 0;
14074 }
14075
14076 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14077 {
14078         struct tg3 *tp = netdev_priv(dev);
14079
14080         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14081                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14082                 return -EOPNOTSUPP;
14083         }
14084
14085         if (edata->advertised != tp->eee.advertised) {
14086                 netdev_warn(tp->dev,
14087                             "Direct manipulation of EEE advertisement is not supported\n");
14088                 return -EINVAL;
14089         }
14090
14091         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14092                 netdev_warn(tp->dev,
14093                             "Maximal Tx LPI timer supported is %#x\n",
14094                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14095                 return -EINVAL;
14096         }
14097
14098         tp->eee = *edata;
14099
14100         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14101         tg3_warn_mgmt_link_flap(tp);
14102
14103         if (netif_running(tp->dev)) {
14104                 tg3_full_lock(tp, 0);
14105                 tg3_setup_eee(tp);
14106                 tg3_phy_reset(tp);
14107                 tg3_full_unlock(tp);
14108         }
14109
14110         return 0;
14111 }
14112
14113 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14114 {
14115         struct tg3 *tp = netdev_priv(dev);
14116
14117         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14118                 netdev_warn(tp->dev,
14119                             "Board does not support EEE!\n");
14120                 return -EOPNOTSUPP;
14121         }
14122
14123         *edata = tp->eee;
14124         return 0;
14125 }
14126
14127 static const struct ethtool_ops tg3_ethtool_ops = {
14128         .get_drvinfo            = tg3_get_drvinfo,
14129         .get_regs_len           = tg3_get_regs_len,
14130         .get_regs               = tg3_get_regs,
14131         .get_wol                = tg3_get_wol,
14132         .set_wol                = tg3_set_wol,
14133         .get_msglevel           = tg3_get_msglevel,
14134         .set_msglevel           = tg3_set_msglevel,
14135         .nway_reset             = tg3_nway_reset,
14136         .get_link               = ethtool_op_get_link,
14137         .get_eeprom_len         = tg3_get_eeprom_len,
14138         .get_eeprom             = tg3_get_eeprom,
14139         .set_eeprom             = tg3_set_eeprom,
14140         .get_ringparam          = tg3_get_ringparam,
14141         .set_ringparam          = tg3_set_ringparam,
14142         .get_pauseparam         = tg3_get_pauseparam,
14143         .set_pauseparam         = tg3_set_pauseparam,
14144         .self_test              = tg3_self_test,
14145         .get_strings            = tg3_get_strings,
14146         .set_phys_id            = tg3_set_phys_id,
14147         .get_ethtool_stats      = tg3_get_ethtool_stats,
14148         .get_coalesce           = tg3_get_coalesce,
14149         .set_coalesce           = tg3_set_coalesce,
14150         .get_sset_count         = tg3_get_sset_count,
14151         .get_rxnfc              = tg3_get_rxnfc,
14152         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14153         .get_rxfh               = tg3_get_rxfh,
14154         .set_rxfh               = tg3_set_rxfh,
14155         .get_channels           = tg3_get_channels,
14156         .set_channels           = tg3_set_channels,
14157         .get_ts_info            = tg3_get_ts_info,
14158         .get_eee                = tg3_get_eee,
14159         .set_eee                = tg3_set_eee,
14160         .get_link_ksettings     = tg3_get_link_ksettings,
14161         .set_link_ksettings     = tg3_set_link_ksettings,
14162 };
14163
14164 static void tg3_get_stats64(struct net_device *dev,
14165                             struct rtnl_link_stats64 *stats)
14166 {
14167         struct tg3 *tp = netdev_priv(dev);
14168
14169         spin_lock_bh(&tp->lock);
14170         if (!tp->hw_stats) {
14171                 *stats = tp->net_stats_prev;
14172                 spin_unlock_bh(&tp->lock);
14173                 return;
14174         }
14175
14176         tg3_get_nstats(tp, stats);
14177         spin_unlock_bh(&tp->lock);
14178 }
14179
14180 static void tg3_set_rx_mode(struct net_device *dev)
14181 {
14182         struct tg3 *tp = netdev_priv(dev);
14183
14184         if (!netif_running(dev))
14185                 return;
14186
14187         tg3_full_lock(tp, 0);
14188         __tg3_set_rx_mode(dev);
14189         tg3_full_unlock(tp);
14190 }
14191
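/* Apply an MTU change: above the standard Ethernet payload size we
 * either enable the jumbo RX ring or, on 5780-class parts, trade away
 * TSO capability instead.
 */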
14192 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14193                                int new_mtu)
14194 {
14195         dev->mtu = new_mtu;
14196
14197         if (new_mtu > ETH_DATA_LEN) {
14198                 if (tg3_flag(tp, 5780_CLASS)) {
14199                         netdev_update_features(dev);
14200                         tg3_flag_clear(tp, TSO_CAPABLE);
14201                 } else {
14202                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14203                 }
14204         } else {
14205                 if (tg3_flag(tp, 5780_CLASS)) {
14206                         tg3_flag_set(tp, TSO_CAPABLE);
14207                         netdev_update_features(dev);
14208                 }
14209                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14210         }
14211 }
14212
14213 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14214 {
14215         struct tg3 *tp = netdev_priv(dev);
14216         int err;
14217         bool reset_phy = false;
14218
14219         if (!netif_running(dev)) {
14220                 /* We'll just catch it later when the
14221                  * device is brought up.
14222                  */
14223                 tg3_set_mtu(dev, tp, new_mtu);
14224                 return 0;
14225         }
14226
14227         tg3_phy_stop(tp);
14228
14229         tg3_netif_stop(tp);
14230
14231         tg3_set_mtu(dev, tp, new_mtu);
14232
14233         tg3_full_lock(tp, 1);
14234
14235         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14236
14237         /* Reset PHY, otherwise the read DMA engine will be left in a
14238          * mode that breaks all read requests into 256-byte transfers.
14239          */
14240         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14241             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14242             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243             tg3_asic_rev(tp) == ASIC_REV_5720)
14244                 reset_phy = true;
14245
14246         err = tg3_restart_hw(tp, reset_phy);
14247
14248         if (!err)
14249                 tg3_netif_start(tp);
14250
14251         tg3_full_unlock(tp);
14252
14253         if (!err)
14254                 tg3_phy_start(tp);
14255
14256         return err;
14257 }
14258
14259 static const struct net_device_ops tg3_netdev_ops = {
14260         .ndo_open               = tg3_open,
14261         .ndo_stop               = tg3_close,
14262         .ndo_start_xmit         = tg3_start_xmit,
14263         .ndo_get_stats64        = tg3_get_stats64,
14264         .ndo_validate_addr      = eth_validate_addr,
14265         .ndo_set_rx_mode        = tg3_set_rx_mode,
14266         .ndo_set_mac_address    = tg3_set_mac_addr,
14267         .ndo_do_ioctl           = tg3_ioctl,
14268         .ndo_tx_timeout         = tg3_tx_timeout,
14269         .ndo_change_mtu         = tg3_change_mtu,
14270         .ndo_fix_features       = tg3_fix_features,
14271         .ndo_set_features       = tg3_set_features,
14272 #ifdef CONFIG_NET_POLL_CONTROLLER
14273         .ndo_poll_controller    = tg3_poll_controller,
14274 #endif
14275 };
14276
14277 static void tg3_get_eeprom_size(struct tg3 *tp)
14278 {
14279         u32 cursize, val, magic;
14280
14281         tp->nvram_size = EEPROM_CHIP_SIZE;
14282
14283         if (tg3_nvram_read(tp, 0, &magic) != 0)
14284                 return;
14285
14286         if ((magic != TG3_EEPROM_MAGIC) &&
14287             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14288             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14289                 return;
14290
14291         /*
14292          * Size the chip by reading offsets at increasing powers of two.
14293          * When we encounter our validation signature, we know the addressing
14294          * has wrapped around, and thus have our chip size.
14295          */
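        /* For example, on a 512-byte part the reads at 0x10 through
         * 0x100 return data, while the read at 0x200 wraps to offset 0
         * and returns the magic, giving nvram_size = 0x200.
         */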
14296         cursize = 0x10;
14297
14298         while (cursize < tp->nvram_size) {
14299                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14300                         return;
14301
14302                 if (val == magic)
14303                         break;
14304
14305                 cursize <<= 1;
14306         }
14307
14308         tp->nvram_size = cursize;
14309 }
14310
14311 static void tg3_get_nvram_size(struct tg3 *tp)
14312 {
14313         u32 val;
14314
14315         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14316                 return;
14317
14318         /* Selfboot format */
14319         if (val != TG3_EEPROM_MAGIC) {
14320                 tg3_get_eeprom_size(tp);
14321                 return;
14322         }
14323
14324         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14325                 if (val != 0) {
14326                         /* This is confusing.  We want to operate on the
14327                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14328                          * call will read from NVRAM and byteswap the data
14329                          * according to the byteswapping settings for all
14330                          * other register accesses.  This ensures the data we
14331                          * want will always reside in the lower 16-bits.
14332                          * However, the data in NVRAM is in LE format, which
14333                          * means the data from the NVRAM read will always be
14334                          * opposite the endianness of the CPU.  The 16-bit
14335                          * byteswap then brings the data to CPU endianness.
14336                          */
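                        /* e.g. a post-swap 16-bit value of 512 yields
                         * nvram_size = 512 * 1024 bytes = 512 KB.
                         */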
14337                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14338                         return;
14339                 }
14340         }
14341         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14342 }
14343
14344 static void tg3_get_nvram_info(struct tg3 *tp)
14345 {
14346         u32 nvcfg1;
14347
14348         nvcfg1 = tr32(NVRAM_CFG1);
14349         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14350                 tg3_flag_set(tp, FLASH);
14351         } else {
14352                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14353                 tw32(NVRAM_CFG1, nvcfg1);
14354         }
14355
14356         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14357             tg3_flag(tp, 5780_CLASS)) {
14358                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14359                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14360                         tp->nvram_jedecnum = JEDEC_ATMEL;
14361                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14362                         tg3_flag_set(tp, NVRAM_BUFFERED);
14363                         break;
14364                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14365                         tp->nvram_jedecnum = JEDEC_ATMEL;
14366                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14367                         break;
14368                 case FLASH_VENDOR_ATMEL_EEPROM:
14369                         tp->nvram_jedecnum = JEDEC_ATMEL;
14370                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14371                         tg3_flag_set(tp, NVRAM_BUFFERED);
14372                         break;
14373                 case FLASH_VENDOR_ST:
14374                         tp->nvram_jedecnum = JEDEC_ST;
14375                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14376                         tg3_flag_set(tp, NVRAM_BUFFERED);
14377                         break;
14378                 case FLASH_VENDOR_SAIFUN:
14379                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14380                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14381                         break;
14382                 case FLASH_VENDOR_SST_SMALL:
14383                 case FLASH_VENDOR_SST_LARGE:
14384                         tp->nvram_jedecnum = JEDEC_SST;
14385                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14386                         break;
14387                 }
14388         } else {
14389                 tp->nvram_jedecnum = JEDEC_ATMEL;
14390                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14391                 tg3_flag_set(tp, NVRAM_BUFFERED);
14392         }
14393 }
14394
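/* Decode the 5752-style page-size field of NVRAM_CFG1 into a byte
 * count.
 */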
14395 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14396 {
14397         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14398         case FLASH_5752PAGE_SIZE_256:
14399                 tp->nvram_pagesize = 256;
14400                 break;
14401         case FLASH_5752PAGE_SIZE_512:
14402                 tp->nvram_pagesize = 512;
14403                 break;
14404         case FLASH_5752PAGE_SIZE_1K:
14405                 tp->nvram_pagesize = 1024;
14406                 break;
14407         case FLASH_5752PAGE_SIZE_2K:
14408                 tp->nvram_pagesize = 2048;
14409                 break;
14410         case FLASH_5752PAGE_SIZE_4K:
14411                 tp->nvram_pagesize = 4096;
14412                 break;
14413         case FLASH_5752PAGE_SIZE_264:
14414                 tp->nvram_pagesize = 264;
14415                 break;
14416         case FLASH_5752PAGE_SIZE_528:
14417                 tp->nvram_pagesize = 528;
14418                 break;
14419         }
14420 }
14421
14422 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14423 {
14424         u32 nvcfg1;
14425
14426         nvcfg1 = tr32(NVRAM_CFG1);
14427
14428         /* NVRAM protection for TPM */
14429         if (nvcfg1 & (1 << 27))
14430                 tg3_flag_set(tp, PROTECTED_NVRAM);
14431
14432         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14433         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14434         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14435                 tp->nvram_jedecnum = JEDEC_ATMEL;
14436                 tg3_flag_set(tp, NVRAM_BUFFERED);
14437                 break;
14438         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14439                 tp->nvram_jedecnum = JEDEC_ATMEL;
14440                 tg3_flag_set(tp, NVRAM_BUFFERED);
14441                 tg3_flag_set(tp, FLASH);
14442                 break;
14443         case FLASH_5752VENDOR_ST_M45PE10:
14444         case FLASH_5752VENDOR_ST_M45PE20:
14445         case FLASH_5752VENDOR_ST_M45PE40:
14446                 tp->nvram_jedecnum = JEDEC_ST;
14447                 tg3_flag_set(tp, NVRAM_BUFFERED);
14448                 tg3_flag_set(tp, FLASH);
14449                 break;
14450         }
14451
14452         if (tg3_flag(tp, FLASH)) {
14453                 tg3_nvram_get_pagesize(tp, nvcfg1);
14454         } else {
14455                 /* For eeprom, set pagesize to maximum eeprom size */
14456                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14457
14458                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14459                 tw32(NVRAM_CFG1, nvcfg1);
14460         }
14461 }
14462
14463 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14464 {
14465         u32 nvcfg1, protect = 0;
14466
14467         nvcfg1 = tr32(NVRAM_CFG1);
14468
14469         /* NVRAM protection for TPM */
14470         if (nvcfg1 & (1 << 27)) {
14471                 tg3_flag_set(tp, PROTECTED_NVRAM);
14472                 protect = 1;
14473         }
14474
14475         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14476         switch (nvcfg1) {
14477         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14478         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14479         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14480         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14481                 tp->nvram_jedecnum = JEDEC_ATMEL;
14482                 tg3_flag_set(tp, NVRAM_BUFFERED);
14483                 tg3_flag_set(tp, FLASH);
14484                 tp->nvram_pagesize = 264;
14485                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14486                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14487                         tp->nvram_size = (protect ? 0x3e200 :
14488                                           TG3_NVRAM_SIZE_512KB);
14489                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14490                         tp->nvram_size = (protect ? 0x1f200 :
14491                                           TG3_NVRAM_SIZE_256KB);
14492                 else
14493                         tp->nvram_size = (protect ? 0x1f200 :
14494                                           TG3_NVRAM_SIZE_128KB);
14495                 break;
14496         case FLASH_5752VENDOR_ST_M45PE10:
14497         case FLASH_5752VENDOR_ST_M45PE20:
14498         case FLASH_5752VENDOR_ST_M45PE40:
14499                 tp->nvram_jedecnum = JEDEC_ST;
14500                 tg3_flag_set(tp, NVRAM_BUFFERED);
14501                 tg3_flag_set(tp, FLASH);
14502                 tp->nvram_pagesize = 256;
14503                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14504                         tp->nvram_size = (protect ?
14505                                           TG3_NVRAM_SIZE_64KB :
14506                                           TG3_NVRAM_SIZE_128KB);
14507                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14508                         tp->nvram_size = (protect ?
14509                                           TG3_NVRAM_SIZE_64KB :
14510                                           TG3_NVRAM_SIZE_256KB);
14511                 else
14512                         tp->nvram_size = (protect ?
14513                                           TG3_NVRAM_SIZE_128KB :
14514                                           TG3_NVRAM_SIZE_512KB);
14515                 break;
14516         }
14517 }
14518
14519 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14520 {
14521         u32 nvcfg1;
14522
14523         nvcfg1 = tr32(NVRAM_CFG1);
14524
14525         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14526         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14527         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14528         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14529         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14530                 tp->nvram_jedecnum = JEDEC_ATMEL;
14531                 tg3_flag_set(tp, NVRAM_BUFFERED);
14532                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14533
14534                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535                 tw32(NVRAM_CFG1, nvcfg1);
14536                 break;
14537         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14539         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14540         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14541                 tp->nvram_jedecnum = JEDEC_ATMEL;
14542                 tg3_flag_set(tp, NVRAM_BUFFERED);
14543                 tg3_flag_set(tp, FLASH);
14544                 tp->nvram_pagesize = 264;
14545                 break;
14546         case FLASH_5752VENDOR_ST_M45PE10:
14547         case FLASH_5752VENDOR_ST_M45PE20:
14548         case FLASH_5752VENDOR_ST_M45PE40:
14549                 tp->nvram_jedecnum = JEDEC_ST;
14550                 tg3_flag_set(tp, NVRAM_BUFFERED);
14551                 tg3_flag_set(tp, FLASH);
14552                 tp->nvram_pagesize = 256;
14553                 break;
14554         }
14555 }
14556
14557 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14558 {
14559         u32 nvcfg1, protect = 0;
14560
14561         nvcfg1 = tr32(NVRAM_CFG1);
14562
14563         /* NVRAM protection for TPM */
14564         if (nvcfg1 & (1 << 27)) {
14565                 tg3_flag_set(tp, PROTECTED_NVRAM);
14566                 protect = 1;
14567         }
14568
14569         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14570         switch (nvcfg1) {
14571         case FLASH_5761VENDOR_ATMEL_ADB021D:
14572         case FLASH_5761VENDOR_ATMEL_ADB041D:
14573         case FLASH_5761VENDOR_ATMEL_ADB081D:
14574         case FLASH_5761VENDOR_ATMEL_ADB161D:
14575         case FLASH_5761VENDOR_ATMEL_MDB021D:
14576         case FLASH_5761VENDOR_ATMEL_MDB041D:
14577         case FLASH_5761VENDOR_ATMEL_MDB081D:
14578         case FLASH_5761VENDOR_ATMEL_MDB161D:
14579                 tp->nvram_jedecnum = JEDEC_ATMEL;
14580                 tg3_flag_set(tp, NVRAM_BUFFERED);
14581                 tg3_flag_set(tp, FLASH);
14582                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14583                 tp->nvram_pagesize = 256;
14584                 break;
14585         case FLASH_5761VENDOR_ST_A_M45PE20:
14586         case FLASH_5761VENDOR_ST_A_M45PE40:
14587         case FLASH_5761VENDOR_ST_A_M45PE80:
14588         case FLASH_5761VENDOR_ST_A_M45PE16:
14589         case FLASH_5761VENDOR_ST_M_M45PE20:
14590         case FLASH_5761VENDOR_ST_M_M45PE40:
14591         case FLASH_5761VENDOR_ST_M_M45PE80:
14592         case FLASH_5761VENDOR_ST_M_M45PE16:
14593                 tp->nvram_jedecnum = JEDEC_ST;
14594                 tg3_flag_set(tp, NVRAM_BUFFERED);
14595                 tg3_flag_set(tp, FLASH);
14596                 tp->nvram_pagesize = 256;
14597                 break;
14598         }
14599
14600         if (protect) {
14601                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14602         } else {
14603                 switch (nvcfg1) {
14604                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14605                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14606                 case FLASH_5761VENDOR_ST_A_M45PE16:
14607                 case FLASH_5761VENDOR_ST_M_M45PE16:
14608                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14609                         break;
14610                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14611                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14612                 case FLASH_5761VENDOR_ST_A_M45PE80:
14613                 case FLASH_5761VENDOR_ST_M_M45PE80:
14614                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14615                         break;
14616                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14617                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14618                 case FLASH_5761VENDOR_ST_A_M45PE40:
14619                 case FLASH_5761VENDOR_ST_M_M45PE40:
14620                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14621                         break;
14622                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14623                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14624                 case FLASH_5761VENDOR_ST_A_M45PE20:
14625                 case FLASH_5761VENDOR_ST_M_M45PE20:
14626                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14627                         break;
14628                 }
14629         }
14630 }
14631
14632 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14633 {
14634         tp->nvram_jedecnum = JEDEC_ATMEL;
14635         tg3_flag_set(tp, NVRAM_BUFFERED);
14636         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14637 }
14638
14639 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14640 {
14641         u32 nvcfg1;
14642
14643         nvcfg1 = tr32(NVRAM_CFG1);
14644
14645         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14646         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14647         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14648                 tp->nvram_jedecnum = JEDEC_ATMEL;
14649                 tg3_flag_set(tp, NVRAM_BUFFERED);
14650                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14651
14652                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14653                 tw32(NVRAM_CFG1, nvcfg1);
14654                 return;
14655         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14656         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14657         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14658         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14659         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14660         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14661         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14662                 tp->nvram_jedecnum = JEDEC_ATMEL;
14663                 tg3_flag_set(tp, NVRAM_BUFFERED);
14664                 tg3_flag_set(tp, FLASH);
14665
14666                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14667                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14668                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14669                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14670                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14671                         break;
14672                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14673                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14674                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14675                         break;
14676                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14677                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14678                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14679                         break;
14680                 }
14681                 break;
14682         case FLASH_5752VENDOR_ST_M45PE10:
14683         case FLASH_5752VENDOR_ST_M45PE20:
14684         case FLASH_5752VENDOR_ST_M45PE40:
14685                 tp->nvram_jedecnum = JEDEC_ST;
14686                 tg3_flag_set(tp, NVRAM_BUFFERED);
14687                 tg3_flag_set(tp, FLASH);
14688
14689                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14690                 case FLASH_5752VENDOR_ST_M45PE10:
14691                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14692                         break;
14693                 case FLASH_5752VENDOR_ST_M45PE20:
14694                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14695                         break;
14696                 case FLASH_5752VENDOR_ST_M45PE40:
14697                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14698                         break;
14699                 }
14700                 break;
14701         default:
14702                 tg3_flag_set(tp, NO_NVRAM);
14703                 return;
14704         }
14705
14706         tg3_nvram_get_pagesize(tp, nvcfg1);
14707         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14708                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14709 }
14710
14711
14712 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14713 {
14714         u32 nvcfg1;
14715
14716         nvcfg1 = tr32(NVRAM_CFG1);
14717
14718         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719         case FLASH_5717VENDOR_ATMEL_EEPROM:
14720         case FLASH_5717VENDOR_MICRO_EEPROM:
14721                 tp->nvram_jedecnum = JEDEC_ATMEL;
14722                 tg3_flag_set(tp, NVRAM_BUFFERED);
14723                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14724
14725                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14726                 tw32(NVRAM_CFG1, nvcfg1);
14727                 return;
14728         case FLASH_5717VENDOR_ATMEL_MDB011D:
14729         case FLASH_5717VENDOR_ATMEL_ADB011B:
14730         case FLASH_5717VENDOR_ATMEL_ADB011D:
14731         case FLASH_5717VENDOR_ATMEL_MDB021D:
14732         case FLASH_5717VENDOR_ATMEL_ADB021B:
14733         case FLASH_5717VENDOR_ATMEL_ADB021D:
14734         case FLASH_5717VENDOR_ATMEL_45USPT:
14735                 tp->nvram_jedecnum = JEDEC_ATMEL;
14736                 tg3_flag_set(tp, NVRAM_BUFFERED);
14737                 tg3_flag_set(tp, FLASH);
14738
14739                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14740                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14741                 /* Detect size with tg3_get_nvram_size() */
14742                         break;
14743                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14744                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14745                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14746                         break;
14747                 default:
14748                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14749                         break;
14750                 }
14751                 break;
14752         case FLASH_5717VENDOR_ST_M_M25PE10:
14753         case FLASH_5717VENDOR_ST_A_M25PE10:
14754         case FLASH_5717VENDOR_ST_M_M45PE10:
14755         case FLASH_5717VENDOR_ST_A_M45PE10:
14756         case FLASH_5717VENDOR_ST_M_M25PE20:
14757         case FLASH_5717VENDOR_ST_A_M25PE20:
14758         case FLASH_5717VENDOR_ST_M_M45PE20:
14759         case FLASH_5717VENDOR_ST_A_M45PE20:
14760         case FLASH_5717VENDOR_ST_25USPT:
14761         case FLASH_5717VENDOR_ST_45USPT:
14762                 tp->nvram_jedecnum = JEDEC_ST;
14763                 tg3_flag_set(tp, NVRAM_BUFFERED);
14764                 tg3_flag_set(tp, FLASH);
14765
14766                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14767                 case FLASH_5717VENDOR_ST_M_M25PE20:
14768                 case FLASH_5717VENDOR_ST_M_M45PE20:
14769                 /* Detect size with tg3_get_nvram_size() */
14770                         break;
14771                 case FLASH_5717VENDOR_ST_A_M25PE20:
14772                 case FLASH_5717VENDOR_ST_A_M45PE20:
14773                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14774                         break;
14775                 default:
14776                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14777                         break;
14778                 }
14779                 break;
14780         default:
14781                 tg3_flag_set(tp, NO_NVRAM);
14782                 return;
14783         }
14784
14785         tg3_nvram_get_pagesize(tp, nvcfg1);
14786         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14787                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14788 }
14789
14790 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14791 {
14792         u32 nvcfg1, nvmpinstrp;
14793
14794         nvcfg1 = tr32(NVRAM_CFG1);
14795         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14796
14797         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14798                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14799                         tg3_flag_set(tp, NO_NVRAM);
14800                         return;
14801                 }
14802
14803                 switch (nvmpinstrp) {
14804                 case FLASH_5762_EEPROM_HD:
14805                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14806                         break;
14807                 case FLASH_5762_EEPROM_LD:
14808                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14809                         break;
14810                 case FLASH_5720VENDOR_M_ST_M45PE20:
14811                         /* This pinstrap supports multiple sizes, so force it
14812                          * to read the actual size from location 0xf0.
14813                          */
14814                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14815                         break;
14816                 }
14817         }
14818
14819         switch (nvmpinstrp) {
14820         case FLASH_5720_EEPROM_HD:
14821         case FLASH_5720_EEPROM_LD:
14822                 tp->nvram_jedecnum = JEDEC_ATMEL;
14823                 tg3_flag_set(tp, NVRAM_BUFFERED);
14824
14825                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14826                 tw32(NVRAM_CFG1, nvcfg1);
14827                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14828                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14829                 else
14830                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14831                 return;
14832         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14833         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14834         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14835         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14836         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14837         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14838         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14839         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14840         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14841         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14842         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14843         case FLASH_5720VENDOR_ATMEL_45USPT:
14844                 tp->nvram_jedecnum = JEDEC_ATMEL;
14845                 tg3_flag_set(tp, NVRAM_BUFFERED);
14846                 tg3_flag_set(tp, FLASH);
14847
14848                 switch (nvmpinstrp) {
14849                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14850                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14851                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14852                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14853                         break;
14854                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14855                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14856                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14857                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14858                         break;
14859                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14860                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14861                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14862                         break;
14863                 default:
14864                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14865                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14866                         break;
14867                 }
14868                 break;
14869         case FLASH_5720VENDOR_M_ST_M25PE10:
14870         case FLASH_5720VENDOR_M_ST_M45PE10:
14871         case FLASH_5720VENDOR_A_ST_M25PE10:
14872         case FLASH_5720VENDOR_A_ST_M45PE10:
14873         case FLASH_5720VENDOR_M_ST_M25PE20:
14874         case FLASH_5720VENDOR_M_ST_M45PE20:
14875         case FLASH_5720VENDOR_A_ST_M25PE20:
14876         case FLASH_5720VENDOR_A_ST_M45PE20:
14877         case FLASH_5720VENDOR_M_ST_M25PE40:
14878         case FLASH_5720VENDOR_M_ST_M45PE40:
14879         case FLASH_5720VENDOR_A_ST_M25PE40:
14880         case FLASH_5720VENDOR_A_ST_M45PE40:
14881         case FLASH_5720VENDOR_M_ST_M25PE80:
14882         case FLASH_5720VENDOR_M_ST_M45PE80:
14883         case FLASH_5720VENDOR_A_ST_M25PE80:
14884         case FLASH_5720VENDOR_A_ST_M45PE80:
14885         case FLASH_5720VENDOR_ST_25USPT:
14886         case FLASH_5720VENDOR_ST_45USPT:
14887                 tp->nvram_jedecnum = JEDEC_ST;
14888                 tg3_flag_set(tp, NVRAM_BUFFERED);
14889                 tg3_flag_set(tp, FLASH);
14890
14891                 switch (nvmpinstrp) {
14892                 case FLASH_5720VENDOR_M_ST_M25PE20:
14893                 case FLASH_5720VENDOR_M_ST_M45PE20:
14894                 case FLASH_5720VENDOR_A_ST_M25PE20:
14895                 case FLASH_5720VENDOR_A_ST_M45PE20:
14896                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14897                         break;
14898                 case FLASH_5720VENDOR_M_ST_M25PE40:
14899                 case FLASH_5720VENDOR_M_ST_M45PE40:
14900                 case FLASH_5720VENDOR_A_ST_M25PE40:
14901                 case FLASH_5720VENDOR_A_ST_M45PE40:
14902                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14903                         break;
14904                 case FLASH_5720VENDOR_M_ST_M25PE80:
14905                 case FLASH_5720VENDOR_M_ST_M45PE80:
14906                 case FLASH_5720VENDOR_A_ST_M25PE80:
14907                 case FLASH_5720VENDOR_A_ST_M45PE80:
14908                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14909                         break;
14910                 default:
14911                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14912                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14913                         break;
14914                 }
14915                 break;
14916         default:
14917                 tg3_flag_set(tp, NO_NVRAM);
14918                 return;
14919         }
14920
14921         tg3_nvram_get_pagesize(tp, nvcfg1);
14922         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14923                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14924
14925         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14926                 u32 val;
14927
14928                 if (tg3_nvram_read(tp, 0, &val))
14929                         return;
14930
14931                 if (val != TG3_EEPROM_MAGIC &&
14932                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14933                         tg3_flag_set(tp, NO_NVRAM);
14934         }
14935 }
14936
14937 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14938 static void tg3_nvram_init(struct tg3 *tp)
14939 {
14940         if (tg3_flag(tp, IS_SSB_CORE)) {
14941                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14942                 tg3_flag_clear(tp, NVRAM);
14943                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14944                 tg3_flag_set(tp, NO_NVRAM);
14945                 return;
14946         }
14947
14948         tw32_f(GRC_EEPROM_ADDR,
14949              (EEPROM_ADDR_FSM_RESET |
14950               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14951                EEPROM_ADDR_CLKPERD_SHIFT)));
14952
14953         msleep(1);
14954
14955         /* Enable serial EEPROM accesses. */
14956         tw32_f(GRC_LOCAL_CTRL,
14957              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14958         udelay(100);
14959
14960         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14961             tg3_asic_rev(tp) != ASIC_REV_5701) {
14962                 tg3_flag_set(tp, NVRAM);
14963
14964                 if (tg3_nvram_lock(tp)) {
14965                         netdev_warn(tp->dev,
14966                                     "Cannot get nvram lock, %s failed\n",
14967                                     __func__);
14968                         return;
14969                 }
14970                 tg3_enable_nvram_access(tp);
14971
14972                 tp->nvram_size = 0;
14973
14974                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14975                         tg3_get_5752_nvram_info(tp);
14976                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14977                         tg3_get_5755_nvram_info(tp);
14978                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14979                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14980                          tg3_asic_rev(tp) == ASIC_REV_5785)
14981                         tg3_get_5787_nvram_info(tp);
14982                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14983                         tg3_get_5761_nvram_info(tp);
14984                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14985                         tg3_get_5906_nvram_info(tp);
14986                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14987                          tg3_flag(tp, 57765_CLASS))
14988                         tg3_get_57780_nvram_info(tp);
14989                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14990                          tg3_asic_rev(tp) == ASIC_REV_5719)
14991                         tg3_get_5717_nvram_info(tp);
14992                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14993                          tg3_asic_rev(tp) == ASIC_REV_5762)
14994                         tg3_get_5720_nvram_info(tp);
14995                 else
14996                         tg3_get_nvram_info(tp);
14997
14998                 if (tp->nvram_size == 0)
14999                         tg3_get_nvram_size(tp);
15000
15001                 tg3_disable_nvram_access(tp);
15002                 tg3_nvram_unlock(tp);
15003
15004         } else {
15005                 tg3_flag_clear(tp, NVRAM);
15006                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15007
15008                 tg3_get_eeprom_size(tp);
15009         }
15010 }
15011
15012 struct subsys_tbl_ent {
15013         u16 subsys_vendor, subsys_devid;
15014         u32 phy_id;
15015 };
15016
15017 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15018         /* Broadcom boards. */
15019         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15020           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15021         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15022           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15023         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15024           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15025         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15026           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15027         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15028           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15029         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15031         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15033         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15035         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15037         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15039         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15041
15042         /* 3com boards. */
15043         { TG3PCI_SUBVENDOR_ID_3COM,
15044           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15045         { TG3PCI_SUBVENDOR_ID_3COM,
15046           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15047         { TG3PCI_SUBVENDOR_ID_3COM,
15048           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15049         { TG3PCI_SUBVENDOR_ID_3COM,
15050           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15051         { TG3PCI_SUBVENDOR_ID_3COM,
15052           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15053
15054         /* DELL boards. */
15055         { TG3PCI_SUBVENDOR_ID_DELL,
15056           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15057         { TG3PCI_SUBVENDOR_ID_DELL,
15058           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15059         { TG3PCI_SUBVENDOR_ID_DELL,
15060           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15061         { TG3PCI_SUBVENDOR_ID_DELL,
15062           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15063
15064         /* Compaq boards. */
15065         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15066           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15067         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15068           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15069         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15070           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15071         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15072           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15073         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15074           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15075
15076         /* IBM boards. */
15077         { TG3PCI_SUBVENDOR_ID_IBM,
15078           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15079 };
15080
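/* Map the PCI subsystem vendor/device IDs to a known board entry,
 * or NULL if the board is not in the table.
 */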
15081 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15082 {
15083         int i;
15084
15085         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15086                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15087                      tp->pdev->subsystem_vendor) &&
15088                     (subsys_id_to_phy_id[i].subsys_devid ==
15089                      tp->pdev->subsystem_device))
15090                         return &subsys_id_to_phy_id[i];
15091         }
15092         return NULL;
15093 }
15094
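/* Read the hardware configuration that the bootcode left in NIC
 * SRAM: PHY ID, LED mode, and the WOL/ASF/APE capability flags.
 */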
15095 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15096 {
15097         u32 val;
15098
15099         tp->phy_id = TG3_PHY_ID_INVALID;
15100         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15101
15102         /* Assume an onboard, WOL-capable device by default.  */
15103         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15104         tg3_flag_set(tp, WOL_CAP);
15105
15106         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15107                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15108                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15109                         tg3_flag_set(tp, IS_NIC);
15110                 }
15111                 val = tr32(VCPU_CFGSHDW);
15112                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15113                         tg3_flag_set(tp, ASPM_WORKAROUND);
15114                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15115                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15116                         tg3_flag_set(tp, WOL_ENABLE);
15117                         device_set_wakeup_enable(&tp->pdev->dev, true);
15118                 }
15119                 goto done;
15120         }
15121
15122         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15123         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15124                 u32 nic_cfg, led_cfg;
15125                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15126                 u32 nic_phy_id, ver, eeprom_phy_id;
15127                 int eeprom_phy_serdes = 0;
15128
15129                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15130                 tp->nic_sram_data_cfg = nic_cfg;
15131
15132                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15133                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15134                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15135                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15136                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15137                     (ver > 0) && (ver < 0x100))
15138                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15139
15140                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15141                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15142
15143                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15144                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15145                     tg3_asic_rev(tp) == ASIC_REV_5720)
15146                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15147
15148                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15149                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15150                         eeprom_phy_serdes = 1;
15151
15152                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15153                 if (nic_phy_id != 0) {
15154                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15155                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15156
15157                         eeprom_phy_id  = (id1 >> 16) << 10;
15158                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15159                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15160                 } else
15161                         eeprom_phy_id = 0;
15162
15163                 tp->phy_id = eeprom_phy_id;
15164                 if (eeprom_phy_serdes) {
15165                         if (!tg3_flag(tp, 5705_PLUS))
15166                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15167                         else
15168                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15169                 }
15170
15171                 if (tg3_flag(tp, 5750_PLUS))
15172                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15173                                     SHASTA_EXT_LED_MODE_MASK);
15174                 else
15175                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15176
15177                 switch (led_cfg) {
15178                 default:
15179                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15180                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15181                         break;
15182
15183                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15184                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15185                         break;
15186
15187                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15188                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15189
15190                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15191                          * as happens with some older 5700/5701 bootcode.
15192                          */
15193                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15194                             tg3_asic_rev(tp) == ASIC_REV_5701)
15195                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15196
15197                         break;
15198
15199                 case SHASTA_EXT_LED_SHARED:
15200                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15201                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15202                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15203                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15204                                                  LED_CTRL_MODE_PHY_2);
15205
15206                         if (tg3_flag(tp, 5717_PLUS) ||
15207                             tg3_asic_rev(tp) == ASIC_REV_5762)
15208                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15209                                                 LED_CTRL_BLINK_RATE_MASK;
15210
15211                         break;
15212
15213                 case SHASTA_EXT_LED_MAC:
15214                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15215                         break;
15216
15217                 case SHASTA_EXT_LED_COMBO:
15218                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15219                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15220                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15221                                                  LED_CTRL_MODE_PHY_2);
15222                         break;
15223
15224                 }
15225
15226                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15227                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15228                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15229                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15230
15231                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15232                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15233
15234                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15235                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15236                         if ((tp->pdev->subsystem_vendor ==
15237                              PCI_VENDOR_ID_ARIMA) &&
15238                             (tp->pdev->subsystem_device == 0x205a ||
15239                              tp->pdev->subsystem_device == 0x2063))
15240                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15241                 } else {
15242                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15243                         tg3_flag_set(tp, IS_NIC);
15244                 }
15245
15246                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15247                         tg3_flag_set(tp, ENABLE_ASF);
15248                         if (tg3_flag(tp, 5750_PLUS))
15249                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15250                 }
15251
15252                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15253                     tg3_flag(tp, 5750_PLUS))
15254                         tg3_flag_set(tp, ENABLE_APE);
15255
15256                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15257                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15258                         tg3_flag_clear(tp, WOL_CAP);
15259
15260                 if (tg3_flag(tp, WOL_CAP) &&
15261                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15262                         tg3_flag_set(tp, WOL_ENABLE);
15263                         device_set_wakeup_enable(&tp->pdev->dev, true);
15264                 }
15265
15266                 if (cfg2 & (1 << 17))
15267                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15268
15269                 /* Serdes signal pre-emphasis in register 0x590 is set
15270                  * by the bootcode if bit 18 is set. */
15271                 if (cfg2 & (1 << 18))
15272                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15273
15274                 if ((tg3_flag(tp, 57765_PLUS) ||
15275                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15276                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15277                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15278                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15279
15280                 if (tg3_flag(tp, PCI_EXPRESS)) {
15281                         u32 cfg3;
15282
15283                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15284                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15285                             !tg3_flag(tp, 57765_PLUS) &&
15286                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15287                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15288                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15289                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15290                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15291                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15292                 }
15293
15294                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15295                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15296                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15297                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15298                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15299                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15300
15301                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15302                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15303         }
15304 done:
15305         if (tg3_flag(tp, WOL_CAP))
15306                 device_set_wakeup_enable(&tp->pdev->dev,
15307                                          tg3_flag(tp, WOL_ENABLE));
15308         else
15309                 device_set_wakeup_capable(&tp->pdev->dev, false);
15310 }
15311
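/* Read one 32-bit word from the APE OTP region, holding the NVRAM
 * lock and polling up to ~1 ms for command completion.
 */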
15312 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15313 {
15314         int i, err;
15315         u32 val2, off = offset * 8;
15316
15317         err = tg3_nvram_lock(tp);
15318         if (err)
15319                 return err;
15320
15321         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15322         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15323                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15324         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15325         udelay(10);
15326
15327         for (i = 0; i < 100; i++) {
15328                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15329                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15330                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15331                         break;
15332                 }
15333                 udelay(10);
15334         }
15335
15336         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15337
15338         tg3_nvram_unlock(tp);
15339         if (val2 & APE_OTP_STATUS_CMD_DONE)
15340                 return 0;
15341
15342         return -EBUSY;
15343 }
15344
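/* Issue an OTP command and poll OTP_STATUS for completion. */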
15345 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15346 {
15347         int i;
15348         u32 val;
15349
15350         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15351         tw32(OTP_CTRL, cmd);
15352
15353         /* Wait for up to 1 ms for command to execute. */
15354         for (i = 0; i < 100; i++) {
15355                 val = tr32(OTP_STATUS);
15356                 if (val & OTP_STATUS_CMD_DONE)
15357                         break;
15358                 udelay(10);
15359         }
15360
15361         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15362 }
15363
15364 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15365  * configuration is a 32-bit value that straddles the alignment boundary.
15366  * We do two 32-bit reads and then shift and merge the results.
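 * The low 16 bits of the first word become bits 31:16 of the result,
 * and the high 16 bits of the second word become bits 15:0.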
15367  */
15368 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15369 {
15370         u32 bhalf_otp, thalf_otp;
15371
15372         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15373
15374         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15375                 return 0;
15376
15377         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15378
15379         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15380                 return 0;
15381
15382         thalf_otp = tr32(OTP_READ_DATA);
15383
15384         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15385
15386         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15387                 return 0;
15388
15389         bhalf_otp = tr32(OTP_READ_DATA);
15390
15391         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15392 }
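
/* Worked example of the merge above, with made-up values: if the first read
 * (thalf_otp) returns 0x1234abcd and the second (bhalf_otp) returns
 * 0x5678ef01, then (0x1234abcd & 0x0000ffff) << 16 is 0xabcd0000 and
 * 0x5678ef01 >> 16 is 0x00005678, giving a merged gphy config of 0xabcd5678.
 */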
15393
15394 static void tg3_phy_init_link_config(struct tg3 *tp)
15395 {
15396         u32 adv = ADVERTISED_Autoneg;
15397
15398         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15399                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15400                         adv |= ADVERTISED_1000baseT_Half;
15401                 adv |= ADVERTISED_1000baseT_Full;
15402         }
15403
15404         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15405                 adv |= ADVERTISED_100baseT_Half |
15406                        ADVERTISED_100baseT_Full |
15407                        ADVERTISED_10baseT_Half |
15408                        ADVERTISED_10baseT_Full |
15409                        ADVERTISED_TP;
15410         else
15411                 adv |= ADVERTISED_FIBRE;
15412
15413         tp->link_config.advertising = adv;
15414         tp->link_config.speed = SPEED_UNKNOWN;
15415         tp->link_config.duplex = DUPLEX_UNKNOWN;
15416         tp->link_config.autoneg = AUTONEG_ENABLE;
15417         tp->link_config.active_speed = SPEED_UNKNOWN;
15418         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15419
15420         tp->old_link = -1;
15421 }
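
/* Illustrative result (editorial, derived from the logic above): for a
 * typical copper PHY with TG3_PHYFLG_10_100_ONLY, TG3_PHYFLG_ANY_SERDES and
 * TG3_PHYFLG_DISABLE_1G_HD_ADV all clear, the advertising mask becomes
 * Autoneg | 1000baseT_Half/Full | 100baseT_Half/Full | 10baseT_Half/Full | TP.
 */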
15422
15423 static int tg3_phy_probe(struct tg3 *tp)
15424 {
15425         u32 hw_phy_id_1, hw_phy_id_2;
15426         u32 hw_phy_id, hw_phy_id_masked;
15427         int err;
15428
15429         /* Flow control autonegotiation is the default behavior. */
15430         tg3_flag_set(tp, PAUSE_AUTONEG);
15431         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15432
15433         if (tg3_flag(tp, ENABLE_APE)) {
15434                 switch (tp->pci_fn) {
15435                 case 0:
15436                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15437                         break;
15438                 case 1:
15439                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15440                         break;
15441                 case 2:
15442                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15443                         break;
15444                 case 3:
15445                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15446                         break;
15447                 }
15448         }
15449
15450         if (!tg3_flag(tp, ENABLE_ASF) &&
15451             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15452             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15453                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15454                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15455
15456         if (tg3_flag(tp, USE_PHYLIB))
15457                 return tg3_phy_init(tp);
15458
15459         /* Reading the PHY ID register can conflict with ASF
15460          * firmware access to the PHY hardware.
15461          */
15462         err = 0;
15463         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15464                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15465         } else {
15466                 /* Now read the physical PHY_ID from the chip and verify
15467                  * that it is sane.  If it doesn't look good, we fall back
15468                  * first to the hard-coded table based PHY_ID and, failing
15469                  * that, to the value found in the eeprom area.
15470                  */
15471                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15472                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15473
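                /* Editorial note on the packing below: PHYSID2 bits [15:10]
                 * land in bits [31:26], all 16 bits of PHYSID1 land in bits
                 * [25:10], and PHYSID2 bits [9:0] (model and revision) stay
                 * in bits [9:0].
                 */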
15474                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15475                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15476                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15477
15478                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15479         }
15480
15481         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15482                 tp->phy_id = hw_phy_id;
15483                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15484                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15485                 else
15486                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15487         } else {
15488                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15489                         /* Do nothing, phy ID already set up in
15490                          * tg3_get_eeprom_hw_cfg().
15491                          */
15492                 } else {
15493                         struct subsys_tbl_ent *p;
15494
15495                         /* No eeprom signature?  Try the hardcoded
15496                          * subsys device table.
15497                          */
15498                         p = tg3_lookup_by_subsys(tp);
15499                         if (p) {
15500                                 tp->phy_id = p->phy_id;
15501                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15502                                 /* So far we have seen the IDs 0xbc050cd0,
15503                                  * 0xbc050f80 and 0xbc050c30 on devices
15504                                  * connected to a BCM4785, and there are
15505                                  * probably more. For now, just assume that
15506                                  * the phy is supported when it is connected
15507                                  * to an SSB core.
15508                                  */
15509                                 return -ENODEV;
15510                         }
15511
15512                         if (!tp->phy_id ||
15513                             tp->phy_id == TG3_PHY_ID_BCM8002)
15514                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15515                 }
15516         }
15517
15518         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15519             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15520              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15521              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15522              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15523              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15524               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15525              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15526               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15527                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15528
15529                 tp->eee.supported = SUPPORTED_100baseT_Full |
15530                                     SUPPORTED_1000baseT_Full;
15531                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15532                                      ADVERTISED_1000baseT_Full;
15533                 tp->eee.eee_enabled = 1;
15534                 tp->eee.tx_lpi_enabled = 1;
15535                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15536         }
15537
15538         tg3_phy_init_link_config(tp);
15539
15540         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15541             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15542             !tg3_flag(tp, ENABLE_APE) &&
15543             !tg3_flag(tp, ENABLE_ASF)) {
15544                 u32 bmsr, dummy;
15545
15546                 tg3_readphy(tp, MII_BMSR, &bmsr);
15547                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15548                     (bmsr & BMSR_LSTATUS))
15549                         goto skip_phy_reset;
15550
15551                 err = tg3_phy_reset(tp);
15552                 if (err)
15553                         return err;
15554
15555                 tg3_phy_set_wirespeed(tp);
15556
15557                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15558                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15559                                             tp->link_config.flowctrl);
15560
15561                         tg3_writephy(tp, MII_BMCR,
15562                                      BMCR_ANENABLE | BMCR_ANRESTART);
15563                 }
15564         }
15565
15566 skip_phy_reset:
15567         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15568                 err = tg3_init_5401phy_dsp(tp);
15569                 if (err)
15570                         return err;
15571
15572                 err = tg3_init_5401phy_dsp(tp);
15573         }
15574
15575         return err;
15576 }
15577
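/* Editorial sketch of the VPD walk below (standard PCI VPD layout, not
 * something this driver defines): a large-resource VPD-R tag introduces the
 * read-only section; each entry in it is a two-byte keyword ("MN", "V0",
 * "PN"), a one-byte length, then the data bytes.  An "MN" value of "1028"
 * (Dell's PCI vendor ID in ASCII) gates use of the "V0" firmware string.
 */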
15578 static void tg3_read_vpd(struct tg3 *tp)
15579 {
15580         u8 *vpd_data;
15581         unsigned int block_end, rosize, len;
15582         u32 vpdlen;
15583         int j, i = 0;
15584
15585         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15586         if (!vpd_data)
15587                 goto out_no_vpd;
15588
15589         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15590         if (i < 0)
15591                 goto out_not_found;
15592
15593         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15594         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15595         i += PCI_VPD_LRDT_TAG_SIZE;
15596
15597         if (block_end > vpdlen)
15598                 goto out_not_found;
15599
15600         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15601                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15602         if (j > 0) {
15603                 len = pci_vpd_info_field_size(&vpd_data[j]);
15604
15605                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15606                 if (j + len > block_end || len != 4 ||
15607                     memcmp(&vpd_data[j], "1028", 4))
15608                         goto partno;
15609
15610                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15611                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15612                 if (j < 0)
15613                         goto partno;
15614
15615                 len = pci_vpd_info_field_size(&vpd_data[j]);
15616
15617                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15618                 if (j + len > block_end)
15619                         goto partno;
15620
15621                 if (len >= sizeof(tp->fw_ver))
15622                         len = sizeof(tp->fw_ver) - 1;
15623                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15624                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15625                          &vpd_data[j]);
15626         }
15627
15628 partno:
15629         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15630                                       PCI_VPD_RO_KEYWORD_PARTNO);
15631         if (i < 0)
15632                 goto out_not_found;
15633
15634         len = pci_vpd_info_field_size(&vpd_data[i]);
15635
15636         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15637         if (len > TG3_BPN_SIZE ||
15638             (len + i) > vpdlen)
15639                 goto out_not_found;
15640
15641         memcpy(tp->board_part_number, &vpd_data[i], len);
15642
15643 out_not_found:
15644         kfree(vpd_data);
15645         if (tp->board_part_number[0])
15646                 return;
15647
15648 out_no_vpd:
15649         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15650                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15651                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15652                         strcpy(tp->board_part_number, "BCM5717");
15653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15654                         strcpy(tp->board_part_number, "BCM5718");
15655                 else
15656                         goto nomatch;
15657         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15658                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15659                         strcpy(tp->board_part_number, "BCM57780");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15661                         strcpy(tp->board_part_number, "BCM57760");
15662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15663                         strcpy(tp->board_part_number, "BCM57790");
15664                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15665                         strcpy(tp->board_part_number, "BCM57788");
15666                 else
15667                         goto nomatch;
15668         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15669                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15670                         strcpy(tp->board_part_number, "BCM57761");
15671                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15672                         strcpy(tp->board_part_number, "BCM57765");
15673                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15674                         strcpy(tp->board_part_number, "BCM57781");
15675                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15676                         strcpy(tp->board_part_number, "BCM57785");
15677                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15678                         strcpy(tp->board_part_number, "BCM57791");
15679                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15680                         strcpy(tp->board_part_number, "BCM57795");
15681                 else
15682                         goto nomatch;
15683         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15684                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15685                         strcpy(tp->board_part_number, "BCM57762");
15686                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15687                         strcpy(tp->board_part_number, "BCM57766");
15688                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15689                         strcpy(tp->board_part_number, "BCM57782");
15690                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15691                         strcpy(tp->board_part_number, "BCM57786");
15692                 else
15693                         goto nomatch;
15694         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15695                 strcpy(tp->board_part_number, "BCM95906");
15696         } else {
15697 nomatch:
15698                 strcpy(tp->board_part_number, "none");
15699         }
15700 }
15701
15702 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15703 {
15704         u32 val;
15705
15706         if (tg3_nvram_read(tp, offset, &val) ||
15707             (val & 0xfc000000) != 0x0c000000 ||
15708             tg3_nvram_read(tp, offset + 4, &val) ||
15709             val != 0)
15710                 return 0;
15711
15712         return 1;
15713 }
15714
15715 static void tg3_read_bc_ver(struct tg3 *tp)
15716 {
15717         u32 val, offset, start, ver_offset;
15718         int i, dst_off;
15719         bool newver = false;
15720
15721         if (tg3_nvram_read(tp, 0xc, &offset) ||
15722             tg3_nvram_read(tp, 0x4, &start))
15723                 return;
15724
15725         offset = tg3_nvram_logical_addr(tp, offset);
15726
15727         if (tg3_nvram_read(tp, offset, &val))
15728                 return;
15729
15730         if ((val & 0xfc000000) == 0x0c000000) {
15731                 if (tg3_nvram_read(tp, offset + 4, &val))
15732                         return;
15733
15734                 if (val == 0)
15735                         newver = true;
15736         }
15737
15738         dst_off = strlen(tp->fw_ver);
15739
15740         if (newver) {
15741                 if (TG3_VER_SIZE - dst_off < 16 ||
15742                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15743                         return;
15744
15745                 offset = offset + ver_offset - start;
15746                 for (i = 0; i < 16; i += 4) {
15747                         __be32 v;
15748                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15749                                 return;
15750
15751                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15752                 }
15753         } else {
15754                 u32 major, minor;
15755
15756                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15757                         return;
15758
15759                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15760                         TG3_NVM_BCVER_MAJSFT;
15761                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15762                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15763                          "v%d.%02d", major, minor);
15764         }
15765 }
15766
15767 static void tg3_read_hwsb_ver(struct tg3 *tp)
15768 {
15769         u32 val, major, minor;
15770
15771         /* Use native endian representation */
15772         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15773                 return;
15774
15775         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15776                 TG3_NVM_HWSB_CFG1_MAJSFT;
15777         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15778                 TG3_NVM_HWSB_CFG1_MINSFT;
15779
15780         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15781 }
15782
15783 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15784 {
15785         u32 offset, major, minor, build;
15786
15787         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15788
15789         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15790                 return;
15791
15792         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15793         case TG3_EEPROM_SB_REVISION_0:
15794                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15795                 break;
15796         case TG3_EEPROM_SB_REVISION_2:
15797                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15798                 break;
15799         case TG3_EEPROM_SB_REVISION_3:
15800                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15801                 break;
15802         case TG3_EEPROM_SB_REVISION_4:
15803                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15804                 break;
15805         case TG3_EEPROM_SB_REVISION_5:
15806                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15807                 break;
15808         case TG3_EEPROM_SB_REVISION_6:
15809                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15810                 break;
15811         default:
15812                 return;
15813         }
15814
15815         if (tg3_nvram_read(tp, offset, &val))
15816                 return;
15817
15818         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15819                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15820         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15821                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15822         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15823
15824         if (minor > 99 || build > 26)
15825                 return;
15826
15827         offset = strlen(tp->fw_ver);
15828         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15829                  " v%d.%02d", major, minor);
15830
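        /* Builds 1..26 map to a trailing letter 'a'..'z'; build 0 adds no
         * suffix (larger build numbers were rejected above).
         */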
15831         if (build > 0) {
15832                 offset = strlen(tp->fw_ver);
15833                 if (offset < TG3_VER_SIZE - 1)
15834                         tp->fw_ver[offset] = 'a' + build - 1;
15835         }
15836 }
15837
15838 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15839 {
15840         u32 val, offset, start;
15841         int i, vlen;
15842
15843         for (offset = TG3_NVM_DIR_START;
15844              offset < TG3_NVM_DIR_END;
15845              offset += TG3_NVM_DIRENT_SIZE) {
15846                 if (tg3_nvram_read(tp, offset, &val))
15847                         return;
15848
15849                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15850                         break;
15851         }
15852
15853         if (offset == TG3_NVM_DIR_END)
15854                 return;
15855
15856         if (!tg3_flag(tp, 5705_PLUS))
15857                 start = 0x08000000;
15858         else if (tg3_nvram_read(tp, offset - 4, &start))
15859                 return;
15860
15861         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15862             !tg3_fw_img_is_valid(tp, offset) ||
15863             tg3_nvram_read(tp, offset + 8, &val))
15864                 return;
15865
15866         offset += val - start;
15867
15868         vlen = strlen(tp->fw_ver);
15869
15870         tp->fw_ver[vlen++] = ',';
15871         tp->fw_ver[vlen++] = ' ';
15872
15873         for (i = 0; i < 4; i++) {
15874                 __be32 v;
15875                 if (tg3_nvram_read_be32(tp, offset, &v))
15876                         return;
15877
15878                 offset += sizeof(v);
15879
15880                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15881                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15882                         break;
15883                 }
15884
15885                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15886                 vlen += sizeof(v);
15887         }
15888 }
15889
15890 static void tg3_probe_ncsi(struct tg3 *tp)
15891 {
15892         u32 apedata;
15893
15894         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15895         if (apedata != APE_SEG_SIG_MAGIC)
15896                 return;
15897
15898         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15899         if (!(apedata & APE_FW_STATUS_READY))
15900                 return;
15901
15902         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15903                 tg3_flag_set(tp, APE_HAS_NCSI);
15904 }
15905
15906 static void tg3_read_dash_ver(struct tg3 *tp)
15907 {
15908         int vlen;
15909         u32 apedata;
15910         char *fwtype;
15911
15912         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15913
15914         if (tg3_flag(tp, APE_HAS_NCSI))
15915                 fwtype = "NCSI";
15916         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15917                 fwtype = "SMASH";
15918         else
15919                 fwtype = "DASH";
15920
15921         vlen = strlen(tp->fw_ver);
15922
15923         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15924                  fwtype,
15925                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15926                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15927                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15928                  (apedata & APE_FW_VERSION_BLDMSK));
15929 }
15930
15931 static void tg3_read_otp_ver(struct tg3 *tp)
15932 {
15933         u32 val, val2;
15934
15935         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15936                 return;
15937
15938         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15939             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15940             TG3_OTP_MAGIC0_VALID(val)) {
15941                 u64 val64 = (u64) val << 32 | val2;
15942                 u32 ver = 0;
15943                 int i, vlen;
15944
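                /* Scan up to seven bytes, low byte first, keeping the last
                 * non-zero byte seen as the version number.
                 */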
15945                 for (i = 0; i < 7; i++) {
15946                         if ((val64 & 0xff) == 0)
15947                                 break;
15948                         ver = val64 & 0xff;
15949                         val64 >>= 8;
15950                 }
15951                 vlen = strlen(tp->fw_ver);
15952                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15953         }
15954 }
15955
15956 static void tg3_read_fw_ver(struct tg3 *tp)
15957 {
15958         u32 val;
15959         bool vpd_vers = false;
15960
15961         if (tp->fw_ver[0] != 0)
15962                 vpd_vers = true;
15963
15964         if (tg3_flag(tp, NO_NVRAM)) {
15965                 strcat(tp->fw_ver, "sb");
15966                 tg3_read_otp_ver(tp);
15967                 return;
15968         }
15969
15970         if (tg3_nvram_read(tp, 0, &val))
15971                 return;
15972
15973         if (val == TG3_EEPROM_MAGIC)
15974                 tg3_read_bc_ver(tp);
15975         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15976                 tg3_read_sb_ver(tp, val);
15977         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15978                 tg3_read_hwsb_ver(tp);
15979
15980         if (tg3_flag(tp, ENABLE_ASF)) {
15981                 if (tg3_flag(tp, ENABLE_APE)) {
15982                         tg3_probe_ncsi(tp);
15983                         if (!vpd_vers)
15984                                 tg3_read_dash_ver(tp);
15985                 } else if (!vpd_vers) {
15986                         tg3_read_mgmtfw_ver(tp);
15987                 }
15988         }
15989
15990         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15991 }
15992
15993 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15994 {
15995         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15996                 return TG3_RX_RET_MAX_SIZE_5717;
15997         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15998                 return TG3_RX_RET_MAX_SIZE_5700;
15999         else
16000                 return TG3_RX_RET_MAX_SIZE_5705;
16001 }
16002
16003 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16004         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16005         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16006         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16007         { },
16008 };
16009
16010 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16011 {
16012         struct pci_dev *peer;
16013         unsigned int func, devnr = tp->pdev->devfn & ~7;
16014
16015         for (func = 0; func < 8; func++) {
16016                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16017                 if (peer && peer != tp->pdev)
16018                         break;
16019                 pci_dev_put(peer);
16020         }
16021         /* The 5704 can be configured in single-port mode; set peer to
16022          * tp->pdev in that case.
16023          */
16024         if (!peer) {
16025                 peer = tp->pdev;
16026                 return peer;
16027         }
16028
16029         /*
16030          * We don't need to keep the refcount elevated; there's no way
16031          * to remove one half of this device without removing the other.
16032          */
16033         pci_dev_put(peer);
16034
16035         return peer;
16036 }
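
/* Worked example of the devfn arithmetic above (PCI encodes devfn as
 * (slot << 3) | function): a device at devfn 0x0a (slot 1, function 2) gives
 * devnr 0x08, so the loop probes functions 0x08..0x0f of the same slot
 * looking for the other port.
 */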
16037
16038 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16039 {
16040         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16041         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16042                 u32 reg;
16043
16044                 /* All devices that use the alternate
16045                  * ASIC REV location have a CPMU.
16046                  */
16047                 tg3_flag_set(tp, CPMU_PRESENT);
16048
16049                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16050                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16051                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16052                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16053                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16054                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16055                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16056                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16057                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16058                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16059                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16060                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16061                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16062                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16063                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16064                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16065                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16066                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16067                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16068                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16069                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16070                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16071                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16072                 else
16073                         reg = TG3PCI_PRODID_ASICREV;
16074
16075                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16076         }
16077
16078         /* Wrong chip ID in 5752 A0. This code can be removed later
16079          * as A0 is not in production.
16080          */
16081         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16082                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16083
16084         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16085                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16086
16087         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16088             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16089             tg3_asic_rev(tp) == ASIC_REV_5720)
16090                 tg3_flag_set(tp, 5717_PLUS);
16091
16092         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16093             tg3_asic_rev(tp) == ASIC_REV_57766)
16094                 tg3_flag_set(tp, 57765_CLASS);
16095
16096         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16097              tg3_asic_rev(tp) == ASIC_REV_5762)
16098                 tg3_flag_set(tp, 57765_PLUS);
16099
16100         /* Intentionally exclude ASIC_REV_5906 */
16101         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16102             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16103             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16104             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16105             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16106             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16107             tg3_flag(tp, 57765_PLUS))
16108                 tg3_flag_set(tp, 5755_PLUS);
16109
16110         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16111             tg3_asic_rev(tp) == ASIC_REV_5714)
16112                 tg3_flag_set(tp, 5780_CLASS);
16113
16114         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16115             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16116             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16117             tg3_flag(tp, 5755_PLUS) ||
16118             tg3_flag(tp, 5780_CLASS))
16119                 tg3_flag_set(tp, 5750_PLUS);
16120
16121         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16122             tg3_flag(tp, 5750_PLUS))
16123                 tg3_flag_set(tp, 5705_PLUS);
16124 }
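
/* Decomposition sketch, assuming the usual tg3.h helpers (tg3_asic_rev()
 * returns pci_chip_rev_id >> 12 and tg3_chip_rev() returns
 * pci_chip_rev_id >> 8): a product ID register value of 0x05717000 read
 * above would decode to ASIC rev 0x5717, i.e. ASIC_REV_5717.
 */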
16125
16126 static bool tg3_10_100_only_device(struct tg3 *tp,
16127                                    const struct pci_device_id *ent)
16128 {
16129         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16130
16131         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16132              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16133             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16134                 return true;
16135
16136         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16137                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16138                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16139                                 return true;
16140                 } else {
16141                         return true;
16142                 }
16143         }
16144
16145         return false;
16146 }
16147
16148 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16149 {
16150         u32 misc_ctrl_reg;
16151         u32 pci_state_reg, grc_misc_cfg;
16152         u32 val;
16153         u16 pci_cmd;
16154         int err;
16155
16156         /* Force memory write invalidate off.  If we leave it on,
16157          * then on 5700_BX chips we have to enable a workaround.
16158          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16159          * to match the cacheline size.  The Broadcom driver has this
16160          * workaround but turns MWI off all the time and so never uses
16161          * it.  This suggests that the workaround is insufficient.
16162          */
16163         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16164         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16165         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16166
16167         /* Important! -- Make sure register accesses are byteswapped
16168          * correctly.  Also, for those chips that require it, make
16169          * sure that indirect register accesses are enabled before
16170          * the first operation.
16171          */
16172         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16173                               &misc_ctrl_reg);
16174         tp->misc_host_ctrl |= (misc_ctrl_reg &
16175                                MISC_HOST_CTRL_CHIPREV);
16176         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16177                                tp->misc_host_ctrl);
16178
16179         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16180
16181         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16182          * we need to disable memory and use configuration cycles
16183          * only to access all registers. The 5702/03 chips
16184          * can mistakenly decode the special cycles from the
16185          * ICH chipsets as memory write cycles, causing corruption
16186          * of register and memory space. Only certain ICH bridges
16187          * will drive special cycles with non-zero data during the
16188          * address phase which can fall within the 5703's address
16189          * range. This is not an ICH bug as the PCI spec allows
16190          * non-zero address during special cycles. However, only
16191          * these ICH bridges are known to drive non-zero addresses
16192          * during special cycles.
16193          *
16194          * Since special cycles do not cross PCI bridges, we only
16195          * enable this workaround if the 5703 is on the secondary
16196          * bus of these ICH bridges.
16197          */
16198         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16199             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16200                 static struct tg3_dev_id {
16201                         u32     vendor;
16202                         u32     device;
16203                         u32     rev;
16204                 } ich_chipsets[] = {
16205                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16206                           PCI_ANY_ID },
16207                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16208                           PCI_ANY_ID },
16209                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16210                           0xa },
16211                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16212                           PCI_ANY_ID },
16213                         { },
16214                 };
16215                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16216                 struct pci_dev *bridge = NULL;
16217
16218                 while (pci_id->vendor != 0) {
16219                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16220                                                 bridge);
16221                         if (!bridge) {
16222                                 pci_id++;
16223                                 continue;
16224                         }
16225                         if (pci_id->rev != PCI_ANY_ID) {
16226                                 if (bridge->revision > pci_id->rev)
16227                                         continue;
16228                         }
16229                         if (bridge->subordinate &&
16230                             (bridge->subordinate->number ==
16231                              tp->pdev->bus->number)) {
16232                                 tg3_flag_set(tp, ICH_WORKAROUND);
16233                                 pci_dev_put(bridge);
16234                                 break;
16235                         }
16236                 }
16237         }
16238
16239         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16240                 static struct tg3_dev_id {
16241                         u32     vendor;
16242                         u32     device;
16243                 } bridge_chipsets[] = {
16244                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16245                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16246                         { },
16247                 };
16248                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16249                 struct pci_dev *bridge = NULL;
16250
16251                 while (pci_id->vendor != 0) {
16252                         bridge = pci_get_device(pci_id->vendor,
16253                                                 pci_id->device,
16254                                                 bridge);
16255                         if (!bridge) {
16256                                 pci_id++;
16257                                 continue;
16258                         }
16259                         if (bridge->subordinate &&
16260                             (bridge->subordinate->number <=
16261                              tp->pdev->bus->number) &&
16262                             (bridge->subordinate->busn_res.end >=
16263                              tp->pdev->bus->number)) {
16264                                 tg3_flag_set(tp, 5701_DMA_BUG);
16265                                 pci_dev_put(bridge);
16266                                 break;
16267                         }
16268                 }
16269         }
16270
16271         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16272          * DMA addresses above 40 bits. This bridge may have additional
16273          * 57xx devices behind it in some 4-port NIC designs, for example.
16274          * Any tg3 device found behind the bridge will also need the 40-bit
16275          * DMA workaround.
16276          */
16277         if (tg3_flag(tp, 5780_CLASS)) {
16278                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16279                 tp->msi_cap = tp->pdev->msi_cap;
16280         } else {
16281                 struct pci_dev *bridge = NULL;
16282
16283                 do {
16284                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16285                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16286                                                 bridge);
16287                         if (bridge && bridge->subordinate &&
16288                             (bridge->subordinate->number <=
16289                              tp->pdev->bus->number) &&
16290                             (bridge->subordinate->busn_res.end >=
16291                              tp->pdev->bus->number)) {
16292                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16293                                 pci_dev_put(bridge);
16294                                 break;
16295                         }
16296                 } while (bridge);
16297         }
16298
16299         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16300             tg3_asic_rev(tp) == ASIC_REV_5714)
16301                 tp->pdev_peer = tg3_find_peer(tp);
16302
16303         /* Determine TSO capabilities */
16304         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16305                 ; /* Do nothing. HW bug. */
16306         else if (tg3_flag(tp, 57765_PLUS))
16307                 tg3_flag_set(tp, HW_TSO_3);
16308         else if (tg3_flag(tp, 5755_PLUS) ||
16309                  tg3_asic_rev(tp) == ASIC_REV_5906)
16310                 tg3_flag_set(tp, HW_TSO_2);
16311         else if (tg3_flag(tp, 5750_PLUS)) {
16312                 tg3_flag_set(tp, HW_TSO_1);
16313                 tg3_flag_set(tp, TSO_BUG);
16314                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16315                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16316                         tg3_flag_clear(tp, TSO_BUG);
16317         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16318                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16319                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16320                 tg3_flag_set(tp, FW_TSO);
16321                 tg3_flag_set(tp, TSO_BUG);
16322                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16323                         tp->fw_needed = FIRMWARE_TG3TSO5;
16324                 else
16325                         tp->fw_needed = FIRMWARE_TG3TSO;
16326         }
16327
16328         /* Selectively allow TSO based on operating conditions */
16329         if (tg3_flag(tp, HW_TSO_1) ||
16330             tg3_flag(tp, HW_TSO_2) ||
16331             tg3_flag(tp, HW_TSO_3) ||
16332             tg3_flag(tp, FW_TSO)) {
16333                 /* For firmware TSO, assume ASF is disabled.
16334                  * We'll disable TSO later if we discover ASF
16335                  * is enabled in tg3_get_eeprom_hw_cfg().
16336                  */
16337                 tg3_flag_set(tp, TSO_CAPABLE);
16338         } else {
16339                 tg3_flag_clear(tp, TSO_CAPABLE);
16340                 tg3_flag_clear(tp, TSO_BUG);
16341                 tp->fw_needed = NULL;
16342         }
16343
16344         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16345                 tp->fw_needed = FIRMWARE_TG3;
16346
16347         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16348                 tp->fw_needed = FIRMWARE_TG357766;
16349
16350         tp->irq_max = 1;
16351
16352         if (tg3_flag(tp, 5750_PLUS)) {
16353                 tg3_flag_set(tp, SUPPORT_MSI);
16354                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16355                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16356                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16357                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16358                      tp->pdev_peer == tp->pdev))
16359                         tg3_flag_clear(tp, SUPPORT_MSI);
16360
16361                 if (tg3_flag(tp, 5755_PLUS) ||
16362                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16363                         tg3_flag_set(tp, 1SHOT_MSI);
16364                 }
16365
16366                 if (tg3_flag(tp, 57765_PLUS)) {
16367                         tg3_flag_set(tp, SUPPORT_MSIX);
16368                         tp->irq_max = TG3_IRQ_MAX_VECS;
16369                 }
16370         }
16371
16372         tp->txq_max = 1;
16373         tp->rxq_max = 1;
16374         if (tp->irq_max > 1) {
16375                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16376                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16377
16378                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16379                     tg3_asic_rev(tp) == ASIC_REV_5720)
16380                         tp->txq_max = tp->irq_max - 1;
16381         }
16382
16383         if (tg3_flag(tp, 5755_PLUS) ||
16384             tg3_asic_rev(tp) == ASIC_REV_5906)
16385                 tg3_flag_set(tp, SHORT_DMA_BUG);
16386
16387         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16388                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16389
16390         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16391             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16392             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16393             tg3_asic_rev(tp) == ASIC_REV_5762)
16394                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16395
16396         if (tg3_flag(tp, 57765_PLUS) &&
16397             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16398                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16399
16400         if (!tg3_flag(tp, 5705_PLUS) ||
16401             tg3_flag(tp, 5780_CLASS) ||
16402             tg3_flag(tp, USE_JUMBO_BDFLAG))
16403                 tg3_flag_set(tp, JUMBO_CAPABLE);
16404
16405         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16406                               &pci_state_reg);
16407
16408         if (pci_is_pcie(tp->pdev)) {
16409                 u16 lnkctl;
16410
16411                 tg3_flag_set(tp, PCI_EXPRESS);
16412
16413                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16414                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16415                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16416                                 tg3_flag_clear(tp, HW_TSO_2);
16417                                 tg3_flag_clear(tp, TSO_CAPABLE);
16418                         }
16419                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16420                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16421                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16422                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16423                                 tg3_flag_set(tp, CLKREQ_BUG);
16424                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16425                         tg3_flag_set(tp, L1PLLPD_EN);
16426                 }
16427         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16428                 /* BCM5785 devices are effectively PCIe devices, and should
16429                  * follow PCIe codepaths, but do not have a PCIe capabilities
16430                  * section.
16431                  */
16432                 tg3_flag_set(tp, PCI_EXPRESS);
16433         } else if (!tg3_flag(tp, 5705_PLUS) ||
16434                    tg3_flag(tp, 5780_CLASS)) {
16435                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16436                 if (!tp->pcix_cap) {
16437                         dev_err(&tp->pdev->dev,
16438                                 "Cannot find PCI-X capability, aborting\n");
16439                         return -EIO;
16440                 }
16441
16442                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16443                         tg3_flag_set(tp, PCIX_MODE);
16444         }
16445
16446         /* If we have an AMD 762 or VIA K8T800 chipset, write
16447          * reordering to the mailbox registers done by the host
16448          * controller can cause major trouble.  We read back after
16449          * every mailbox register write to force the writes to be
16450          * posted to the chip in order.
16451          */
16452         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16453             !tg3_flag(tp, PCI_EXPRESS))
16454                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
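
        /* The read-back flush referenced above boils down to this pattern
         * (illustrative sketch, not the driver's exact helper):
         *
         *      writel(val, mbox);
         *      readl(mbox);
         *
         * where the read back forces the posted write to reach the chip
         * before anything else is issued.
         */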
16455
16456         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16457                              &tp->pci_cacheline_sz);
16458         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16459                              &tp->pci_lat_timer);
16460         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16461             tp->pci_lat_timer < 64) {
16462                 tp->pci_lat_timer = 64;
16463                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16464                                       tp->pci_lat_timer);
16465         }
16466
16467         /* Important! -- It is critical that the PCI-X hw workaround
16468          * situation is decided before the first MMIO register access.
16469          */
16470         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16471                 /* 5700 BX chips need to have their TX producer index
16472                  * mailboxes written twice to work around a bug.
16473                  */
16474                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16475
16476                 /* If we are in PCI-X mode, enable register write workaround.
16477                  *
16478                  * The workaround is to use indirect register accesses
16479                  * for all chip writes except those to mailbox registers.
16480                  */
16481                 if (tg3_flag(tp, PCIX_MODE)) {
16482                         u32 pm_reg;
16483
16484                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16485
16486                         /* The chip can have its power management PCI config
16487                          * space registers clobbered due to this bug.
16488                          * So explicitly force the chip into D0 here.
16489                          */
16490                         pci_read_config_dword(tp->pdev,
16491                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16492                                               &pm_reg);
16493                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16494                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16495                         pci_write_config_dword(tp->pdev,
16496                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16497                                                pm_reg);
16498
16499                         /* Also, force SERR#/PERR# in PCI command. */
16500                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16501                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16502                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16503                 }
16504         }
16505
16506         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16507                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16508         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16509                 tg3_flag_set(tp, PCI_32BIT);
16510
16511         /* Chip-specific fixup from Broadcom driver */
16512         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16513             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16514                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16515                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16516         }
16517
16518         /* Default fast path register access methods */
16519         tp->read32 = tg3_read32;
16520         tp->write32 = tg3_write32;
16521         tp->read32_mbox = tg3_read32;
16522         tp->write32_mbox = tg3_write32;
16523         tp->write32_tx_mbox = tg3_write32;
16524         tp->write32_rx_mbox = tg3_write32;
16525
16526         /* Various workaround register access methods */
16527         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16528                 tp->write32 = tg3_write_indirect_reg32;
16529         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16530                  (tg3_flag(tp, PCI_EXPRESS) &&
16531                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16532                 /*
16533                  * Back to back register writes can cause problems on these
16534                  * chips; the workaround is to read back all reg writes
16535                  * except those to mailbox regs.
16536                  *
16537                  * See tg3_write_indirect_reg32().
16538                  */
16539                 tp->write32 = tg3_write_flush_reg32;
16540         }
16541
16542         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16543                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16544                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16545                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16546         }
16547
16548         if (tg3_flag(tp, ICH_WORKAROUND)) {
16549                 tp->read32 = tg3_read_indirect_reg32;
16550                 tp->write32 = tg3_write_indirect_reg32;
16551                 tp->read32_mbox = tg3_read_indirect_mbox;
16552                 tp->write32_mbox = tg3_write_indirect_mbox;
16553                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16554                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16555
16556                 iounmap(tp->regs);
16557                 tp->regs = NULL;
16558
16559                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16560                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16561                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16562         }
16563         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16564                 tp->read32_mbox = tg3_read32_mbox_5906;
16565                 tp->write32_mbox = tg3_write32_mbox_5906;
16566                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16567                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16568         }
16569
16570         if (tp->write32 == tg3_write_indirect_reg32 ||
16571             (tg3_flag(tp, PCIX_MODE) &&
16572              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16573               tg3_asic_rev(tp) == ASIC_REV_5701)))
16574                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16575
16576         /* The memory arbiter has to be enabled in order for SRAM accesses
16577          * to succeed.  Normally on powerup the tg3 chip firmware will make
16578          * sure it is enabled, but other entities such as system netboot
16579          * code might disable it.
16580          */
16581         val = tr32(MEMARB_MODE);
16582         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16583
16584         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16585         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16586             tg3_flag(tp, 5780_CLASS)) {
16587                 if (tg3_flag(tp, PCIX_MODE)) {
16588                         pci_read_config_dword(tp->pdev,
16589                                               tp->pcix_cap + PCI_X_STATUS,
16590                                               &val);
16591                         tp->pci_fn = val & 0x7;
16592                 }
16593         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16594                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16595                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16596                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16597                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16598                         val = tr32(TG3_CPMU_STATUS);
16599
16600                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16601                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16602                 else
16603                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16604                                      TG3_CPMU_STATUS_FSHFT_5719;
16605         }
16606
16607         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16608                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16609                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16610         }
16611
16612         /* Get eeprom hw config before calling tg3_set_power_state().
16613          * In particular, the TG3_FLAG_IS_NIC flag must be
16614          * determined before calling tg3_set_power_state() so that
16615          * we know whether or not to switch out of Vaux power.
16616          * When the flag is set, it means that GPIO1 is used for eeprom
16617          * write protect and also implies that it is a LOM where GPIOs
16618          * are not used to switch power.
16619          */
16620         tg3_get_eeprom_hw_cfg(tp);
16621
16622         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16623                 tg3_flag_clear(tp, TSO_CAPABLE);
16624                 tg3_flag_clear(tp, TSO_BUG);
16625                 tp->fw_needed = NULL;
16626         }
16627
16628         if (tg3_flag(tp, ENABLE_APE)) {
16629                 /* Allow reads and writes to the
16630                  * APE register and memory space.
16631                  */
16632                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16633                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16634                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16635                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16636                                        pci_state_reg);
16637
16638                 tg3_ape_lock_init(tp);
16639         }
16640
16641         /* Set up tp->grc_local_ctrl before calling
16642          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16643          * will bring 5700's external PHY out of reset.
16644          * It is also used as eeprom write protect on LOMs.
16645          */
16646         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16647         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16648             tg3_flag(tp, EEPROM_WRITE_PROT))
16649                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16650                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16651         /* Unused GPIO3 must be driven as output on 5752 because there
16652          * are no pull-up resistors on unused GPIO pins.
16653          */
16654         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16655                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16656
16657         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16658             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16659             tg3_flag(tp, 57765_CLASS))
16660                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16661
16662         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16663             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16664                 /* Turn off the debug UART. */
16665                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16666                 if (tg3_flag(tp, IS_NIC))
16667                         /* Keep VMain power. */
16668                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16669                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16670         }
16671
16672         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16673                 tp->grc_local_ctrl |=
16674                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16675
16676         /* Switch out of Vaux if it is a NIC */
16677         tg3_pwrsrc_switch_to_vmain(tp);
16678
16679         /* Derive initial jumbo mode from MTU assigned in
16680          * ether_setup() via the alloc_etherdev() call
16681          */
16682         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16683                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16684
16685         /* Determine WakeOnLan speed to use. */
16686         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16687             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16688             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16689             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16690                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16691         } else {
16692                 tg3_flag_set(tp, WOL_SPEED_100MB);
16693         }
16694
16695         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16696                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16697
16698         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16699         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16700             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16701              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16702              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16703             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16704             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16705                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16706
16707         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16708             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16709                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16710         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16711                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16712
16713         if (tg3_flag(tp, 5705_PLUS) &&
16714             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16715             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16716             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16717             !tg3_flag(tp, 57765_PLUS)) {
16718                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16719                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16720                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16721                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16722                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16723                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16724                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16725                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16726                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16727                 } else
16728                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16729         }
16730
16731         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16732             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16733                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16734                 if (tp->phy_otp == 0)
16735                         tp->phy_otp = TG3_OTP_DEFAULT;
16736         }
16737
16738         if (tg3_flag(tp, CPMU_PRESENT))
16739                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16740         else
16741                 tp->mi_mode = MAC_MI_MODE_BASE;
16742
16743         tp->coalesce_mode = 0;
16744         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16745             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16746                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16747
16748         /* Set these bits to enable statistics workaround. */
16749         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16750             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16751             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16752             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16753                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16754                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16755         }
16756
16757         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16758             tg3_asic_rev(tp) == ASIC_REV_57780)
16759                 tg3_flag_set(tp, USE_PHYLIB);
16760
16761         err = tg3_mdio_init(tp);
16762         if (err)
16763                 return err;
16764
16765         /* Initialize data/descriptor byte/word swapping. */
16766         val = tr32(GRC_MODE);
16767         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16768             tg3_asic_rev(tp) == ASIC_REV_5762)
16769                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16770                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16771                         GRC_MODE_B2HRX_ENABLE |
16772                         GRC_MODE_HTX2B_ENABLE |
16773                         GRC_MODE_HOST_STACKUP);
16774         else
16775                 val &= GRC_MODE_HOST_STACKUP;
16776
16777         tw32(GRC_MODE, val | tp->grc_mode);
16778
16779         tg3_switch_clocks(tp);
16780
16781         /* Clear this out for sanity. */
16782         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16783
16784         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16785         tw32(TG3PCI_REG_BASE_ADDR, 0);
16786
16787         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16788                               &pci_state_reg);
16789         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16790             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16791                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16792                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16793                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16794                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16795                         void __iomem *sram_base;
16796
16797                         /* Write some dummy words into the SRAM status block
16798                          * area and see if they read back correctly.  If the
16799                          * read-back value is bad, force-enable the PCI-X workaround.
16800                          */
16801                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16802
16803                         writel(0x00000000, sram_base);
16804                         writel(0x00000000, sram_base + 4);
16805                         writel(0xffffffff, sram_base + 4);
16806                         if (readl(sram_base) != 0x00000000)
16807                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16808                 }
16809         }
16810
16811         udelay(50);
16812         tg3_nvram_init(tp);
16813
16814         /* If the device has an NVRAM, no need to load patch firmware */
16815         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16816             !tg3_flag(tp, NO_NVRAM))
16817                 tp->fw_needed = NULL;
16818
16819         grc_misc_cfg = tr32(GRC_MISC_CFG);
16820         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16821
16822         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16823             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16824              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16825                 tg3_flag_set(tp, IS_5788);
16826
16827         if (!tg3_flag(tp, IS_5788) &&
16828             tg3_asic_rev(tp) != ASIC_REV_5700)
16829                 tg3_flag_set(tp, TAGGED_STATUS);
16830         if (tg3_flag(tp, TAGGED_STATUS)) {
16831                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16832                                       HOSTCC_MODE_CLRTICK_TXBD);
16833
16834                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16835                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16836                                        tp->misc_host_ctrl);
16837         }
16838
16839         /* Preserve the APE MAC_MODE bits */
16840         if (tg3_flag(tp, ENABLE_APE))
16841                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16842         else
16843                 tp->mac_mode = 0;
16844
16845         if (tg3_10_100_only_device(tp, ent))
16846                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16847
16848         err = tg3_phy_probe(tp);
16849         if (err) {
16850                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16851                 /* ... but do not return immediately ... */
16852                 tg3_mdio_fini(tp);
16853         }
16854
16855         tg3_read_vpd(tp);
16856         tg3_read_fw_ver(tp);
16857
16858         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16859                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16860         } else {
16861                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16862                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16863                 else
16864                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16865         }
16866
16867         /* 5700 {AX,BX} chips have a broken status block link
16868          * change bit implementation, so we must use the
16869          * status register in those cases.
16870          */
16871         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16872                 tg3_flag_set(tp, USE_LINKCHG_REG);
16873         else
16874                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16875
16876         /* The led_ctrl is set during tg3_phy_probe; here we might
16877          * have to force the link status polling mechanism based
16878          * upon subsystem IDs.
16879          */
16880         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16881             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16882             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16883                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16884                 tg3_flag_set(tp, USE_LINKCHG_REG);
16885         }
16886
16887         /* For all SERDES we poll the MAC status register. */
16888         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16889                 tg3_flag_set(tp, POLL_SERDES);
16890         else
16891                 tg3_flag_clear(tp, POLL_SERDES);
16892
16893         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16894                 tg3_flag_set(tp, POLL_CPMU_LINK);
16895
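              /* By default reserve NET_IP_ALIGN bytes of headroom so the IP
               * header lands on an aligned address.  The 5701 in PCI-X mode
               * does not handle 2-byte-offset DMA addresses, so no such
               * offset is used there; on architectures without efficient
               * unaligned access, compensate by copying every received
               * packet into an aligned buffer (copy threshold 0xffff).
               */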
16896         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16897         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16898         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16899             tg3_flag(tp, PCIX_MODE)) {
16900                 tp->rx_offset = NET_SKB_PAD;
16901 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16902                 tp->rx_copy_thresh = ~(u16)0;
16903 #endif
16904         }
16905
16906         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16907         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16908         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16909
16910         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16911
16912         /* Increment the rx prod index on the rx std ring by at most
16913          * 8 for these chips to work around hw errata.
16914          */
16915         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16916             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16917             tg3_asic_rev(tp) == ASIC_REV_5755)
16918                 tp->rx_std_max_post = 8;
16919
16920         if (tg3_flag(tp, ASPM_WORKAROUND))
16921                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16922                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16923
16924         return err;
16925 }
16926
16927 #ifdef CONFIG_SPARC
16928 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16929 {
16930         struct net_device *dev = tp->dev;
16931         struct pci_dev *pdev = tp->pdev;
16932         struct device_node *dp = pci_device_to_OF_node(pdev);
16933         const unsigned char *addr;
16934         int len;
16935
16936         addr = of_get_property(dp, "local-mac-address", &len);
16937         if (addr && len == ETH_ALEN) {
16938                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16939                 return 0;
16940         }
16941         return -ENODEV;
16942 }
16943
16944 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16945 {
16946         struct net_device *dev = tp->dev;
16947
16948         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16949         return 0;
16950 }
16951 #endif
16952
16953 static int tg3_get_device_address(struct tg3 *tp)
16954 {
16955         struct net_device *dev = tp->dev;
16956         u32 hi, lo, mac_offset;
16957         int addr_ok = 0;
16958         int err;
16959
16960 #ifdef CONFIG_SPARC
16961         if (!tg3_get_macaddr_sparc(tp))
16962                 return 0;
16963 #endif
16964
16965         if (tg3_flag(tp, IS_SSB_CORE)) {
16966                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16967                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16968                         return 0;
16969         }
16970
16971         mac_offset = 0x7c;
16972         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16973             tg3_flag(tp, 5780_CLASS)) {
16974                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16975                         mac_offset = 0xcc;
16976                 if (tg3_nvram_lock(tp))
16977                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16978                 else
16979                         tg3_nvram_unlock(tp);
16980         } else if (tg3_flag(tp, 5717_PLUS)) {
16981                 if (tp->pci_fn & 1)
16982                         mac_offset = 0xcc;
16983                 if (tp->pci_fn > 1)
16984                         mac_offset += 0x18c;
16985         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16986                 mac_offset = 0x10;
16987
16988         /* First try to get it from MAC address mailbox. */
16989         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16990         if ((hi >> 16) == 0x484b) {
16991                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16992                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16993
16994                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16995                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16996                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16997                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16998                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16999
17000                 /* Some old bootcode may report a 0 MAC address in SRAM */
17001                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17002         }
17003         if (!addr_ok) {
17004                 /* Next, try NVRAM. */
17005                 if (!tg3_flag(tp, NO_NVRAM) &&
17006                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17007                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17008                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17009                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17010                 }
17011                 /* Finally just fetch it out of the MAC control regs. */
17012                 else {
17013                         hi = tr32(MAC_ADDR_0_HIGH);
17014                         lo = tr32(MAC_ADDR_0_LOW);
17015
17016                         dev->dev_addr[5] = lo & 0xff;
17017                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17018                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17019                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17020                         dev->dev_addr[1] = hi & 0xff;
17021                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17022                 }
17023         }
17024
17025         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17026 #ifdef CONFIG_SPARC
17027                 if (!tg3_get_default_macaddr_sparc(tp))
17028                         return 0;
17029 #endif
17030                 return -EINVAL;
17031         }
17032         return 0;
17033 }
17034
17035 #define BOUNDARY_SINGLE_CACHELINE       1
17036 #define BOUNDARY_MULTI_CACHELINE        2
17037
17038 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17039 {
17040         int cacheline_size;
17041         u8 byte;
17042         int goal;
17043
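              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
               * multiply by 4 below; a value of 0 (never configured) is
               * treated as a 1024-byte cache line.
               */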
17044         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17045         if (byte == 0)
17046                 cacheline_size = 1024;
17047         else
17048                 cacheline_size = (int) byte * 4;
17049
17050         /* On 5703 and later chips, the boundary bits have no
17051          * effect, except on PCI Express devices (handled below).
17052          */
17053         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17054             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17055             !tg3_flag(tp, PCI_EXPRESS))
17056                 goto out;
17057
17058 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17059         goal = BOUNDARY_MULTI_CACHELINE;
17060 #else
17061 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17062         goal = BOUNDARY_SINGLE_CACHELINE;
17063 #else
17064         goal = 0;
17065 #endif
17066 #endif
17067
17068         if (tg3_flag(tp, 57765_PLUS)) {
17069                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17070                 goto out;
17071         }
17072
17073         if (!goal)
17074                 goto out;
17075
17076         /* PCI controllers on most RISC systems tend to disconnect
17077          * when a device tries to burst across a cache-line boundary.
17078          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17079          *
17080          * Unfortunately, for PCI-E there are only limited
17081          * write-side controls for this, and thus for reads
17082          * we will still get the disconnects.  We'll also waste
17083          * these PCI cycles for both read and write for chips
17084          * other than 5700 and 5701 which do not implement the
17085          * boundary bits.
17086          */
17087         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17088                 switch (cacheline_size) {
17089                 case 16:
17090                 case 32:
17091                 case 64:
17092                 case 128:
17093                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17094                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17095                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17096                         } else {
17097                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17098                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17099                         }
17100                         break;
17101
17102                 case 256:
17103                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17104                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17105                         break;
17106
17107                 default:
17108                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17109                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17110                         break;
17111                 }
17112         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17113                 switch (cacheline_size) {
17114                 case 16:
17115                 case 32:
17116                 case 64:
17117                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17118                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17119                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17120                                 break;
17121                         }
17122                         /* fallthrough */
17123                 case 128:
17124                 default:
17125                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17126                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17127                         break;
17128                 }
17129         } else {
17130                 switch (cacheline_size) {
17131                 case 16:
17132                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17133                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17134                                         DMA_RWCTRL_WRITE_BNDRY_16);
17135                                 break;
17136                         }
17137                         /* fallthrough */
17138                 case 32:
17139                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17140                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17141                                         DMA_RWCTRL_WRITE_BNDRY_32);
17142                                 break;
17143                         }
17144                         /* fallthrough */
17145                 case 64:
17146                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17147                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17148                                         DMA_RWCTRL_WRITE_BNDRY_64);
17149                                 break;
17150                         }
17151                         /* fallthrough */
17152                 case 128:
17153                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17154                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17155                                         DMA_RWCTRL_WRITE_BNDRY_128);
17156                                 break;
17157                         }
17158                         /* fallthrough */
17159                 case 256:
17160                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17161                                 DMA_RWCTRL_WRITE_BNDRY_256);
17162                         break;
17163                 case 512:
17164                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17165                                 DMA_RWCTRL_WRITE_BNDRY_512);
17166                         break;
17167                 case 1024:
17168                 default:
17169                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17170                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17171                         break;
17172                 }
17173         }
17174
17175 out:
17176         return val;
17177 }
17178
17179 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17180                            int size, bool to_device)
17181 {
17182         struct tg3_internal_buffer_desc test_desc;
17183         u32 sram_dma_descs;
17184         int i, ret;
17185
17186         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17187
17188         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17189         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17190         tw32(RDMAC_STATUS, 0);
17191         tw32(WDMAC_STATUS, 0);
17192
17193         tw32(BUFMGR_MODE, 0);
17194         tw32(FTQ_RESET, 0);
17195
17196         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17197         test_desc.addr_lo = buf_dma & 0xffffffff;
17198         test_desc.nic_mbuf = 0x00002100;
17199         test_desc.len = size;
17200
17201         /*
17202          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17203          * the *second* time the tg3 driver was getting loaded after an
17204          * initial scan.
17205          *
17206          * Broadcom tells me:
17207          *   ...the DMA engine is connected to the GRC block and a DMA
17208          *   reset may affect the GRC block in some unpredictable way...
17209          *   The behavior of resets to individual blocks has not been tested.
17210          *
17211          * Broadcom noted the GRC reset will also reset all sub-components.
17212          */
17213         if (to_device) {
17214                 test_desc.cqid_sqid = (13 << 8) | 2;
17215
17216                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17217                 udelay(40);
17218         } else {
17219                 test_desc.cqid_sqid = (16 << 8) | 7;
17220
17221                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17222                 udelay(40);
17223         }
17224         test_desc.flags = 0x00000005;
17225
17226         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17227                 u32 val;
17228
17229                 val = *(((u32 *)&test_desc) + i);
17230                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17231                                        sram_dma_descs + (i * sizeof(u32)));
17232                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17233         }
17234         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17235
17236         if (to_device)
17237                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17238         else
17239                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17240
17241         ret = -ENODEV;
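              /* Poll the completion FIFO for up to 40 * 100us = 4ms, waiting
               * for the descriptor queued above to show up, which signals
               * that the DMA transfer has finished.
               */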
17242         for (i = 0; i < 40; i++) {
17243                 u32 val;
17244
17245                 if (to_device)
17246                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17247                 else
17248                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17249                 if ((val & 0xffff) == sram_dma_descs) {
17250                         ret = 0;
17251                         break;
17252                 }
17253
17254                 udelay(100);
17255         }
17256
17257         return ret;
17258 }
17259
17260 #define TEST_BUFFER_SIZE        0x2000
17261
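      /* Hosts known to expose the 5700/5701 write DMA bug even though the
       * DMA test passes; tg3_test_dma() forces the 16-byte write boundary
       * workaround when one of these is present.
       */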
17262 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17263         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17264         { },
17265 };
17266
17267 static int tg3_test_dma(struct tg3 *tp)
17268 {
17269         dma_addr_t buf_dma;
17270         u32 *buf, saved_dma_rwctrl;
17271         int ret = 0;
17272
17273         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17274                                  &buf_dma, GFP_KERNEL);
17275         if (!buf) {
17276                 ret = -ENOMEM;
17277                 goto out_nofree;
17278         }
17279
17280         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17281                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17282
17283         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17284
17285         if (tg3_flag(tp, 57765_PLUS))
17286                 goto out;
17287
17288         if (tg3_flag(tp, PCI_EXPRESS)) {
17289                 /* DMA read watermark not used on PCIE */
17290                 tp->dma_rwctrl |= 0x00180000;
17291         } else if (!tg3_flag(tp, PCIX_MODE)) {
17292                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17293                     tg3_asic_rev(tp) == ASIC_REV_5750)
17294                         tp->dma_rwctrl |= 0x003f0000;
17295                 else
17296                         tp->dma_rwctrl |= 0x003f000f;
17297         } else {
17298                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17299                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17300                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17301                         u32 read_water = 0x7;
17302
17303                         /* If the 5704 is behind the EPB bridge, we can
17304                          * do the less restrictive ONE_DMA workaround for
17305                          * better performance.
17306                          */
17307                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17308                             tg3_asic_rev(tp) == ASIC_REV_5704)
17309                                 tp->dma_rwctrl |= 0x8000;
17310                         else if (ccval == 0x6 || ccval == 0x7)
17311                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17312
17313                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17314                                 read_water = 4;
17315                         /* Set bit 23 to enable PCIX hw bug fix */
17316                         tp->dma_rwctrl |=
17317                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17318                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17319                                 (1 << 23);
17320                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17321                         /* 5780 always in PCIX mode */
17322                         tp->dma_rwctrl |= 0x00144000;
17323                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17324                         /* 5714 always in PCIX mode */
17325                         tp->dma_rwctrl |= 0x00148000;
17326                 } else {
17327                         tp->dma_rwctrl |= 0x001b000f;
17328                 }
17329         }
17330         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17331                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17332
17333         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17334             tg3_asic_rev(tp) == ASIC_REV_5704)
17335                 tp->dma_rwctrl &= 0xfffffff0;
17336
17337         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17338             tg3_asic_rev(tp) == ASIC_REV_5701) {
17339                 /* Remove this if it causes problems for some boards. */
17340                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17341
17342                 /* On 5700/5701 chips, we need to set this bit.
17343                  * Otherwise the chip will issue cacheline transactions
17344                  * to streamable DMA memory with not all the byte
17345                  * enables turned on.  This is an error on several
17346                  * RISC PCI controllers, in particular sparc64.
17347                  *
17348                  * On 5703/5704 chips, this bit has been reassigned
17349                  * a different meaning.  In particular, it is used
17350                  * on those chips to enable a PCI-X workaround.
17351                  */
17352                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17353         }
17354
17355         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17356
17357
17358         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17359             tg3_asic_rev(tp) != ASIC_REV_5701)
17360                 goto out;
17361
17362         /* It is best to perform the DMA test with maximum write burst size
17363          * to expose the 5700/5701 write DMA bug.
17364          */
17365         saved_dma_rwctrl = tp->dma_rwctrl;
17366         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17367         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17368
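              /* Round-trip a counting pattern through the chip: write the
               * buffer out, read it back, and verify.  On corruption, clamp
               * the write boundary to 16 bytes and retry before giving up.
               */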
17369         while (1) {
17370                 u32 *p = buf, i;
17371
17372                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17373                         p[i] = i;
17374
17375                 /* Send the buffer to the chip. */
17376                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17377                 if (ret) {
17378                         dev_err(&tp->pdev->dev,
17379                                 "%s: Buffer write failed. err = %d\n",
17380                                 __func__, ret);
17381                         break;
17382                 }
17383
17384                 /* Now read it back. */
17385                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17386                 if (ret) {
17387                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17388                                 "err = %d\n", __func__, ret);
17389                         break;
17390                 }
17391
17392                 /* Verify it. */
17393                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17394                         if (p[i] == i)
17395                                 continue;
17396
17397                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17398                             DMA_RWCTRL_WRITE_BNDRY_16) {
17399                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17400                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17401                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17402                                 break;
17403                         } else {
17404                                 dev_err(&tp->pdev->dev,
17405                                         "%s: Buffer corrupted on read back! "
17406                                         "(%d != %d)\n", __func__, p[i], i);
17407                                 ret = -ENODEV;
17408                                 goto out;
17409                         }
17410                 }
17411
17412                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17413                         /* Success. */
17414                         ret = 0;
17415                         break;
17416                 }
17417         }
17418         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17419             DMA_RWCTRL_WRITE_BNDRY_16) {
17420                 /* DMA test passed without adjusting DMA boundary,
17421                  * now look for chipsets that are known to expose the
17422                  * DMA bug without failing the test.
17423                  */
17424                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17425                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17426                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17427                 } else {
17428                         /* Safe to use the calculated DMA boundary. */
17429                         tp->dma_rwctrl = saved_dma_rwctrl;
17430                 }
17431
17432                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17433         }
17434
17435 out:
17436         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17437 out_nofree:
17438         return ret;
17439 }
17440
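      /* Program per-ASIC defaults for the on-chip buffer manager (mbuf)
       * watermarks; the _jumbo variants take effect when the jumbo RX
       * ring is in use.
       */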
17441 static void tg3_init_bufmgr_config(struct tg3 *tp)
17442 {
17443         if (tg3_flag(tp, 57765_PLUS)) {
17444                 tp->bufmgr_config.mbuf_read_dma_low_water =
17445                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17446                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17447                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17448                 tp->bufmgr_config.mbuf_high_water =
17449                         DEFAULT_MB_HIGH_WATER_57765;
17450
17451                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17452                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17453                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17454                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17455                 tp->bufmgr_config.mbuf_high_water_jumbo =
17456                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17457         } else if (tg3_flag(tp, 5705_PLUS)) {
17458                 tp->bufmgr_config.mbuf_read_dma_low_water =
17459                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17460                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17461                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17462                 tp->bufmgr_config.mbuf_high_water =
17463                         DEFAULT_MB_HIGH_WATER_5705;
17464                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17465                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17466                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17467                         tp->bufmgr_config.mbuf_high_water =
17468                                 DEFAULT_MB_HIGH_WATER_5906;
17469                 }
17470
17471                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17472                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17473                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17474                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17475                 tp->bufmgr_config.mbuf_high_water_jumbo =
17476                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17477         } else {
17478                 tp->bufmgr_config.mbuf_read_dma_low_water =
17479                         DEFAULT_MB_RDMA_LOW_WATER;
17480                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17481                         DEFAULT_MB_MACRX_LOW_WATER;
17482                 tp->bufmgr_config.mbuf_high_water =
17483                         DEFAULT_MB_HIGH_WATER;
17484
17485                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17486                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17487                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17488                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17489                 tp->bufmgr_config.mbuf_high_water_jumbo =
17490                         DEFAULT_MB_HIGH_WATER_JUMBO;
17491         }
17492
17493         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17494         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17495 }
17496
17497 static char *tg3_phy_string(struct tg3 *tp)
17498 {
17499         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17500         case TG3_PHY_ID_BCM5400:        return "5400";
17501         case TG3_PHY_ID_BCM5401:        return "5401";
17502         case TG3_PHY_ID_BCM5411:        return "5411";
17503         case TG3_PHY_ID_BCM5701:        return "5701";
17504         case TG3_PHY_ID_BCM5703:        return "5703";
17505         case TG3_PHY_ID_BCM5704:        return "5704";
17506         case TG3_PHY_ID_BCM5705:        return "5705";
17507         case TG3_PHY_ID_BCM5750:        return "5750";
17508         case TG3_PHY_ID_BCM5752:        return "5752";
17509         case TG3_PHY_ID_BCM5714:        return "5714";
17510         case TG3_PHY_ID_BCM5780:        return "5780";
17511         case TG3_PHY_ID_BCM5755:        return "5755";
17512         case TG3_PHY_ID_BCM5787:        return "5787";
17513         case TG3_PHY_ID_BCM5784:        return "5784";
17514         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17515         case TG3_PHY_ID_BCM5906:        return "5906";
17516         case TG3_PHY_ID_BCM5761:        return "5761";
17517         case TG3_PHY_ID_BCM5718C:       return "5718C";
17518         case TG3_PHY_ID_BCM5718S:       return "5718S";
17519         case TG3_PHY_ID_BCM57765:       return "57765";
17520         case TG3_PHY_ID_BCM5719C:       return "5719C";
17521         case TG3_PHY_ID_BCM5720C:       return "5720C";
17522         case TG3_PHY_ID_BCM5762:        return "5762C";
17523         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17524         case 0:                 return "serdes";
17525         default:                return "unknown";
17526         }
17527 }
17528
17529 static char *tg3_bus_string(struct tg3 *tp, char *str)
17530 {
17531         if (tg3_flag(tp, PCI_EXPRESS)) {
17532                 strcpy(str, "PCI Express");
17533                 return str;
17534         } else if (tg3_flag(tp, PCIX_MODE)) {
17535                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17536
17537                 strcpy(str, "PCIX:");
17538
17539                 if ((clock_ctrl == 7) ||
17540                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17541                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17542                         strcat(str, "133MHz");
17543                 else if (clock_ctrl == 0)
17544                         strcat(str, "33MHz");
17545                 else if (clock_ctrl == 2)
17546                         strcat(str, "50MHz");
17547                 else if (clock_ctrl == 4)
17548                         strcat(str, "66MHz");
17549                 else if (clock_ctrl == 6)
17550                         strcat(str, "100MHz");
17551         } else {
17552                 strcpy(str, "PCI:");
17553                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17554                         strcat(str, "66MHz");
17555                 else
17556                         strcat(str, "33MHz");
17557         }
17558         if (tg3_flag(tp, PCI_32BIT))
17559                 strcat(str, ":32-bit");
17560         else
17561                 strcat(str, ":64-bit");
17562         return str;
17563 }
17564
17565 static void tg3_init_coal(struct tg3 *tp)
17566 {
17567         struct ethtool_coalesce *ec = &tp->coal;
17568
17569         memset(ec, 0, sizeof(*ec));
17570         ec->cmd = ETHTOOL_GCOALESCE;
17571         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17572         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17573         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17574         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17575         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17576         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17577         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17578         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17579         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17580
17581         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17582                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17583                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17584                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17585                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17586                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17587         }
17588
17589         if (tg3_flag(tp, 5705_PLUS)) {
17590                 ec->rx_coalesce_usecs_irq = 0;
17591                 ec->tx_coalesce_usecs_irq = 0;
17592                 ec->stats_block_coalesce_usecs = 0;
17593         }
17594 }
17595
17596 static int tg3_init_one(struct pci_dev *pdev,
17597                                   const struct pci_device_id *ent)
17598 {
17599         struct net_device *dev;
17600         struct tg3 *tp;
17601         int i, err;
17602         u32 sndmbx, rcvmbx, intmbx;
17603         char str[40];
17604         u64 dma_mask, persist_dma_mask;
17605         netdev_features_t features = 0;
17606
17607         printk_once(KERN_INFO "%s\n", version);
17608
17609         err = pci_enable_device(pdev);
17610         if (err) {
17611                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17612                 return err;
17613         }
17614
17615         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17616         if (err) {
17617                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17618                 goto err_out_disable_pdev;
17619         }
17620
17621         pci_set_master(pdev);
17622
17623         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17624         if (!dev) {
17625                 err = -ENOMEM;
17626                 goto err_out_free_res;
17627         }
17628
17629         SET_NETDEV_DEV(dev, &pdev->dev);
17630
17631         tp = netdev_priv(dev);
17632         tp->pdev = pdev;
17633         tp->dev = dev;
17634         tp->rx_mode = TG3_DEF_RX_MODE;
17635         tp->tx_mode = TG3_DEF_TX_MODE;
17636         tp->irq_sync = 1;
17637         tp->pcierr_recovery = false;
17638
17639         if (tg3_debug > 0)
17640                 tp->msg_enable = tg3_debug;
17641         else
17642                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17643
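              /* The tg3 MAC also appears as a core on Broadcom SSB-based
               * devices; such cores need the posted-write flush, single-DMA,
               * roboswitch and RGMII quirks probed here.
               */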
17644         if (pdev_is_ssb_gige_core(pdev)) {
17645                 tg3_flag_set(tp, IS_SSB_CORE);
17646                 if (ssb_gige_must_flush_posted_writes(pdev))
17647                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17648                 if (ssb_gige_one_dma_at_once(pdev))
17649                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17650                 if (ssb_gige_have_roboswitch(pdev)) {
17651                         tg3_flag_set(tp, USE_PHYLIB);
17652                         tg3_flag_set(tp, ROBOSWITCH);
17653                 }
17654                 if (ssb_gige_is_rgmii(pdev))
17655                         tg3_flag_set(tp, RGMII_MODE);
17656         }
17657
17658         /* The word/byte swap controls here control register access byte
17659          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17660          * setting below.
17661          */
17662         tp->misc_host_ctrl =
17663                 MISC_HOST_CTRL_MASK_PCI_INT |
17664                 MISC_HOST_CTRL_WORD_SWAP |
17665                 MISC_HOST_CTRL_INDIR_ACCESS |
17666                 MISC_HOST_CTRL_PCISTATE_RW;
17667
17668         /* The NONFRM (non-frame) byte/word swap controls take effect
17669          * on descriptor entries, anything which isn't packet data.
17670          *
17671          * The StrongARM chips on the board (one for tx, one for rx)
17672          * are running in big-endian mode.
17673          */
17674         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17675                         GRC_MODE_WSWAP_NONFRM_DATA);
17676 #ifdef __BIG_ENDIAN
17677         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17678 #endif
17679         spin_lock_init(&tp->lock);
17680         spin_lock_init(&tp->indirect_lock);
17681         INIT_WORK(&tp->reset_task, tg3_reset_task);
17682
17683         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17684         if (!tp->regs) {
17685                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17686                 err = -ENOMEM;
17687                 goto err_out_free_dev;
17688         }
17689
17690         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17691             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17692             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17693             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17694             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17695             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17696             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17697             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17698             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17699             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17700             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17701             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17702             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17703             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17704             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17705                 tg3_flag_set(tp, ENABLE_APE);
17706                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17707                 if (!tp->aperegs) {
17708                         dev_err(&pdev->dev,
17709                                 "Cannot map APE registers, aborting\n");
17710                         err = -ENOMEM;
17711                         goto err_out_iounmap;
17712                 }
17713         }
17714
17715         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17716         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17717
17718         dev->ethtool_ops = &tg3_ethtool_ops;
17719         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17720         dev->netdev_ops = &tg3_netdev_ops;
17721         dev->irq = pdev->irq;
17722
17723         err = tg3_get_invariants(tp, ent);
17724         if (err) {
17725                 dev_err(&pdev->dev,
17726                         "Problem fetching invariants of chip, aborting\n");
17727                 goto err_out_apeunmap;
17728         }
17729
17730         /* The EPB bridge inside 5714, 5715, and 5780 and any
17731          * device behind the EPB cannot support DMA addresses > 40-bit.
17732          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17733          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17734          * do DMA address check in tg3_start_xmit().
17735          */
17736         if (tg3_flag(tp, IS_5788))
17737                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17738         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17739                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17740 #ifdef CONFIG_HIGHMEM
17741                 dma_mask = DMA_BIT_MASK(64);
17742 #endif
17743         } else
17744                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17745
17746         /* Configure DMA attributes. */
17747         if (dma_mask > DMA_BIT_MASK(32)) {
17748                 err = pci_set_dma_mask(pdev, dma_mask);
17749                 if (!err) {
17750                         features |= NETIF_F_HIGHDMA;
17751                         err = pci_set_consistent_dma_mask(pdev,
17752                                                           persist_dma_mask);
17753                         if (err < 0) {
17754                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17755                                         "DMA for consistent allocations\n");
17756                                 goto err_out_apeunmap;
17757                         }
17758                 }
17759         }
17760         if (err || dma_mask == DMA_BIT_MASK(32)) {
17761                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17762                 if (err) {
17763                         dev_err(&pdev->dev,
17764                                 "No usable DMA configuration, aborting\n");
17765                         goto err_out_apeunmap;
17766                 }
17767         }
17768
17769         tg3_init_bufmgr_config(tp);
17770
17771         /* 5700 B0 chips do not support checksumming correctly due
17772          * to hardware bugs.
17773          */
17774         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17775                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17776
17777                 if (tg3_flag(tp, 5755_PLUS))
17778                         features |= NETIF_F_IPV6_CSUM;
17779         }
17780
17781         /* TSO is on by default on chips that support hardware TSO.
17782          * Firmware TSO on older chips gives lower performance, so it
17783          * is off by default, but can be enabled using ethtool.
17784          */
17785         if ((tg3_flag(tp, HW_TSO_1) ||
17786              tg3_flag(tp, HW_TSO_2) ||
17787              tg3_flag(tp, HW_TSO_3)) &&
17788             (features & NETIF_F_IP_CSUM))
17789                 features |= NETIF_F_TSO;
17790         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17791                 if (features & NETIF_F_IPV6_CSUM)
17792                         features |= NETIF_F_TSO6;
17793                 if (tg3_flag(tp, HW_TSO_3) ||
17794                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17795                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17796                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17797                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17798                     tg3_asic_rev(tp) == ASIC_REV_57780)
17799                         features |= NETIF_F_TSO_ECN;
17800         }
17801
17802         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17803                          NETIF_F_HW_VLAN_CTAG_RX;
17804         dev->vlan_features |= features;
17805
17806         /*
17807          * Add loopback capability only for a subset of devices that support
17808          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17809          * loopback for the remaining devices.
17810          */
17811         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17812             !tg3_flag(tp, CPMU_PRESENT))
17813                 /* Add the loopback capability */
17814                 features |= NETIF_F_LOOPBACK;
17815
17816         dev->hw_features |= features;
17817         dev->priv_flags |= IFF_UNICAST_FLT;
17818
17819         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17820         dev->min_mtu = TG3_MIN_MTU;
17821         dev->max_mtu = TG3_MAX_MTU(tp);
17822
17823         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17824             !tg3_flag(tp, TSO_CAPABLE) &&
17825             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17826                 tg3_flag_set(tp, MAX_RXPEND_64);
17827                 tp->rx_pending = 63;
17828         }
17829
17830         err = tg3_get_device_address(tp);
17831         if (err) {
17832                 dev_err(&pdev->dev,
17833                         "Could not obtain valid ethernet address, aborting\n");
17834                 goto err_out_apeunmap;
17835         }
17836
17837         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17838         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17839         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
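              /* Hand each NAPI context its interrupt, consumer (rx) and
               * producer (tx) mailbox registers.  The stride arithmetic in
               * the loop follows the chip's mailbox register layout.
               */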
17840         for (i = 0; i < tp->irq_max; i++) {
17841                 struct tg3_napi *tnapi = &tp->napi[i];
17842
17843                 tnapi->tp = tp;
17844                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17845
17846                 tnapi->int_mbox = intmbx;
17847                 if (i <= 4)
17848                         intmbx += 0x8;
17849                 else
17850                         intmbx += 0x4;
17851
17852                 tnapi->consmbox = rcvmbx;
17853                 tnapi->prodmbox = sndmbx;
17854
17855                 if (i)
17856                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17857                 else
17858                         tnapi->coal_now = HOSTCC_MODE_NOW;
17859
17860                 if (!tg3_flag(tp, SUPPORT_MSIX))
17861                         break;
17862
17863                 /*
17864                  * If we support MSIX, we'll be using RSS.  If we're using
17865                  * RSS, the first vector only handles link interrupts and the
17866                  * remaining vectors handle rx and tx interrupts.  Reuse the
17867          * mailbox values for the next iteration.  The values we set up
17868                  * above are still useful for the single vectored mode.
17869                  */
17870                 if (!i)
17871                         continue;
17872
17873                 rcvmbx += 0x8;
17874
17875                 if (sndmbx & 0x4)
17876                         sndmbx -= 0x4;
17877                 else
17878                         sndmbx += 0xc;
17879         }
17880
17881         /*
17882          * Reset chip in case UNDI or EFI driver did not shut it down.
17883          * Otherwise the DMA self test will enable WDMAC and we'll see (spurious)
17884          * pending DMA on the PCI bus at that point.
17885          */
17886         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17887             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17888                 tg3_full_lock(tp, 0);
17889                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17890                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17891                 tg3_full_unlock(tp);
17892         }
17893
17894         err = tg3_test_dma(tp);
17895         if (err) {
17896                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17897                 goto err_out_apeunmap;
17898         }
17899
17900         tg3_init_coal(tp);
17901
17902         pci_set_drvdata(pdev, dev);
17903
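        /*
         * These ASIC revs have the timestamping hardware used for PTP;
         * marking them PTP_CAPABLE makes the code after register_netdev()
         * below register a PTP clock for the device.
         */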
17904         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17905             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17906             tg3_asic_rev(tp) == ASIC_REV_5762)
17907                 tg3_flag_set(tp, PTP_CAPABLE);
17908
17909         tg3_timer_init(tp);
17910
17911         tg3_carrier_off(tp);
17912
17913         err = register_netdev(dev);
17914         if (err) {
17915                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17916                 goto err_out_apeunmap;
17917         }
17918
17919         if (tg3_flag(tp, PTP_CAPABLE)) {
17920                 tg3_ptp_init(tp);
17921                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17922                                                    &tp->pdev->dev);
17923                 if (IS_ERR(tp->ptp_clock))
17924                         tp->ptp_clock = NULL;
17925         }
17926
17927         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17928                     tp->board_part_number,
17929                     tg3_chip_rev_id(tp),
17930                     tg3_bus_string(tp, str),
17931                     dev->dev_addr);
17932
17933         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17934                 char *ethtype;
17935
17936                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17937                         ethtype = "10/100Base-TX";
17938                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17939                         ethtype = "1000Base-SX";
17940                 else
17941                         ethtype = "10/100/1000Base-T";
17942
17943                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17944                             "(WireSpeed[%d], EEE[%d])\n",
17945                             tg3_phy_string(tp), ethtype,
17946                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17947                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17948         }
17949
17950         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17951                     (dev->features & NETIF_F_RXCSUM) != 0,
17952                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17953                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17954                     tg3_flag(tp, ENABLE_ASF) != 0,
17955                     tg3_flag(tp, TSO_CAPABLE) != 0);
17956         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17957                     tp->dma_rwctrl,
17958                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17959                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17960
17961         pci_save_state(pdev);
17962
17963         return 0;
17964
17965 err_out_apeunmap:
17966         if (tp->aperegs) {
17967                 iounmap(tp->aperegs);
17968                 tp->aperegs = NULL;
17969         }
17970
17971 err_out_iounmap:
17972         if (tp->regs) {
17973                 iounmap(tp->regs);
17974                 tp->regs = NULL;
17975         }
17976
17977 err_out_free_dev:
17978         free_netdev(dev);
17979
17980 err_out_free_res:
17981         pci_release_regions(pdev);
17982
17983 err_out_disable_pdev:
17984         if (pci_is_enabled(pdev))
17985                 pci_disable_device(pdev);
17986         return err;
17987 }
17988
17989 static void tg3_remove_one(struct pci_dev *pdev)
17990 {
17991         struct net_device *dev = pci_get_drvdata(pdev);
17992
17993         if (dev) {
17994                 struct tg3 *tp = netdev_priv(dev);
17995
17996                 tg3_ptp_fini(tp);
17997
17998                 release_firmware(tp->fw);
17999
18000                 tg3_reset_task_cancel(tp);
18001
18002                 if (tg3_flag(tp, USE_PHYLIB)) {
18003                         tg3_phy_fini(tp);
18004                         tg3_mdio_fini(tp);
18005                 }
18006
18007                 unregister_netdev(dev);
18008                 if (tp->aperegs) {
18009                         iounmap(tp->aperegs);
18010                         tp->aperegs = NULL;
18011                 }
18012                 if (tp->regs) {
18013                         iounmap(tp->regs);
18014                         tp->regs = NULL;
18015                 }
18016                 free_netdev(dev);
18017                 pci_release_regions(pdev);
18018                 pci_disable_device(pdev);
18019         }
18020 }
18021
18022 #ifdef CONFIG_PM_SLEEP
18023 static int tg3_suspend(struct device *device)
18024 {
18025         struct pci_dev *pdev = to_pci_dev(device);
18026         struct net_device *dev = pci_get_drvdata(pdev);
18027         struct tg3 *tp = netdev_priv(dev);
18028         int err = 0;
18029
18030         rtnl_lock();
18031
18032         if (!netif_running(dev))
18033                 goto unlock;
18034
18035         tg3_reset_task_cancel(tp);
18036         tg3_phy_stop(tp);
18037         tg3_netif_stop(tp);
18038
18039         tg3_timer_stop(tp);
18040
18041         tg3_full_lock(tp, 1);
18042         tg3_disable_ints(tp);
18043         tg3_full_unlock(tp);
18044
18045         netif_device_detach(dev);
18046
18047         tg3_full_lock(tp, 0);
18048         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18049         tg3_flag_clear(tp, INIT_COMPLETE);
18050         tg3_full_unlock(tp);
18051
18052         err = tg3_power_down_prepare(tp);
18053         if (err) {
18054                 int err2;
18055
18056                 tg3_full_lock(tp, 0);
18057
18058                 tg3_flag_set(tp, INIT_COMPLETE);
18059                 err2 = tg3_restart_hw(tp, true);
18060                 if (err2)
18061                         goto out;
18062
18063                 tg3_timer_start(tp);
18064
18065                 netif_device_attach(dev);
18066                 tg3_netif_start(tp);
18067
18068 out:
18069                 tg3_full_unlock(tp);
18070
18071                 if (!err2)
18072                         tg3_phy_start(tp);
18073         }
18074
18075 unlock:
18076         rtnl_unlock();
18077         return err;
18078 }
18079
18080 static int tg3_resume(struct device *device)
18081 {
18082         struct pci_dev *pdev = to_pci_dev(device);
18083         struct net_device *dev = pci_get_drvdata(pdev);
18084         struct tg3 *tp = netdev_priv(dev);
18085         int err = 0;
18086
18087         rtnl_lock();
18088
18089         if (!netif_running(dev))
18090                 goto unlock;
18091
18092         netif_device_attach(dev);
18093
18094         tg3_full_lock(tp, 0);
18095
18096         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18097
18098         tg3_flag_set(tp, INIT_COMPLETE);
18099         err = tg3_restart_hw(tp,
18100                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18101         if (err)
18102                 goto out;
18103
18104         tg3_timer_start(tp);
18105
18106         tg3_netif_start(tp);
18107
18108 out:
18109         tg3_full_unlock(tp);
18110
18111         if (!err)
18112                 tg3_phy_start(tp);
18113
18114 unlock:
18115         rtnl_unlock();
18116         return err;
18117 }
18118 #endif /* CONFIG_PM_SLEEP */
18119
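/*
 * SIMPLE_DEV_PM_OPS() wires the same pair of callbacks into all of the
 * system-sleep hooks; roughly equivalent to (per include/linux/pm.h):
 *
 *      static const struct dev_pm_ops tg3_pm_ops = {
 *              .suspend  = tg3_suspend,  .resume  = tg3_resume,
 *              .freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *              .poweroff = tg3_suspend,  .restore = tg3_resume,
 *      };
 *
 * with the callbacks compiled out when CONFIG_PM_SLEEP is unset.
 */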
18120 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18121
18122 static void tg3_shutdown(struct pci_dev *pdev)
18123 {
18124         struct net_device *dev = pci_get_drvdata(pdev);
18125         struct tg3 *tp = netdev_priv(dev);
18126
18127         rtnl_lock();
18128         netif_device_detach(dev);
18129
18130         if (netif_running(dev))
18131                 dev_close(dev);
18132
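        /*
         * Fully power the NIC down (WOL configuration permitting) only
         * when the machine is actually powering off, not on a reboot.
         */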
18133         if (system_state == SYSTEM_POWER_OFF)
18134                 tg3_power_down(tp);
18135
18136         rtnl_unlock();
18137 }
18138
18139 /**
18140  * tg3_io_error_detected - called when PCI error is detected
18141  * @pdev: Pointer to PCI device
18142  * @state: The current pci connection state
18143  *
18144  * This function is called after a PCI bus error affecting
18145  * this device has been detected.
18146  */
18147 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18148                                               pci_channel_state_t state)
18149 {
18150         struct net_device *netdev = pci_get_drvdata(pdev);
18151         struct tg3 *tp = netdev_priv(netdev);
18152         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18153
18154         netdev_info(netdev, "PCI I/O error detected\n");
18155
18156         rtnl_lock();
18157
18158         /* The netdev may not be registered or running yet */
18159         if (!netdev || !netif_running(netdev))
18160                 goto done;
18161
18162         /* We don't need to recover from a permanent error */
18163         if (state == pci_channel_io_frozen)
18164                 tp->pcierr_recovery = true;
18165
18166         tg3_phy_stop(tp);
18167
18168         tg3_netif_stop(tp);
18169
18170         tg3_timer_stop(tp);
18171
18172         /* Make sure that the reset task doesn't run */
18173         tg3_reset_task_cancel(tp);
18174
18175         netif_device_detach(netdev);
18176
18177         /* Clean up software state, even if MMIO is blocked */
18178         tg3_full_lock(tp, 0);
18179         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18180         tg3_full_unlock(tp);
18181
18182 done:
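        /*
         * For a permanent failure, close the device for good; for a
         * recoverable one, disable it and wait for the slot reset.
         */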
18183         if (state == pci_channel_io_perm_failure) {
18184                 if (netdev) {
18185                         tg3_napi_enable(tp);
18186                         dev_close(netdev);
18187                 }
18188                 err = PCI_ERS_RESULT_DISCONNECT;
18189         } else {
18190                 pci_disable_device(pdev);
18191         }
18192
18193         rtnl_unlock();
18194
18195         return err;
18196 }
18197
18198 /**
18199  * tg3_io_slot_reset - called after the PCI bus has been reset.
18200  * @pdev: Pointer to PCI device
18201  *
18202  * Restart the card from scratch, as if from a cold boot.
18203  * At this point, the card has experienced a hard reset,
18204  * followed by fixups by BIOS, and has its config space
18205  * set up identically to what it was at cold boot.
18206  */
18207 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18208 {
18209         struct net_device *netdev = pci_get_drvdata(pdev);
18210         struct tg3 *tp = netdev_priv(netdev);
18211         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18212         int err;
18213
18214         rtnl_lock();
18215
18216         if (pci_enable_device(pdev)) {
18217                 dev_err(&pdev->dev,
18218                         "Cannot re-enable PCI device after reset.\n");
18219                 goto done;
18220         }
18221
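        /*
         * Restore the config space captured at probe time, then re-save
         * it so a later pci_restore_state() starts from this known-good
         * post-reset state.
         */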
18222         pci_set_master(pdev);
18223         pci_restore_state(pdev);
18224         pci_save_state(pdev);
18225
18226         if (!netdev || !netif_running(netdev)) {
18227                 rc = PCI_ERS_RESULT_RECOVERED;
18228                 goto done;
18229         }
18230
18231         err = tg3_power_up(tp);
18232         if (err)
18233                 goto done;
18234
18235         rc = PCI_ERS_RESULT_RECOVERED;
18236
18237 done:
18238         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18239                 tg3_napi_enable(tp);
18240                 dev_close(netdev);
18241         }
18242         rtnl_unlock();
18243
18244         return rc;
18245 }
18246
18247 /**
18248  * tg3_io_resume - called when traffic can start flowing again.
18249  * @pdev: Pointer to PCI device
18250  *
18251  * This callback is called when the error recovery driver tells
18252  * us that it's OK to resume normal operation.
18253  */
18254 static void tg3_io_resume(struct pci_dev *pdev)
18255 {
18256         struct net_device *netdev = pci_get_drvdata(pdev);
18257         struct tg3 *tp = netdev_priv(netdev);
18258         int err;
18259
18260         rtnl_lock();
18261
18262         if (!netdev || !netif_running(netdev))
18263                 goto done;
18264
18265         tg3_full_lock(tp, 0);
18266         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18267         tg3_flag_set(tp, INIT_COMPLETE);
18268         err = tg3_restart_hw(tp, true);
18269         if (err) {
18270                 tg3_full_unlock(tp);
18271                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18272                 goto done;
18273         }
18274
18275         netif_device_attach(netdev);
18276
18277         tg3_timer_start(tp);
18278
18279         tg3_netif_start(tp);
18280
18281         tg3_full_unlock(tp);
18282
18283         tg3_phy_start(tp);
18284
18285 done:
18286         tp->pcierr_recovery = false;
18287         rtnl_unlock();
18288 }
18289
18290 static const struct pci_error_handlers tg3_err_handler = {
18291         .error_detected = tg3_io_error_detected,
18292         .slot_reset     = tg3_io_slot_reset,
18293         .resume         = tg3_io_resume
18294 };
18295
18296 static struct pci_driver tg3_driver = {
18297         .name           = DRV_MODULE_NAME,
18298         .id_table       = tg3_pci_tbl,
18299         .probe          = tg3_init_one,
18300         .remove         = tg3_remove_one,
18301         .err_handler    = &tg3_err_handler,
18302         .driver.pm      = &tg3_pm_ops,
18303         .shutdown       = tg3_shutdown,
18304 };
18305
18306 module_pci_driver(tg3_driver);
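
/*
 * module_pci_driver() generates the module init/exit boilerplate;
 * roughly equivalent to (per include/linux/pci.h):
 *
 *      static int __init tg3_driver_init(void)
 *      {
 *              return pci_register_driver(&tg3_driver);
 *      }
 *      module_init(tg3_driver_init);
 *
 *      static void __exit tg3_driver_exit(void)
 *      {
 *              pci_unregister_driver(&tg3_driver);
 *      }
 *      module_exit(tg3_driver_exit);
 */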