/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
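/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap; the _set/_clear variants
 * wrap set_bit()/clear_bit() the same way.
 */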

#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
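
/* NEXT_TX() is an instance of the '& (foo - 1)' trick described above:
 * TG3_TX_RING_SIZE is a power of two (512), so ((N) + 1) & 511 equals
 * ((N) + 1) % 512, e.g. NEXT_TX(511) == 0.
 */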

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
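
/* Net effect: when unaligned loads are cheap (or NET_IP_ALIGN is 0) the
 * copy-break threshold is the compile-time constant above, which avoids
 * a tp->rx_copy_thresh dereference in the rx hot path.
 */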

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
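
/* Hypothetical usage example: 'modprobe tg3 tg3_debug=0x7' would enable
 * only the NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK classes,
 * while the default of -1 selects TG3_DEF_MSG_ENABLE above.
 */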
235
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
238
239 static const struct pci_device_id tg3_pci_tbl[] = {
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260                         TG3_DRV_DATA_FLAG_5705_10_100},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263                         TG3_DRV_DATA_FLAG_5705_10_100},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267                         TG3_DRV_DATA_FLAG_5705_10_100},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289                         PCI_VENDOR_ID_LENOVO,
290                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355         {}
356 };
357
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
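
/* MODULE_DEVICE_TABLE() exports tg3_pci_tbl as module alias data so that
 * userspace (udev/modprobe) can autoload the driver when a matching PCI
 * ID appears; .driver_data carries the 10/100-only quirk flags above.
 */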

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

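/* The readl() below flushes the preceding posted MMIO write out of any
 * intermediate buffers before the caller continues.
 */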
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

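/* Chips flagged TXD_MBOX_HWBUG have their TX doorbell written twice to
 * work around a mailbox hardware bug; the trailing readl() flushes the
 * posted write on hosts that buffer or reorder mailbox writes.
 */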
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

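/* NIC SRAM is accessed through a sliding window: a base-address register
 * selects the SRAM offset and a data register transfers one u32, either
 * via PCI config space (SRAM_USE_CONFIG) or via MMIO.  The window base is
 * always parked back at zero afterwards.
 */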
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
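
/* A minimal usage sketch: acquire the lock, touch the shared resource,
 * then release it:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		...	(access APE shared memory)
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */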

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

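/* Note: when tg3_ape_event_lock() returns 0, the caller is left holding
 * TG3_APE_LOCK_MEM with no event pending and must release it; on -EBUSY
 * the lock is not held.
 */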
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Only send a heartbeat if the hb interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

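/* Interrupt mailbox semantics: writing a value with bit 0 set (0x00000001)
 * masks the vector's interrupt, while writing last_tag << 24 (bit 0 clear)
 * unmasks it and acknowledges status updates up to that tag.
 */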
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
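
/* 5000 polls at 10 usec apiece bounds a single MDIO transaction to
 * roughly 50 msec before __tg3_readphy()/__tg3_writephy() give up
 * and return -EBUSY.
 */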

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

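/* Clause 45 registers are reached indirectly through the Clause 22 MMD
 * access registers: select the MMD device address, latch the register
 * address, switch to no-increment data mode, then move the data word.
 */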
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

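/* phylib mii_bus accessors: these wrap the MAC's MDIO state machine and
 * serialize against the rest of the driver with tp->lock.
 */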
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
1551          * accessing the PHY ID registers.  A PHY reset via BMCR is the
1552          * quickest way to bring the device back to an operational state.
1553          */
1554         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555                 tg3_bmcr_reset(tp);
1556
1557         i = mdiobus_register(tp->mdio_bus);
1558         if (i) {
1559                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1560                 mdiobus_free(tp->mdio_bus);
1561                 return i;
1562         }
1563
1564         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566         if (!phydev || !phydev->drv) {
1567                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568                 mdiobus_unregister(tp->mdio_bus);
1569                 mdiobus_free(tp->mdio_bus);
1570                 return -ENODEV;
1571         }
1572
1573         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574         case PHY_ID_BCM57780:
1575                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577                 break;
1578         case PHY_ID_BCM50610:
1579         case PHY_ID_BCM50610M:
1580                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581                                      PHY_BRCM_RX_REFCLK_UNUSED |
1582                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584                 fallthrough;
1585         case PHY_ID_RTL8211C:
1586                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587                 break;
1588         case PHY_ID_RTL8201E:
1589         case PHY_ID_BCMAC131:
1590                 phydev->interface = PHY_INTERFACE_MODE_MII;
1591                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593                 break;
1594         }
1595
1596         tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599                 tg3_mdio_config_5785(tp);
1600
1601         return 0;
1602 }
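
/* The sequence above is the stock mdiobus bring-up: allocate the bus,
 * fill in name/id/priv/parent and the read/write ops, mask off every
 * address except tp->phy_addr, then let mdiobus_register() probe the
 * bus so the switch statement can tune the MAC/PHY glue per PHY type.
 */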
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606         if (tg3_flag(tp, MDIOBUS_INITED)) {
1607                 tg3_flag_clear(tp, MDIOBUS_INITED);
1608                 mdiobus_unregister(tp->mdio_bus);
1609                 mdiobus_free(tp->mdio_bus);
1610         }
1611 }
1612
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616         u32 val;
1617
1618         val = tr32(GRC_RX_CPU_EVENT);
1619         val |= GRC_RX_CPU_DRIVER_EVENT;
1620         tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622         tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630         int i;
1631         unsigned int delay_cnt;
1632         long time_remain;
1633
1634         /* If enough time has passed, no wait is necessary. */
1635         time_remain = (long)(tp->last_event_jiffies + 1 +
1636                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637                       (long)jiffies;
1638         if (time_remain < 0)
1639                 return;
1640
1641         /* Check if we can shorten the wait time. */
1642         delay_cnt = jiffies_to_usecs(time_remain);
1643         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645         delay_cnt = (delay_cnt >> 3) + 1;
1646
1647         for (i = 0; i < delay_cnt; i++) {
1648                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649                         break;
1650                 if (pci_channel_offline(tp->pdev))
1651                         break;
1652
1653                 udelay(8);
1654         }
1655 }
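
/* Worked example of the budget above: with 1000us of the 2500us
 * firmware window left, delay_cnt = (1000 >> 3) + 1 = 126 polls at
 * 8us apiece (~1008us), so the loop never waits much longer than the
 * time that actually remains.
 */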
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660         u32 reg, val;
1661
1662         val = 0;
1663         if (!tg3_readphy(tp, MII_BMCR, &reg))
1664                 val = reg << 16;
1665         if (!tg3_readphy(tp, MII_BMSR, &reg))
1666                 val |= (reg & 0xffff);
1667         *data++ = val;
1668
1669         val = 0;
1670         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671                 val = reg << 16;
1672         if (!tg3_readphy(tp, MII_LPA, &reg))
1673                 val |= (reg & 0xffff);
1674         *data++ = val;
1675
1676         val = 0;
1677         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679                         val = reg << 16;
1680                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681                         val |= (reg & 0xffff);
1682         }
1683         *data++ = val;
1684
1685         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686                 val = reg << 16;
1687         else
1688                 val = 0;
1689         *data++ = val;
1690 }
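
/* Layout of the four words gathered above for the firmware mailbox:
 *
 *      data[0] = BMCR      << 16 | BMSR
 *      data[1] = ADVERTISE << 16 | LPA
 *      data[2] = CTRL1000  << 16 | STAT1000   (copper PHYs only)
 *      data[3] = PHYADDR   << 16
 */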
1691
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695         u32 data[4];
1696
1697         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698                 return;
1699
1700         tg3_phy_gather_ump_data(tp, data);
1701
1702         tg3_wait_for_event_ack(tp);
1703
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711         tg3_generate_fw_event(tp);
1712 }
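
/* The function above shows the general driver->firmware command
 * pattern: wait for the previous doorbell to be acked, deposit the
 * command (plus optional length and data) in NIC_SRAM_FW_CMD_*_MBOX,
 * then ring the doorbell:
 *
 *      tg3_wait_for_event_ack(tp);
 *      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *      tg3_generate_fw_event(tp);
 *
 * tg3_stop_fw() below is a minimal instance of this exact sequence.
 */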
1713
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718                 /* Wait for RX cpu to ACK the previous event. */
1719                 tg3_wait_for_event_ack(tp);
1720
1721                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723                 tg3_generate_fw_event(tp);
1724
1725                 /* Wait for RX cpu to ACK this event. */
1726                 tg3_wait_for_event_ack(tp);
1727         }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737                 switch (kind) {
1738                 case RESET_KIND_INIT:
1739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740                                       DRV_STATE_START);
1741                         break;
1742
1743                 case RESET_KIND_SHUTDOWN:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_UNLOAD);
1746                         break;
1747
1748                 case RESET_KIND_SUSPEND:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_SUSPEND);
1751                         break;
1752
1753                 default:
1754                         break;
1755                 }
1756         }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763                 switch (kind) {
1764                 case RESET_KIND_INIT:
1765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766                                       DRV_STATE_START_DONE);
1767                         break;
1768
1769                 case RESET_KIND_SHUTDOWN:
1770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771                                       DRV_STATE_UNLOAD_DONE);
1772                         break;
1773
1774                 default:
1775                         break;
1776                 }
1777         }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783         if (tg3_flag(tp, ENABLE_ASF)) {
1784                 switch (kind) {
1785                 case RESET_KIND_INIT:
1786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787                                       DRV_STATE_START);
1788                         break;
1789
1790                 case RESET_KIND_SHUTDOWN:
1791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792                                       DRV_STATE_UNLOAD);
1793                         break;
1794
1795                 case RESET_KIND_SUSPEND:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_SUSPEND);
1798                         break;
1799
1800                 default:
1801                         break;
1802                 }
1803         }
1804 }
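
/* Reset signature summary for the three helpers above:
 *
 *      kind                    pre-reset               post-reset
 *      RESET_KIND_INIT         DRV_STATE_START         DRV_STATE_START_DONE
 *      RESET_KIND_SHUTDOWN     DRV_STATE_UNLOAD        DRV_STATE_UNLOAD_DONE
 *      RESET_KIND_SUSPEND      DRV_STATE_SUSPEND       (none)
 *
 * The legacy variant writes the same pre-reset states but keys off
 * ENABLE_ASF instead of ASF_NEW_HANDSHAKE.
 */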
1805
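/* Wait for bootcode to finish.  The driver writes MAGIC1 into the
 * firmware mailbox before a reset (tg3_write_sig_pre_reset above) and
 * the bootcode writes back its one's complement, ~MAGIC1, once
 * initialization completes.
 */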
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808         int i;
1809         u32 val;
1810
1811         if (tg3_flag(tp, NO_FWARE_REPORTED))
1812                 return 0;
1813
1814         if (tg3_flag(tp, IS_SSB_CORE)) {
1815                 /* We don't use firmware. */
1816                 return 0;
1817         }
1818
1819         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820                 /* Wait up to 20ms for init done. */
1821                 for (i = 0; i < 200; i++) {
1822                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823                                 return 0;
1824                         if (pci_channel_offline(tp->pdev))
1825                                 return -ENODEV;
1826
1827                         udelay(100);
1828                 }
1829                 return -ENODEV;
1830         }
1831
1832         /* Wait for firmware initialization to complete. */
1833         for (i = 0; i < 100000; i++) {
1834                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836                         break;
1837                 if (pci_channel_offline(tp->pdev)) {
1838                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840                                 netdev_info(tp->dev, "No firmware running\n");
1841                         }
1842
1843                         break;
1844                 }
1845
1846                 udelay(10);
1847         }
1848
1849         /* Chip might not be fitted with firmware.  Some Sun onboard
1850          * parts are configured like that.  So don't signal the timeout
1851          * of the above loop as an error, but do report the lack of
1852          * running firmware once.
1853          */
1854         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857                 netdev_info(tp->dev, "No firmware running\n");
1858         }
1859
1860         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861                 /* The 57765 A0 needs a little more
1862                  * time to do some important work.
1863                  */
1864                 mdelay(10);
1865         }
1866
1867         return 0;
1868 }
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872         if (!netif_carrier_ok(tp->dev)) {
1873                 netif_info(tp, link, tp->dev, "Link is down\n");
1874                 tg3_ump_link_report(tp);
1875         } else if (netif_msg_link(tp)) {
1876                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877                             (tp->link_config.active_speed == SPEED_1000 ?
1878                              1000 :
1879                              (tp->link_config.active_speed == SPEED_100 ?
1880                               100 : 10)),
1881                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1882                              "full" : "half"));
1883
1884                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886                             "on" : "off",
1887                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888                             "on" : "off");
1889
1890                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891                         netdev_info(tp->dev, "EEE is %s\n",
1892                                     tp->setlpicnt ? "enabled" : "disabled");
1893
1894                 tg3_ump_link_report(tp);
1895         }
1896
1897         tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902         u32 flowctrl = 0;
1903
1904         if (adv & ADVERTISE_PAUSE_CAP) {
1905                 flowctrl |= FLOW_CTRL_RX;
1906                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907                         flowctrl |= FLOW_CTRL_TX;
1908         } else if (adv & ADVERTISE_PAUSE_ASYM)
1909                 flowctrl |= FLOW_CTRL_TX;
1910
1911         return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916         u16 miireg;
1917
1918         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919                 miireg = ADVERTISE_1000XPAUSE;
1920         else if (flow_ctrl & FLOW_CTRL_TX)
1921                 miireg = ADVERTISE_1000XPSE_ASYM;
1922         else if (flow_ctrl & FLOW_CTRL_RX)
1923                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924         else
1925                 miireg = 0;
1926
1927         return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932         u32 flowctrl = 0;
1933
1934         if (adv & ADVERTISE_1000XPAUSE) {
1935                 flowctrl |= FLOW_CTRL_RX;
1936                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937                         flowctrl |= FLOW_CTRL_TX;
1938         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939                 flowctrl |= FLOW_CTRL_TX;
1940
1941         return flowctrl;
1942 }
1943
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946         u8 cap = 0;
1947
1948         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951                 if (lcladv & ADVERTISE_1000XPAUSE)
1952                         cap = FLOW_CTRL_RX;
1953                 if (rmtadv & ADVERTISE_1000XPAUSE)
1954                         cap = FLOW_CTRL_TX;
1955         }
1956
1957         return cap;
1958 }
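
/* The resolution above, spelled out (PAUSE = ADVERTISE_1000XPAUSE,
 * ASYM = ADVERTISE_1000XPSE_ASYM):
 *
 *      local           remote          result
 *      PAUSE           PAUSE           FLOW_CTRL_TX | FLOW_CTRL_RX
 *      PAUSE|ASYM      ASYM            FLOW_CTRL_RX
 *      ASYM            PAUSE|ASYM      FLOW_CTRL_TX
 *      anything else                   0
 *
 * This is the standard IEEE 802.3 Annex 28B pause resolution for
 * 1000BASE-X autonegotiation.
 */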
1959
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962         u8 autoneg;
1963         u8 flowctrl = 0;
1964         u32 old_rx_mode = tp->rx_mode;
1965         u32 old_tx_mode = tp->tx_mode;
1966
1967         if (tg3_flag(tp, USE_PHYLIB))
1968                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969         else
1970                 autoneg = tp->link_config.autoneg;
1971
1972         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975                 else
1976                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977         } else
1978                 flowctrl = tp->link_config.flowctrl;
1979
1980         tp->link_config.active_flowctrl = flowctrl;
1981
1982         if (flowctrl & FLOW_CTRL_RX)
1983                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984         else
1985                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987         if (old_rx_mode != tp->rx_mode)
1988                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990         if (flowctrl & FLOW_CTRL_TX)
1991                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992         else
1993                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995         if (old_tx_mode != tp->tx_mode)
1996                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
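/* phylib link-change callback, registered through phy_connect() in
 * tg3_phy_init() below.  Re-resolves MAC port mode, duplex, flow
 * control and TX timings whenever the PHY state machine reports a
 * change, and emits a link report if anything user-visible moved.
 */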
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001         u8 oldflowctrl, linkmesg = 0;
2002         u32 mac_mode, lcl_adv, rmt_adv;
2003         struct tg3 *tp = netdev_priv(dev);
2004         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006         spin_lock_bh(&tp->lock);
2007
2008         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009                                     MAC_MODE_HALF_DUPLEX);
2010
2011         oldflowctrl = tp->link_config.active_flowctrl;
2012
2013         if (phydev->link) {
2014                 lcl_adv = 0;
2015                 rmt_adv = 0;
2016
2017                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2019                 else if (phydev->speed == SPEED_1000 ||
2020                          tg3_asic_rev(tp) != ASIC_REV_5785)
2021                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022                 else
2023                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025                 if (phydev->duplex == DUPLEX_HALF)
2026                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2027                 else {
2028                         lcl_adv = mii_advertise_flowctrl(
2029                                   tp->link_config.flowctrl);
2030
2031                         if (phydev->pause)
2032                                 rmt_adv = LPA_PAUSE_CAP;
2033                         if (phydev->asym_pause)
2034                                 rmt_adv |= LPA_PAUSE_ASYM;
2035                 }
2036
2037                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038         } else
2039                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041         if (mac_mode != tp->mac_mode) {
2042                 tp->mac_mode = mac_mode;
2043                 tw32_f(MAC_MODE, tp->mac_mode);
2044                 udelay(40);
2045         }
2046
2047         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048                 if (phydev->speed == SPEED_10)
2049                         tw32(MAC_MI_STAT,
2050                              MAC_MI_STAT_10MBPS_MODE |
2051                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052                 else
2053                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054         }
2055
2056         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057                 tw32(MAC_TX_LENGTHS,
2058                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059                       (6 << TX_LENGTHS_IPG_SHIFT) |
2060                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061         else
2062                 tw32(MAC_TX_LENGTHS,
2063                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064                       (6 << TX_LENGTHS_IPG_SHIFT) |
2065                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067         if (phydev->link != tp->old_link ||
2068             phydev->speed != tp->link_config.active_speed ||
2069             phydev->duplex != tp->link_config.active_duplex ||
2070             oldflowctrl != tp->link_config.active_flowctrl)
2071                 linkmesg = 1;
2072
2073         tp->old_link = phydev->link;
2074         tp->link_config.active_speed = phydev->speed;
2075         tp->link_config.active_duplex = phydev->duplex;
2076
2077         spin_unlock_bh(&tp->lock);
2078
2079         if (linkmesg)
2080                 tg3_link_report(tp);
2081 }
2082
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085         struct phy_device *phydev;
2086
2087         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088                 return 0;
2089
2090         /* Bring the PHY back to a known state. */
2091         tg3_bmcr_reset(tp);
2092
2093         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095         /* Attach the MAC to the PHY. */
2096         phydev = phy_connect(tp->dev, phydev_name(phydev),
2097                              tg3_adjust_link, phydev->interface);
2098         if (IS_ERR(phydev)) {
2099                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100                 return PTR_ERR(phydev);
2101         }
2102
2103         /* Mask with MAC supported features. */
2104         switch (phydev->interface) {
2105         case PHY_INTERFACE_MODE_GMII:
2106         case PHY_INTERFACE_MODE_RGMII:
2107                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108                         phy_set_max_speed(phydev, SPEED_1000);
2109                         phy_support_asym_pause(phydev);
2110                         break;
2111                 }
2112                 fallthrough;
2113         case PHY_INTERFACE_MODE_MII:
2114                 phy_set_max_speed(phydev, SPEED_100);
2115                 phy_support_asym_pause(phydev);
2116                 break;
2117         default:
2118                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119                 return -EINVAL;
2120         }
2121
2122         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124         phy_attached_info(phydev);
2125
2126         return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131         struct phy_device *phydev;
2132
2133         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134                 return;
2135
2136         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140                 phydev->speed = tp->link_config.speed;
2141                 phydev->duplex = tp->link_config.duplex;
2142                 phydev->autoneg = tp->link_config.autoneg;
2143                 ethtool_convert_legacy_u32_to_link_mode(
2144                         phydev->advertising, tp->link_config.advertising);
2145         }
2146
2147         phy_start(phydev);
2148
2149         phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155                 return;
2156
2157         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165         }
2166 }
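
/* Lifecycle of the phylib attachment implemented above:
 *
 *      tg3_phy_init()  - phy_connect() plus capability masking, once
 *      tg3_phy_start() - restore saved link settings, phy_start()
 *      tg3_phy_stop()  - phy_stop()
 *      tg3_phy_fini()  - phy_disconnect(), clears IS_CONNECTED
 *
 * start/stop may run many times between one init/fini pair; all four
 * are guarded by TG3_PHYFLG_IS_CONNECTED.
 */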
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170         int err;
2171         u32 val;
2172
2173         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174                 return 0;
2175
2176         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177                 /* Cannot do read-modify-write on 5401 */
2178                 err = tg3_phy_auxctl_write(tp,
2179                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181                                            0x4c20);
2182                 goto done;
2183         }
2184
2185         err = tg3_phy_auxctl_read(tp,
2186                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187         if (err)
2188                 return err;
2189
2190         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191         err = tg3_phy_auxctl_write(tp,
2192                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195         return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200         u32 phytest;
2201
2202         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203                 u32 phy;
2204
2205                 tg3_writephy(tp, MII_TG3_FET_TEST,
2206                              phytest | MII_TG3_FET_SHADOW_EN);
2207                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208                         if (enable)
2209                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210                         else
2211                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213                 }
2214                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215         }
2216 }
2217
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220         u32 reg;
2221
2222         if (!tg3_flag(tp, 5705_PLUS) ||
2223             (tg3_flag(tp, 5717_PLUS) &&
2224              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225                 return;
2226
2227         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228                 tg3_phy_fet_toggle_apd(tp, enable);
2229                 return;
2230         }
2231
2232         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234               MII_TG3_MISC_SHDW_SCR5_SDTL |
2235               MII_TG3_MISC_SHDW_SCR5_C125OE;
2236         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2241
2242         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243         if (enable)
2244                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
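
/* APD is the PHY's auto power-down facility (cf. the
 * PHY_BRCM_AUTO_PWRDWN_ENABLE dev_flags set in tg3_mdio_init):
 * FET-style PHYs are programmed through the FET shadow registers,
 * everything else through the MISC shadow block with an 84ms wake timer.
 */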
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251         u32 phy;
2252
2253         if (!tg3_flag(tp, 5705_PLUS) ||
2254             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255                 return;
2256
2257         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258                 u32 ephy;
2259
2260                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263                         tg3_writephy(tp, MII_TG3_FET_TEST,
2264                                      ephy | MII_TG3_FET_SHADOW_EN);
2265                         if (!tg3_readphy(tp, reg, &phy)) {
2266                                 if (enable)
2267                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268                                 else
2269                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270                                 tg3_writephy(tp, reg, phy);
2271                         }
2272                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273                 }
2274         } else {
2275                 int ret;
2276
2277                 ret = tg3_phy_auxctl_read(tp,
2278                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279                 if (!ret) {
2280                         if (enable)
2281                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282                         else
2283                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284                         tg3_phy_auxctl_write(tp,
2285                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286                 }
2287         }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292         int ret;
2293         u32 val;
2294
2295         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296                 return;
2297
2298         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299         if (!ret)
2300                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306         u32 otp, phy;
2307
2308         if (!tp->phy_otp)
2309                 return;
2310
2311         otp = tp->phy_otp;
2312
2313         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314                 return;
2315
2316         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338         tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2342 {
2343         u32 val;
2344         struct ethtool_keee *dest = &tp->eee;
2345
2346         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347                 return;
2348
2349         if (eee)
2350                 dest = eee;
2351
2352         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353                 return;
2354
2355         /* Pull eee_active */
2356         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358                 dest->eee_active = 1;
2359         } else
2360                 dest->eee_active = 0;
2361
2362         /* Pull lp advertised settings */
2363         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364                 return;
2365         mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2366
2367         /* Pull advertised and eee_enabled settings */
2368         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369                 return;
2370         dest->eee_enabled = !!val;
2371         mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2372
2373         /* Pull tx_lpi_enabled */
2374         val = tr32(TG3_CPMU_EEE_MODE);
2375         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377         /* Pull lpi timer value */
2378         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
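
/* tg3_eee_pull_config() snapshots the negotiated EEE state from the
 * Clause 45 AN registers (plus the CPMU for the LPI bits and timer)
 * into tp->eee, or into a caller-supplied ethtool_keee instead when
 * one is passed in.
 */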
2380
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383         u32 val;
2384
2385         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386                 return;
2387
2388         tp->setlpicnt = 0;
2389
2390         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391             current_link_up &&
2392             tp->link_config.active_duplex == DUPLEX_FULL &&
2393             (tp->link_config.active_speed == SPEED_100 ||
2394              tp->link_config.active_speed == SPEED_1000)) {
2395                 u32 eeectl;
2396
2397                 if (tp->link_config.active_speed == SPEED_1000)
2398                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399                 else
2400                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404                 tg3_eee_pull_config(tp, NULL);
2405                 if (tp->eee.eee_active)
2406                         tp->setlpicnt = 2;
2407         }
2408
2409         if (!tp->setlpicnt) {
2410                 if (current_link_up &&
2411                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2414                 }
2415
2416                 val = tr32(TG3_CPMU_EEE_MODE);
2417                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418         }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423         u32 val;
2424
2425         if (tp->link_config.active_speed == SPEED_1000 &&
2426             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428              tg3_flag(tp, 57765_CLASS)) &&
2429             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430                 val = MII_TG3_DSP_TAP26_ALNOKO |
2431                       MII_TG3_DSP_TAP26_RMRXSTO;
2432                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434         }
2435
2436         val = tr32(TG3_CPMU_EEE_MODE);
2437         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442         int limit = 100;
2443
2444         while (limit--) {
2445                 u32 tmp32;
2446
2447                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448                         if ((tmp32 & 0x1000) == 0)
2449                                 break;
2450                 }
2451         }
2452         if (limit < 0)
2453                 return -EBUSY;
2454
2455         return 0;
2456 }
2457
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460         static const u32 test_pat[4][6] = {
2461         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465         };
2466         int chan;
2467
2468         for (chan = 0; chan < 4; chan++) {
2469                 int i;
2470
2471                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472                              (chan * 0x2000) | 0x0200);
2473                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475                 for (i = 0; i < 6; i++)
2476                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477                                      test_pat[chan][i]);
2478
2479                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480                 if (tg3_wait_macro_done(tp)) {
2481                         *resetp = 1;
2482                         return -EBUSY;
2483                 }
2484
2485                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486                              (chan * 0x2000) | 0x0200);
2487                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488                 if (tg3_wait_macro_done(tp)) {
2489                         *resetp = 1;
2490                         return -EBUSY;
2491                 }
2492
2493                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494                 if (tg3_wait_macro_done(tp)) {
2495                         *resetp = 1;
2496                         return -EBUSY;
2497                 }
2498
2499                 for (i = 0; i < 6; i += 2) {
2500                         u32 low, high;
2501
2502                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504                             tg3_wait_macro_done(tp)) {
2505                                 *resetp = 1;
2506                                 return -EBUSY;
2507                         }
2508                         low &= 0x7fff;
2509                         high &= 0x000f;
2510                         if (low != test_pat[chan][i] ||
2511                             high != test_pat[chan][i+1]) {
2512                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516                                 return -EBUSY;
2517                         }
2518                 }
2519         }
2520
2521         return 0;
2522 }
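
/* The routine above writes a known 6-word pattern into each of the
 * four DSP channels, reads it back through the macro interface, and
 * compares the masked halves (15 low bits, 4 high bits).  A mismatch
 * fails the attempt outright; a macro timeout additionally asks the
 * caller (via *resetp) to reset the PHY before retrying.
 */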
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526         int chan;
2527
2528         for (chan = 0; chan < 4; chan++) {
2529                 int i;
2530
2531                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532                              (chan * 0x2000) | 0x0200);
2533                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534                 for (i = 0; i < 6; i++)
2535                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537                 if (tg3_wait_macro_done(tp))
2538                         return -EBUSY;
2539         }
2540
2541         return 0;
2542 }
2543
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546         u32 reg32, phy9_orig;
2547         int retries, do_phy_reset, err;
2548
2549         retries = 10;
2550         do_phy_reset = 1;
2551         do {
2552                 if (do_phy_reset) {
2553                         err = tg3_bmcr_reset(tp);
2554                         if (err)
2555                                 return err;
2556                         do_phy_reset = 0;
2557                 }
2558
2559                 /* Disable transmitter and interrupt.  */
2560                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561                         continue;
2562
2563                 reg32 |= 0x3000;
2564                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566                 /* Set full-duplex, 1000 mbps.  */
2567                 tg3_writephy(tp, MII_BMCR,
2568                              BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570                 /* Set to master mode.  */
2571                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572                         continue;
2573
2574                 tg3_writephy(tp, MII_CTRL1000,
2575                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578                 if (err)
2579                         return err;
2580
2581                 /* Block the PHY control access.  */
2582                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585                 if (!err)
2586                         break;
2587         } while (--retries);
2588
2589         err = tg3_phy_reset_chanpat(tp);
2590         if (err)
2591                 return err;
2592
2593         tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598         tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603         if (err)
2604                 return err;
2605
2606         reg32 &= ~0x3000;
2607         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609         return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614         netif_carrier_off(tp->dev);
2615         tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620         if (tg3_flag(tp, ENABLE_ASF))
2621                 netdev_warn(tp->dev,
2622                             "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2626  * workarounds; the reset is unconditional.
2627  */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630         u32 val, cpmuctrl;
2631         int err;
2632
2633         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634                 val = tr32(GRC_MISC_CFG);
2635                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636                 udelay(40);
2637         }
2638         err  = tg3_readphy(tp, MII_BMSR, &val);
2639         err |= tg3_readphy(tp, MII_BMSR, &val);
2640         if (err != 0)
2641                 return -EBUSY;
2642
2643         if (netif_running(tp->dev) && tp->link_up) {
2644                 netif_carrier_off(tp->dev);
2645                 tg3_link_report(tp);
2646         }
2647
2648         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650             tg3_asic_rev(tp) == ASIC_REV_5705) {
2651                 err = tg3_phy_reset_5703_4_5(tp);
2652                 if (err)
2653                         return err;
2654                 goto out;
2655         }
2656
2657         cpmuctrl = 0;
2658         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662                         tw32(TG3_CPMU_CTRL,
2663                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664         }
2665
2666         err = tg3_bmcr_reset(tp);
2667         if (err)
2668                 return err;
2669
2670         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675         }
2676
2677         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2682                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683                         udelay(40);
2684                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685                 }
2686         }
2687
2688         if (tg3_flag(tp, 5717_PLUS) &&
2689             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690                 return 0;
2691
2692         tg3_phy_apply_otp(tp);
2693
2694         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695                 tg3_phy_toggle_apd(tp, true);
2696         else
2697                 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705         }
2706
2707         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710         }
2711
2712         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2715                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2716                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2718                 }
2719         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724                                 tg3_writephy(tp, MII_TG3_TEST1,
2725                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2726                         } else
2727                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2730                 }
2731         }
2732
2733         /* Set Extended packet length bit (bit 14) on all chips that
2734          * support jumbo frames. */
2735         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736                 /* Cannot do read-modify-write on 5401 */
2737                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739                 /* Set bit 14 with read-modify-write to preserve other bits */
2740                 err = tg3_phy_auxctl_read(tp,
2741                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742                 if (!err)
2743                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745         }
2746
2747         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748          * jumbo frames transmission.
2749          */
2750         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754         }
2755
2756         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757                 /* adjust output voltage */
2758                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759         }
2760
2761         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764         tg3_phy_toggle_automdix(tp, true);
2765         tg3_phy_set_wirespeed(tp);
2766         return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2771 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2772                                           TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777          (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783          (TG3_GPIO_MSG_NEED_VAUX << 12))
2784
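/* Each PCI function owns a 4-bit field in the GPIO message word, at
 * bit position TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn; the _ALL_ masks
 * above just replicate one flag into every nibble.  A worked example
 * using the defines above:
 *
 *      fn 0 driver present, fn 2 needs Vaux  ->  0x0001 | 0x0200 = 0x0201
 *
 * tg3_set_function_status() below read-modify-writes exactly this word.
 */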
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787         u32 status, shift;
2788
2789         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790             tg3_asic_rev(tp) == ASIC_REV_5719)
2791                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792         else
2793                 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796         status &= ~(TG3_GPIO_MSG_MASK << shift);
2797         status |= (newstat << shift);
2798
2799         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800             tg3_asic_rev(tp) == ASIC_REV_5719)
2801                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802         else
2803                 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805         return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810         if (!tg3_flag(tp, IS_NIC))
2811                 return 0;
2812
2813         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815             tg3_asic_rev(tp) == ASIC_REV_5720) {
2816                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817                         return -EIO;
2818
2819                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825         } else {
2826                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2828         }
2829
2830         return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835         u32 grc_local_ctrl;
2836
2837         if (!tg3_flag(tp, IS_NIC) ||
2838             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839             tg3_asic_rev(tp) == ASIC_REV_5701)
2840                 return;
2841
2842         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844         tw32_wait_f(GRC_LOCAL_CTRL,
2845                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848         tw32_wait_f(GRC_LOCAL_CTRL,
2849                     grc_local_ctrl,
2850                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852         tw32_wait_f(GRC_LOCAL_CTRL,
2853                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859         if (!tg3_flag(tp, IS_NIC))
2860                 return;
2861
2862         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863             tg3_asic_rev(tp) == ASIC_REV_5701) {
2864                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865                             (GRC_LCLCTRL_GPIO_OE0 |
2866                              GRC_LCLCTRL_GPIO_OE1 |
2867                              GRC_LCLCTRL_GPIO_OE2 |
2868                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2869                              GRC_LCLCTRL_GPIO_OUTPUT1),
2870                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2871         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875                                      GRC_LCLCTRL_GPIO_OE1 |
2876                                      GRC_LCLCTRL_GPIO_OE2 |
2877                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2878                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2879                                      tp->grc_local_ctrl;
2880                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2890         } else {
2891                 u32 no_gpio2;
2892                 u32 grc_local_ctrl = 0;
2893
2894                 /* Workaround to prevent overdrawing amps. */
2895                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898                                     grc_local_ctrl,
2899                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2900                 }
2901
2902                 /* On 5753 and variants, GPIO2 cannot be used. */
2903                 no_gpio2 = tp->nic_sram_data_cfg &
2904                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907                                   GRC_LCLCTRL_GPIO_OE1 |
2908                                   GRC_LCLCTRL_GPIO_OE2 |
2909                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2910                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2911                 if (no_gpio2) {
2912                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2914                 }
2915                 tw32_wait_f(GRC_LOCAL_CTRL,
2916                             tp->grc_local_ctrl | grc_local_ctrl,
2917                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921                 tw32_wait_f(GRC_LOCAL_CTRL,
2922                             tp->grc_local_ctrl | grc_local_ctrl,
2923                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925                 if (!no_gpio2) {
2926                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927                         tw32_wait_f(GRC_LOCAL_CTRL,
2928                                     tp->grc_local_ctrl | grc_local_ctrl,
2929                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2930                 }
2931         }
2932 }
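
/* Each tw32_wait_f() above toggles one GPIO output at a time and then
 * pauses for TG3_GRC_LCLCTL_PWRSW_DELAY, presumably to let the external
 * power-switch circuitry settle between transitions; that is why the
 * OUTPUT bits are staged across several writes instead of one store.
 */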
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936         u32 msg = 0;
2937
2938         /* Serialize power state transitions */
2939         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940                 return;
2941
2942         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943                 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945         msg = tg3_set_function_status(tp, msg);
2946
2947         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948                 goto done;
2949
2950         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951                 tg3_pwrsrc_switch_to_vaux(tp);
2952         else
2953                 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961         bool need_vaux = false;
2962
2963         /* The GPIOs do something completely different on 57765. */
2964         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965                 return;
2966
2967         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969             tg3_asic_rev(tp) == ASIC_REV_5720) {
2970                 tg3_frob_aux_power_5717(tp, include_wol ?
2971                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2972                 return;
2973         }
2974
2975         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976                 struct net_device *dev_peer;
2977
2978                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980                 /* remove_one() may have been run on the peer. */
2981                 if (dev_peer) {
2982                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2985                                 return;
2986
2987                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988                             tg3_flag(tp_peer, ENABLE_ASF))
2989                                 need_vaux = true;
2990                 }
2991         }
2992
2993         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994             tg3_flag(tp, ENABLE_ASF))
2995                 need_vaux = true;
2996
2997         if (need_vaux)
2998                 tg3_pwrsrc_switch_to_vaux(tp);
2999         else
3000                 tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006                 return 1;
3007         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008                 if (speed != SPEED_10)
3009                         return 1;
3010         } else if (speed == SPEED_10)
3011                 return 1;
3012
3013         return 0;
3014 }
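
/* Return-value summary for the checks above (nonzero means the caller
 * sets MAC_MODE_LINK_POLARITY, as in tg3_power_down_prepare()):
 *   LED_CTRL_MODE_PHY_2  -> 1 at any speed
 *   BCM5411 PHY          -> 1 unless speed == SPEED_10
 *   all other PHYs       -> 1 only when speed == SPEED_10
 */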
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018         switch (tg3_asic_rev(tp)) {
3019         case ASIC_REV_5700:
3020         case ASIC_REV_5704:
3021                 return true;
3022         case ASIC_REV_5780:
3023                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024                         return true;
3025                 return false;
3026         case ASIC_REV_5717:
3027                 if (!tp->pci_fn)
3028                         return true;
3029                 return false;
3030         case ASIC_REV_5719:
3031         case ASIC_REV_5720:
3032                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033                     !tp->pci_fn)
3034                         return true;
3035                 return false;
3036         }
3037
3038         return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043         switch (tg3_asic_rev(tp)) {
3044         case ASIC_REV_5719:
3045         case ASIC_REV_5720:
3046                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047                     !tp->pci_fn)
3048                         return true;
3049                 return false;
3050         }
3051
3052         return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057         u32 val;
3058
3059         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060                 return;
3061
3062         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067                         sg_dig_ctrl |=
3068                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071                 }
3072                 return;
3073         }
3074
3075         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076                 tg3_bmcr_reset(tp);
3077                 val = tr32(GRC_MISC_CFG);
3078                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079                 udelay(40);
3080                 return;
3081         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082                 u32 phytest;
3083                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084                         u32 phy;
3085
3086                         tg3_writephy(tp, MII_ADVERTISE, 0);
3087                         tg3_writephy(tp, MII_BMCR,
3088                                      BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090                         tg3_writephy(tp, MII_TG3_FET_TEST,
3091                                      phytest | MII_TG3_FET_SHADOW_EN);
3092                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094                                 tg3_writephy(tp,
3095                                              MII_TG3_FET_SHDW_AUXMODE4,
3096                                              phy);
3097                         }
3098                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099                 }
3100                 return;
3101         } else if (do_low_power) {
3102                 if (!tg3_phy_led_bug(tp))
3103                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3109                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110         }
3111
3112         /* On some chips the PHY must not be powered down because of
3113          * hardware bugs; tg3_phy_power_bug() identifies those chips.
3114          */
3115         if (tg3_phy_power_bug(tp))
3116                 return;
3117
3118         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124         }
3125
3126         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, NVRAM)) {
3133                 int i;
3134
3135                 if (tp->nvram_lock_cnt == 0) {
3136                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137                         for (i = 0; i < 8000; i++) {
3138                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139                                         break;
3140                                 udelay(20);
3141                         }
3142                         if (i == 8000) {
3143                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144                                 return -ENODEV;
3145                         }
3146                 }
3147                 tp->nvram_lock_cnt++;
3148         }
3149         return 0;
3150 }
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155         if (tg3_flag(tp, NVRAM)) {
3156                 if (tp->nvram_lock_cnt > 0)
3157                         tp->nvram_lock_cnt--;
3158                 if (tp->nvram_lock_cnt == 0)
3159                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160         }
3161 }
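
/* tg3_nvram_lock()/tg3_nvram_unlock() implement a recursion count on top
 * of the hardware SWARB semaphore: only the 0 -> 1 lock transition
 * requests SWARB_REQ_SET1 and only the 1 -> 0 unlock transition clears
 * it, so nested lock/unlock pairs touch the hardware only once.
 */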
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167                 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170         }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177                 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180         }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184                                         u32 offset, u32 *val)
3185 {
3186         u32 tmp;
3187         int i;
3188
3189         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190                 return -EINVAL;
3191
3192         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193                                         EEPROM_ADDR_DEVID_MASK |
3194                                         EEPROM_ADDR_READ);
3195         tw32(GRC_EEPROM_ADDR,
3196              tmp |
3197              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199               EEPROM_ADDR_ADDR_MASK) |
3200              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202         for (i = 0; i < 1000; i++) {
3203                 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205                 if (tmp & EEPROM_ADDR_COMPLETE)
3206                         break;
3207                 msleep(1);
3208         }
3209         if (!(tmp & EEPROM_ADDR_COMPLETE))
3210                 return -EBUSY;
3211
3212         tmp = tr32(GRC_EEPROM_DATA);
3213
3214         /*
3215          * The data will always be opposite the native endian
3216          * format.  Perform a blind byteswap to compensate.
3217          */
3218         *val = swab32(tmp);
3219
3220         return 0;
3221 }
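
/* Illustration of the blind byteswap above, using a hypothetical value:
 * if the stored word is 0x12345678, the register read returns the bytes
 * opposite to host order, so tr32() yields 0x78563412 and
 * swab32(0x78563412) restores 0x12345678 on either host endianness.
 */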
3222
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227         int i;
3228
3229         tw32(NVRAM_CMD, nvram_cmd);
3230         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231                 usleep_range(10, 40);
3232                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233                         udelay(10);
3234                         break;
3235                 }
3236         }
3237
3238         if (i == NVRAM_CMD_TIMEOUT)
3239                 return -EBUSY;
3240
3241         return 0;
3242 }
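
/* Worst-case wait in tg3_nvram_exec_cmd(): NVRAM_CMD_TIMEOUT (10000)
 * iterations of usleep_range(10, 40), i.e. roughly 100 ms to 400 ms of
 * polling before the command is abandoned with -EBUSY.
 */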
3243
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246         if (tg3_flag(tp, NVRAM) &&
3247             tg3_flag(tp, NVRAM_BUFFERED) &&
3248             tg3_flag(tp, FLASH) &&
3249             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250             (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252                 addr = ((addr / tp->nvram_pagesize) <<
3253                         ATMEL_AT45DB0X1B_PAGE_POS) +
3254                        (addr % tp->nvram_pagesize);
3255
3256         return addr;
3257 }
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261         if (tg3_flag(tp, NVRAM) &&
3262             tg3_flag(tp, NVRAM_BUFFERED) &&
3263             tg3_flag(tp, FLASH) &&
3264             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265             (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268                         tp->nvram_pagesize) +
3269                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271         return addr;
3272 }
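
/* Worked example for the two translations above, assuming the usual
 * AT45DB0X1B geometry (nvram_pagesize = 264 bytes and
 * ATMEL_AT45DB0X1B_PAGE_POS = 9; both values are assumptions here):
 *   linear 528 -> page 528 / 264 = 2, offset 0
 *              -> physical (2 << 9) + 0 = 0x400   (tg3_nvram_phys_addr)
 *   0x400      -> page 0x400 >> 9 = 2, offset 0x400 & 0x1ff = 0
 *              -> linear 2 * 264 + 0 = 528        (tg3_nvram_logical_addr)
 */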
3273
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275  * the byteswapping settings for all other register accesses.
3276  * tg3 devices are BE devices, so on a BE machine, the data
3277  * returned will be exactly as it is seen in NVRAM.  On a LE
3278  * machine, the 32-bit value will be byteswapped.
3279  */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282         int ret;
3283
3284         if (!tg3_flag(tp, NVRAM))
3285                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287         offset = tg3_nvram_phys_addr(tp, offset);
3288
3289         if (offset > NVRAM_ADDR_MSK)
3290                 return -EINVAL;
3291
3292         ret = tg3_nvram_lock(tp);
3293         if (ret)
3294                 return ret;
3295
3296         tg3_enable_nvram_access(tp);
3297
3298         tw32(NVRAM_ADDR, offset);
3299         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302         if (ret == 0)
3303                 *val = tr32(NVRAM_RDDATA);
3304
3305         tg3_disable_nvram_access(tp);
3306
3307         tg3_nvram_unlock(tp);
3308
3309         return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315         u32 v;
3316         int res = tg3_nvram_read(tp, offset, &v);
3317         if (!res)
3318                 *val = cpu_to_be32(v);
3319         return res;
3320 }
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323                                     u32 offset, u32 len, u8 *buf)
3324 {
3325         int i, j, rc = 0;
3326         u32 val;
3327
3328         for (i = 0; i < len; i += 4) {
3329                 u32 addr;
3330                 __be32 data;
3331
3332                 addr = offset + i;
3333
3334                 memcpy(&data, buf + i, 4);
3335
3336                 /*
3337                  * The SEEPROM interface expects the data to always be opposite
3338                  * the native endian format.  We accomplish this by reversing
3339                  * all the operations that would have been performed on the
3340                  * data from a call to tg3_nvram_read_be32().
3341                  */
3342                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344                 val = tr32(GRC_EEPROM_ADDR);
3345                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348                         EEPROM_ADDR_READ);
3349                 tw32(GRC_EEPROM_ADDR, val |
3350                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351                         (addr & EEPROM_ADDR_ADDR_MASK) |
3352                         EEPROM_ADDR_START |
3353                         EEPROM_ADDR_WRITE);
3354
3355                 for (j = 0; j < 1000; j++) {
3356                         val = tr32(GRC_EEPROM_ADDR);
3357
3358                         if (val & EEPROM_ADDR_COMPLETE)
3359                                 break;
3360                         msleep(1);
3361                 }
3362                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363                         rc = -EBUSY;
3364                         break;
3365                 }
3366         }
3367
3368         return rc;
3369 }
3370
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373                 u8 *buf)
3374 {
3375         int ret = 0;
3376         u32 pagesize = tp->nvram_pagesize;
3377         u32 pagemask = pagesize - 1;
3378         u32 nvram_cmd;
3379         u8 *tmp;
3380
3381         tmp = kmalloc(pagesize, GFP_KERNEL);
3382         if (tmp == NULL)
3383                 return -ENOMEM;
3384
3385         while (len) {
3386                 int j;
3387                 u32 phy_addr, page_off, size;
3388
3389                 phy_addr = offset & ~pagemask;
3390
3391                 for (j = 0; j < pagesize; j += 4) {
3392                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393                                                   (__be32 *) (tmp + j));
3394                         if (ret)
3395                                 break;
3396                 }
3397                 if (ret)
3398                         break;
3399
3400                 page_off = offset & pagemask;
3401                 size = pagesize;
3402                 if (len < size)
3403                         size = len;
3404
3405                 len -= size;
3406
3407                 memcpy(tmp + page_off, buf, size);
3408
3409                 offset = offset + (pagesize - page_off);
3410
3411                 tg3_enable_nvram_access(tp);
3412
3413                 /*
3414                  * Before we can erase the flash page, we need
3415                  * to issue a special "write enable" command.
3416                  */
3417                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420                         break;
3421
3422                 /* Erase the target page */
3423                 tw32(NVRAM_ADDR, phy_addr);
3424
3425                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429                         break;
3430
3431                 /* Issue another write enable to start the write. */
3432                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435                         break;
3436
3437                 for (j = 0; j < pagesize; j += 4) {
3438                         __be32 data;
3439
3440                         data = *((__be32 *) (tmp + j));
3441
3442                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444                         tw32(NVRAM_ADDR, phy_addr + j);
3445
3446                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447                                 NVRAM_CMD_WR;
3448
3449                         if (j == 0)
3450                                 nvram_cmd |= NVRAM_CMD_FIRST;
3451                         else if (j == (pagesize - 4))
3452                                 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455                         if (ret)
3456                                 break;
3457                 }
3458                 if (ret)
3459                         break;
3460         }
3461
3462         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463         tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465         kfree(tmp);
3466
3467         return ret;
3468 }
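
/* Per-page sequence used above for unbuffered flash: read the whole page
 * into a bounce buffer, merge in the caller's data, issue a write-enable
 * (WREN), erase the page, issue WREN again, rewrite the page one 32-bit
 * word at a time with NVRAM_CMD_FIRST on the first word and
 * NVRAM_CMD_LAST on the final one, then drop write access with WRDI.
 */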
3469
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472                 u8 *buf)
3473 {
3474         int i, ret = 0;
3475
3476         for (i = 0; i < len; i += 4, offset += 4) {
3477                 u32 page_off, phy_addr, nvram_cmd;
3478                 __be32 data;
3479
3480                 memcpy(&data, buf + i, 4);
3481                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483                 page_off = offset % tp->nvram_pagesize;
3484
3485                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489                 if (page_off == 0 || i == 0)
3490                         nvram_cmd |= NVRAM_CMD_FIRST;
3491                 if (page_off == (tp->nvram_pagesize - 4))
3492                         nvram_cmd |= NVRAM_CMD_LAST;
3493
3494                 if (i == (len - 4))
3495                         nvram_cmd |= NVRAM_CMD_LAST;
3496
3497                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498                     !tg3_flag(tp, FLASH) ||
3499                     !tg3_flag(tp, 57765_PLUS))
3500                         tw32(NVRAM_ADDR, phy_addr);
3501
3502                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503                     !tg3_flag(tp, 5755_PLUS) &&
3504                     (tp->nvram_jedecnum == JEDEC_ST) &&
3505                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3506                         u32 cmd;
3507
3508                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509                         ret = tg3_nvram_exec_cmd(tp, cmd);
3510                         if (ret)
3511                                 break;
3512                 }
3513                 if (!tg3_flag(tp, FLASH)) {
3514                         /* We always do complete word writes to eeprom. */
3515                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516                 }
3517
3518                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519                 if (ret)
3520                         break;
3521         }
3522         return ret;
3523 }
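
/* In the buffered path above, NVRAM_CMD_FIRST marks the start of a page
 * (or of the whole transfer) and NVRAM_CMD_LAST marks the final word of
 * a page or of the transfer, so writes spanning pages are emitted as a
 * series of FIRST ... LAST bursts that never cross a page boundary.
 */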
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528         int ret;
3529
3530         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533                 udelay(40);
3534         }
3535
3536         if (!tg3_flag(tp, NVRAM)) {
3537                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538         } else {
3539                 u32 grc_mode;
3540
3541                 ret = tg3_nvram_lock(tp);
3542                 if (ret)
3543                         return ret;
3544
3545                 tg3_enable_nvram_access(tp);
3546                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547                         tw32(NVRAM_WRITE1, 0x406);
3548
3549                 grc_mode = tr32(GRC_MODE);
3550                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554                                 buf);
3555                 } else {
3556                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557                                 buf);
3558                 }
3559
3560                 grc_mode = tr32(GRC_MODE);
3561                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563                 tg3_disable_nvram_access(tp);
3564                 tg3_nvram_unlock(tp);
3565         }
3566
3567         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569                 udelay(40);
3570         }
3571
3572         return ret;
3573 }
3574
3575 #define RX_CPU_SCRATCH_BASE     0x30000
3576 #define RX_CPU_SCRATCH_SIZE     0x04000
3577 #define TX_CPU_SCRATCH_BASE     0x34000
3578 #define TX_CPU_SCRATCH_SIZE     0x04000
3579
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583         int i;
3584         const int iters = 10000;
3585
3586         for (i = 0; i < iters; i++) {
3587                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3589                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590                         break;
3591                 if (pci_channel_offline(tp->pdev))
3592                         return -EBUSY;
3593         }
3594
3595         return (i == iters) ? -EBUSY : 0;
3596 }
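
/* The loop above re-issues the halt request up to 10000 times rather
 * than writing it once, presumably because the request may not latch
 * while the CPU is busy; pci_channel_offline() bails out early if the
 * device has been surprise-removed.
 */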
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3605         udelay(10);
3606
3607         return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613         return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619         tw32(cpu_base + CPU_STATE, 0xffffffff);
3620         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626         tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632         int rc;
3633
3634         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640                 return 0;
3641         }
3642         if (cpu_base == RX_CPU_BASE) {
3643                 rc = tg3_rxcpu_pause(tp);
3644         } else {
3645                 /*
3646                  * There is only an Rx CPU for the 5750 derivative in the
3647                  * BCM4785.
3648                  */
3649                 if (tg3_flag(tp, IS_SSB_CORE))
3650                         return 0;
3651
3652                 rc = tg3_txcpu_pause(tp);
3653         }
3654
3655         if (rc) {
3656                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658                 return -ENODEV;
3659         }
3660
3661         /* Clear firmware's nvram arbitration. */
3662         if (tg3_flag(tp, NVRAM))
3663                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664         return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668                            const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670         int fw_len;
3671
3672         /* Non-fragmented firmware has one firmware header followed by a
3673          * contiguous chunk of data to be written. The length field in that
3674          * header is not the length of the data to be written but the
3675          * complete length of the bss. The data length is instead derived
3676          * from tp->fw->size minus the headers.
3677          *
3678          * Fragmented firmware has a main header followed by multiple
3679          * fragments. Each fragment is identical to non-fragmented firmware,
3680          * with a firmware header followed by a contiguous chunk of data. In
3681          * the main header, the length field is unused and set to 0xffffffff.
3682          * In each fragment header, the length is the entire size of that
3683          * fragment, i.e. fragment data + header length. The data length is
3684          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685          */
3686         if (tp->fw_len == 0xffffffff)
3687                 fw_len = be32_to_cpu(fw_hdr->len);
3688         else
3689                 fw_len = tp->fw->size;
3690
3691         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
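
/* Example of the length math above, assuming TG3_FW_HDR_LEN covers the
 * three-word (12-byte) header of version, base_addr and len: a fragment
 * whose header reports len = 268 carries (268 - 12) / 4 = 64 data words,
 * while non-fragmented firmware uses tp->fw->size in the same formula.
 */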
3693
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696                                  u32 cpu_scratch_base, int cpu_scratch_size,
3697                                  const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699         int err, i;
3700         void (*write_op)(struct tg3 *, u32, u32);
3701         int total_len = tp->fw->size;
3702
3703         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704                 netdev_err(tp->dev,
3705                            "%s: Trying to load TX cpu firmware on a 5705-or-later chip\n",
3706                            __func__);
3707                 return -EINVAL;
3708         }
3709
3710         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711                 write_op = tg3_write_mem;
3712         else
3713                 write_op = tg3_write_indirect_reg32;
3714
3715         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716                 /* It is possible that bootcode is still loading at this point.
3717                  * Take the nvram lock before halting the cpu.
3718                  */
3719                 int lock_err = tg3_nvram_lock(tp);
3720                 err = tg3_halt_cpu(tp, cpu_base);
3721                 if (!lock_err)
3722                         tg3_nvram_unlock(tp);
3723                 if (err)
3724                         goto out;
3725
3726                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727                         write_op(tp, cpu_scratch_base + i, 0);
3728                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729                 tw32(cpu_base + CPU_MODE,
3730                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731         } else {
3732                 /* Subtract additional main header for fragmented firmware and
3733                  * advance to the first fragment
3734                  */
3735                 total_len -= TG3_FW_HDR_LEN;
3736                 fw_hdr++;
3737         }
3738
3739         do {
3740                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742                         write_op(tp, cpu_scratch_base +
3743                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744                                      (i * sizeof(u32)),
3745                                  be32_to_cpu(fw_data[i]));
3746
3747                 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749                 /* Advance to next fragment */
3750                 fw_hdr = (struct tg3_firmware_hdr *)
3751                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752         } while (total_len > 0);
3753
3754         err = 0;
3755
3756 out:
3757         return err;
3758 }
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763         int i;
3764         const int iters = 5;
3765
3766         tw32(cpu_base + CPU_STATE, 0xffffffff);
3767         tw32_f(cpu_base + CPU_PC, pc);
3768
3769         for (i = 0; i < iters; i++) {
3770                 if (tr32(cpu_base + CPU_PC) == pc)
3771                         break;
3772                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3774                 tw32_f(cpu_base + CPU_PC, pc);
3775                 udelay(1000);
3776         }
3777
3778         return (i == iters) ? -EBUSY : 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784         const struct tg3_firmware_hdr *fw_hdr;
3785         int err;
3786
3787         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789         /* The firmware blob starts with version numbers, followed by
3790            the start address and length. The length field holds the
3791            complete length: end_address_of_bss - start_address_of_text.
3792            The remainder is the blob to be loaded contiguously
3793            from the start address. */
3794
3795         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797                                     fw_hdr);
3798         if (err)
3799                 return err;
3800
3801         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803                                     fw_hdr);
3804         if (err)
3805                 return err;
3806
3807         /* Now start up only the RX cpu. */
3808         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809                                        be32_to_cpu(fw_hdr->base_addr));
3810         if (err) {
3811                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3812                            "should be %08x\n", __func__,
3813                            tr32(RX_CPU_BASE + CPU_PC),
3814                            be32_to_cpu(fw_hdr->base_addr));
3815                 return -ENODEV;
3816         }
3817
3818         tg3_rxcpu_resume(tp);
3819
3820         return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825         const int iters = 1000;
3826         int i;
3827         u32 val;
3828
3829         /* Wait for boot code to complete initialization and enter its
3830          * service loop. It is then safe to download service patches.
3831          */
3832         for (i = 0; i < iters; i++) {
3833                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834                         break;
3835
3836                 udelay(10);
3837         }
3838
3839         if (i == iters) {
3840                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841                 return -EBUSY;
3842         }
3843
3844         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845         if (val & 0xff) {
3846                 netdev_warn(tp->dev,
3847                             "Other patches exist. Not downloading EEE patch\n");
3848                 return -EEXIST;
3849         }
3850
3851         return 0;
3852 }
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857         struct tg3_firmware_hdr *fw_hdr;
3858
3859         if (!tg3_flag(tp, NO_NVRAM))
3860                 return;
3861
3862         if (tg3_validate_rxcpu_state(tp))
3863                 return;
3864
3865         if (!tp->fw)
3866                 return;
3867
3868         /* This firmware blob has a different format from older firmware
3869          * releases, as described below. The main difference is that the
3870          * data is fragmented and written to non-contiguous locations.
3871          *
3872          * The blob begins with a firmware header identical to other
3873          * firmware, consisting of version, base addr and length. The
3874          * length here is unused and set to 0xffffffff.
3875          *
3876          * This is followed by a series of firmware fragments, each of
3877          * which is individually identical to older firmware, i.e. a
3878          * firmware header followed by the data for that fragment. The
3879          * version field of the individual fragment header is unused.
3880          */
3881
3882         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884                 return;
3885
3886         if (tg3_rxcpu_pause(tp))
3887                 return;
3888
3889         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892         tg3_rxcpu_resume(tp);
3893 }
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898         const struct tg3_firmware_hdr *fw_hdr;
3899         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900         int err;
3901
3902         if (!tg3_flag(tp, FW_TSO))
3903                 return 0;
3904
3905         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907         /* The firmware blob starts with version numbers, followed by
3908            the start address and length. The length field holds the
3909            complete length: end_address_of_bss - start_address_of_text.
3910            The remainder is the blob to be loaded contiguously
3911            from the start address. */
3912
3913         cpu_scratch_size = tp->fw_len;
3914
3915         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916                 cpu_base = RX_CPU_BASE;
3917                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918         } else {
3919                 cpu_base = TX_CPU_BASE;
3920                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922         }
3923
3924         err = tg3_load_firmware_cpu(tp, cpu_base,
3925                                     cpu_scratch_base, cpu_scratch_size,
3926                                     fw_hdr);
3927         if (err)
3928                 return err;
3929
3930         /* Now start up the cpu. */
3931         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932                                        be32_to_cpu(fw_hdr->base_addr));
3933         if (err) {
3934                 netdev_err(tp->dev,
3935                            "%s failed to set CPU PC, is %08x should be %08x\n",
3936                            __func__, tr32(cpu_base + CPU_PC),
3937                            be32_to_cpu(fw_hdr->base_addr));
3938                 return -ENODEV;
3939         }
3940
3941         tg3_resume_cpu(tp, cpu_base);
3942         return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947                                    int index)
3948 {
3949         u32 addr_high, addr_low;
3950
3951         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953                     (mac_addr[4] <<  8) | mac_addr[5]);
3954
3955         if (index < 4) {
3956                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958         } else {
3959                 index -= 4;
3960                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962         }
3963 }
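
/* Register packing performed above, for a hypothetical station address
 * of 00:10:18:aa:bb:cc:
 *   addr_high = (0x00 << 8) | 0x10 = 0x0010
 *   addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *             = 0x18aabbcc
 * Indexes 0-3 land in MAC_ADDR_*, indexes 4 and up in MAC_EXTADDR_*.
 */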
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968         u32 addr_high;
3969         int i;
3970
3971         for (i = 0; i < 4; i++) {
3972                 if (i == 1 && skip_mac_1)
3973                         continue;
3974                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975         }
3976
3977         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978             tg3_asic_rev(tp) == ASIC_REV_5704) {
3979                 for (i = 4; i < 16; i++)
3980                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981         }
3982
3983         addr_high = (tp->dev->dev_addr[0] +
3984                      tp->dev->dev_addr[1] +
3985                      tp->dev->dev_addr[2] +
3986                      tp->dev->dev_addr[3] +
3987                      tp->dev->dev_addr[4] +
3988                      tp->dev->dev_addr[5]) &
3989                 TX_BACKOFF_SEED_MASK;
3990         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
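
/* The transmit backoff seed above is just the byte-wise sum of the
 * station address, masked by TX_BACKOFF_SEED_MASK; for the hypothetical
 * address 00:10:18:aa:bb:cc the sum is 0x259, and that value (after
 * masking) is what lands in MAC_TX_BACKOFF_SEED.
 */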
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995         /*
3996          * Make sure register accesses (indirect or otherwise) will function
3997          * correctly.
3998          */
3999         pci_write_config_dword(tp->pdev,
4000                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005         int err;
4006
4007         tg3_enable_register_access(tp);
4008
4009         err = pci_set_power_state(tp->pdev, PCI_D0);
4010         if (!err) {
4011                 /* Switch out of Vaux if the device is a NIC */
4012                 tg3_pwrsrc_switch_to_vmain(tp);
4013         } else {
4014                 netdev_err(tp->dev, "Transition to D0 failed\n");
4015         }
4016
4017         return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024         u32 misc_host_ctrl;
4025         bool device_should_wake, do_low_power;
4026
4027         tg3_enable_register_access(tp);
4028
4029         /* Restore the CLKREQ setting. */
4030         if (tg3_flag(tp, CLKREQ_BUG))
4031                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035         tw32(TG3PCI_MISC_HOST_CTRL,
4036              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039                              tg3_flag(tp, WOL_ENABLE);
4040
4041         if (tg3_flag(tp, USE_PHYLIB)) {
4042                 do_low_power = false;
4043                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046                         struct phy_device *phydev;
4047                         u32 phyid;
4048
4049                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053                         tp->link_config.speed = phydev->speed;
4054                         tp->link_config.duplex = phydev->duplex;
4055                         tp->link_config.autoneg = phydev->autoneg;
4056                         ethtool_convert_link_mode_to_legacy_u32(
4057                                 &tp->link_config.advertising,
4058                                 phydev->advertising);
4059
4060                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062                                          advertising);
4063                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064                                          advertising);
4065                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066                                          advertising);
4067
4068                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071                                                          advertising);
4072                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073                                                          advertising);
4074                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075                                                          advertising);
4076                                 } else {
4077                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078                                                          advertising);
4079                                 }
4080                         }
4081
4082                         linkmode_copy(phydev->advertising, advertising);
4083                         phy_start_aneg(phydev);
4084
4085                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086                         if (phyid != PHY_ID_BCMAC131) {
4087                                 phyid &= PHY_BCM_OUI_MASK;
4088                                 if (phyid == PHY_BCM_OUI_1 ||
4089                                     phyid == PHY_BCM_OUI_2 ||
4090                                     phyid == PHY_BCM_OUI_3)
4091                                         do_low_power = true;
4092                         }
4093                 }
4094         } else {
4095                 do_low_power = true;
4096
4097                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101                         tg3_setup_phy(tp, false);
4102         }
4103
4104         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105                 u32 val;
4106
4107                 val = tr32(GRC_VCPU_EXT_CTRL);
4108                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110                 int i;
4111                 u32 val;
4112
4113                 for (i = 0; i < 200; i++) {
4114                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116                                 break;
4117                         msleep(1);
4118                 }
4119         }
4120         if (tg3_flag(tp, WOL_CAP))
4121                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122                                                      WOL_DRV_STATE_SHUTDOWN |
4123                                                      WOL_DRV_WOL |
4124                                                      WOL_SET_MAGIC_PKT);
4125
4126         if (device_should_wake) {
4127                 u32 mac_mode;
4128
4129                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130                         if (do_low_power &&
4131                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132                                 tg3_phy_auxctl_write(tp,
4133                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4135                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137                                 udelay(40);
4138                         }
4139
4140                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142                         else if (tp->phy_flags &
4143                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144                                 if (tp->link_config.active_speed == SPEED_1000)
4145                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4146                                 else
4147                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4148                         } else
4149                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154                                              SPEED_100 : SPEED_10;
4155                                 if (tg3_5700_link_polarity(tp, speed))
4156                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4157                                 else
4158                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159                         }
4160                 } else {
4161                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4162                 }
4163
4164                 if (!tg3_flag(tp, 5750_PLUS))
4165                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172                 if (tg3_flag(tp, ENABLE_APE))
4173                         mac_mode |= MAC_MODE_APE_TX_EN |
4174                                     MAC_MODE_APE_RX_EN |
4175                                     MAC_MODE_TDE_ENABLE;
4176
4177                 tw32_f(MAC_MODE, mac_mode);
4178                 udelay(100);
4179
4180                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181                 udelay(10);
4182         }
4183
4184         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187                 u32 base_val;
4188
4189                 base_val = tp->pci_clock_ctrl;
4190                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191                              CLOCK_CTRL_TXCLK_DISABLE);
4192
4193                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195         } else if (tg3_flag(tp, 5780_CLASS) ||
4196                    tg3_flag(tp, CPMU_PRESENT) ||
4197                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4198                 /* do nothing */
4199         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200                 u32 newbits1, newbits2;
4201
4202                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4204                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205                                     CLOCK_CTRL_TXCLK_DISABLE |
4206                                     CLOCK_CTRL_ALTCLK);
4207                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208                 } else if (tg3_flag(tp, 5705_PLUS)) {
4209                         newbits1 = CLOCK_CTRL_625_CORE;
4210                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211                 } else {
4212                         newbits1 = CLOCK_CTRL_ALTCLK;
4213                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214                 }
4215
4216                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217                             40);
4218
4219                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220                             40);
4221
4222                 if (!tg3_flag(tp, 5705_PLUS)) {
4223                         u32 newbits3;
4224
4225                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4227                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228                                             CLOCK_CTRL_TXCLK_DISABLE |
4229                                             CLOCK_CTRL_44MHZ_CORE);
4230                         } else {
4231                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232                         }
4233
4234                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235                                     tp->pci_clock_ctrl | newbits3, 40);
4236                 }
4237         }
4238
4239         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240                 tg3_power_down_phy(tp, do_low_power);
4241
4242         tg3_frob_aux_power(tp, true);
4243
4244         /* Workaround for unstable PLL clock */
4245         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248                 u32 val = tr32(0x7d00);
4249
4250                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251                 tw32(0x7d00, val);
4252                 if (!tg3_flag(tp, ENABLE_ASF)) {
4253                         int err;
4254
4255                         err = tg3_nvram_lock(tp);
4256                         tg3_halt_cpu(tp, RX_CPU_BASE);
4257                         if (!err)
4258                                 tg3_nvram_unlock(tp);
4259                 }
4260         }
4261
4262         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266         return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272         pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278         case MII_TG3_AUX_STAT_10HALF:
4279                 *speed = SPEED_10;
4280                 *duplex = DUPLEX_HALF;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_10FULL:
4284                 *speed = SPEED_10;
4285                 *duplex = DUPLEX_FULL;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_100HALF:
4289                 *speed = SPEED_100;
4290                 *duplex = DUPLEX_HALF;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_100FULL:
4294                 *speed = SPEED_100;
4295                 *duplex = DUPLEX_FULL;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_1000HALF:
4299                 *speed = SPEED_1000;
4300                 *duplex = DUPLEX_HALF;
4301                 break;
4302
4303         case MII_TG3_AUX_STAT_1000FULL:
4304                 *speed = SPEED_1000;
4305                 *duplex = DUPLEX_FULL;
4306                 break;
4307
4308         default:
4309                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311                                  SPEED_10;
4312                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313                                   DUPLEX_HALF;
4314                         break;
4315                 }
4316                 *speed = SPEED_UNKNOWN;
4317                 *duplex = DUPLEX_UNKNOWN;
4318                 break;
4319         }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324         int err = 0;
4325         u32 val, new_adv;
4326
4327         new_adv = ADVERTISE_CSMA;
4328         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329         new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332         if (err)
4333                 goto done;
4334
4335         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343                 if (err)
4344                         goto done;
4345         }
4346
4347         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348                 goto done;
4349
4350         tw32(TG3_CPMU_EEE_MODE,
4351              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354         if (!err) {
4355                 u32 err2;
4356
4357                 if (!tp->eee.eee_enabled)
4358                         val = 0;
4359                 else
4360                         val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4361
4362                 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4363                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4364                 if (err)
4365                         val = 0;
4366
4367                 switch (tg3_asic_rev(tp)) {
4368                 case ASIC_REV_5717:
4369                 case ASIC_REV_57765:
4370                 case ASIC_REV_57766:
4371                 case ASIC_REV_5719:
4372                         /* If any EEE modes were advertised above... */
4373                         if (val)
4374                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4375                                       MII_TG3_DSP_TAP26_RMRXSTO |
4376                                       MII_TG3_DSP_TAP26_OPCSINPT;
4377                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4378                         fallthrough;
4379                 case ASIC_REV_5720:
4380                 case ASIC_REV_5762:
4381                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4382                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4383                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4384                 }
4385
4386                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4387                 if (!err)
4388                         err = err2;
4389         }
4390
4391 done:
4392         return err;
4393 }
4394
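/* Begin link bring-up on a copper PHY.  With autoneg enabled (or in a
 * low-power WOL state) this advertises the appropriate modes and
 * restarts autonegotiation.  Otherwise it forces speed/duplex via
 * BMCR, bouncing the PHY through loopback first so the link drops
 * cleanly before the forced setting is applied.
 */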
4395 static void tg3_phy_copper_begin(struct tg3 *tp)
4396 {
4397         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4398             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4399                 u32 adv, fc;
4400
4401                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4402                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4403                         adv = ADVERTISED_10baseT_Half |
4404                               ADVERTISED_10baseT_Full;
4405                         if (tg3_flag(tp, WOL_SPEED_100MB))
4406                                 adv |= ADVERTISED_100baseT_Half |
4407                                        ADVERTISED_100baseT_Full;
4408                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4409                                 if (!(tp->phy_flags &
4410                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4411                                         adv |= ADVERTISED_1000baseT_Half;
4412                                 adv |= ADVERTISED_1000baseT_Full;
4413                         }
4414
4415                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4416                 } else {
4417                         adv = tp->link_config.advertising;
4418                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4419                                 adv &= ~(ADVERTISED_1000baseT_Half |
4420                                          ADVERTISED_1000baseT_Full);
4421
4422                         fc = tp->link_config.flowctrl;
4423                 }
4424
4425                 tg3_phy_autoneg_cfg(tp, adv, fc);
4426
4427                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4428                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4429                         /* Normally during power down we want to autonegotiate
4430                          * the lowest possible speed for WOL. However, to avoid
4431                          * link flap, we leave it untouched.
4432                          */
4433                         return;
4434                 }
4435
4436                 tg3_writephy(tp, MII_BMCR,
4437                              BMCR_ANENABLE | BMCR_ANRESTART);
4438         } else {
4439                 int i;
4440                 u32 bmcr, orig_bmcr;
4441
4442                 tp->link_config.active_speed = tp->link_config.speed;
4443                 tp->link_config.active_duplex = tp->link_config.duplex;
4444
4445                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4446                         /* With autoneg disabled, the 5715 (ASIC_REV_5714
4447                          * family) only links up when the advertisement
4448                          * register has the configured speed enabled.
4449                          */
4450                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4451                 }
4452
4453                 bmcr = 0;
4454                 switch (tp->link_config.speed) {
4455                 default:
4456                 case SPEED_10:
4457                         break;
4458
4459                 case SPEED_100:
4460                         bmcr |= BMCR_SPEED100;
4461                         break;
4462
4463                 case SPEED_1000:
4464                         bmcr |= BMCR_SPEED1000;
4465                         break;
4466                 }
4467
4468                 if (tp->link_config.duplex == DUPLEX_FULL)
4469                         bmcr |= BMCR_FULLDPLX;
4470
4471                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4472                     (bmcr != orig_bmcr)) {
4473                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4474                         for (i = 0; i < 1500; i++) {
4475                                 u32 tmp;
4476
4477                                 udelay(10);
4478                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4479                                     tg3_readphy(tp, MII_BMSR, &tmp))
4480                                         continue;
4481                                 if (!(tmp & BMSR_LSTATUS)) {
4482                                         udelay(40);
4483                                         break;
4484                                 }
4485                         }
4486                         tg3_writephy(tp, MII_BMCR, bmcr);
4487                         udelay(40);
4488                 }
4489         }
4490 }
4491
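/* Read the PHY's current BMCR and advertisement registers and mirror
 * them into tp->link_config, so an existing link configuration can be
 * adopted rather than overwritten.
 */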
4492 static int tg3_phy_pull_config(struct tg3 *tp)
4493 {
4494         int err;
4495         u32 val;
4496
4497         err = tg3_readphy(tp, MII_BMCR, &val);
4498         if (err)
4499                 goto done;
4500
4501         if (!(val & BMCR_ANENABLE)) {
4502                 tp->link_config.autoneg = AUTONEG_DISABLE;
4503                 tp->link_config.advertising = 0;
4504                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4505
4506                 err = -EIO;
4507
4508                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4509                 case 0:
4510                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511                                 goto done;
4512
4513                         tp->link_config.speed = SPEED_10;
4514                         break;
4515                 case BMCR_SPEED100:
4516                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517                                 goto done;
4518
4519                         tp->link_config.speed = SPEED_100;
4520                         break;
4521                 case BMCR_SPEED1000:
4522                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4523                                 tp->link_config.speed = SPEED_1000;
4524                                 break;
4525                         }
4526                         fallthrough;
4527                 default:
4528                         goto done;
4529                 }
4530
4531                 if (val & BMCR_FULLDPLX)
4532                         tp->link_config.duplex = DUPLEX_FULL;
4533                 else
4534                         tp->link_config.duplex = DUPLEX_HALF;
4535
4536                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4537
4538                 err = 0;
4539                 goto done;
4540         }
4541
4542         tp->link_config.autoneg = AUTONEG_ENABLE;
4543         tp->link_config.advertising = ADVERTISED_Autoneg;
4544         tg3_flag_set(tp, PAUSE_AUTONEG);
4545
4546         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547                 u32 adv;
4548
4549                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4550                 if (err)
4551                         goto done;
4552
4553                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4554                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4555
4556                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4557         } else {
4558                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4559         }
4560
4561         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562                 u32 adv;
4563
4564                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4565                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4566                         if (err)
4567                                 goto done;
4568
4569                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4570                 } else {
4571                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572                         if (err)
4573                                 goto done;
4574
4575                         adv = tg3_decode_flowctrl_1000X(val);
4576                         tp->link_config.flowctrl = adv;
4577
4578                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4579                         adv = mii_adv_to_ethtool_adv_x(val);
4580                 }
4581
4582                 tp->link_config.advertising |= adv;
4583         }
4584
4585 done:
4586         return err;
4587 }
4588
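/* BCM5401 DSP tuning sequence using opaque, vendor-supplied magic
 * register values.
 */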
4589 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4590 {
4591         int err;
4592
4593         /* Turn off tap power management. */
4594         /* Set Extended packet length bit */
4595         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4596
4597         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4598         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4599         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4600         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4601         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4602
4603         udelay(40);
4604
4605         return err;
4606 }
4607
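/* Check whether the EEE configuration currently programmed into the
 * PHY matches what the driver wants.  A false return means a PHY reset
 * is needed before the new EEE settings take effect.
 */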
4608 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4609 {
4610         struct ethtool_keee eee = {};
4611
4612         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613                 return true;
4614
4615         tg3_eee_pull_config(tp, &eee);
4616
4617         if (tp->eee.eee_enabled) {
4618                 if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4619                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4620                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621                         return false;
4622         } else {
4623                 /* EEE is disabled but we're advertising */
4624                 if (!linkmode_empty(eee.advertised))
4625                         return false;
4626         }
4627
4628         return true;
4629 }
4630
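/* Verify that the PHY advertisement registers still match what the
 * driver intends to advertise (including the pause bits when running
 * full duplex and the 5701 A0/B0 master-mode bits).  Used to decide
 * whether a completed autoneg result can be trusted.
 */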
4631 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4632 {
4633         u32 advmsk, tgtadv, advertising;
4634
4635         advertising = tp->link_config.advertising;
4636         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4637
4638         advmsk = ADVERTISE_ALL;
4639         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4640                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4641                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642         }
4643
4644         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645                 return false;
4646
4647         if ((*lcladv & advmsk) != tgtadv)
4648                 return false;
4649
4650         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651                 u32 tg3_ctrl;
4652
4653                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4654
4655                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4656                         return false;
4657
4658                 if (tgtadv &&
4659                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4660                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4661                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4662                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4663                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4664                 } else {
4665                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666                 }
4667
4668                 if (tg3_ctrl != tgtadv)
4669                         return false;
4670         }
4671
4672         return true;
4673 }
4674
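/* Fetch the link partner's advertised abilities (MII_STAT1000 plus
 * MII_LPA) and cache them in ethtool form in tp->link_config.rmt_adv.
 */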
4675 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4676 {
4677         u32 lpeth = 0;
4678
4679         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680                 u32 val;
4681
4682                 if (tg3_readphy(tp, MII_STAT1000, &val))
4683                         return false;
4684
4685                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686         }
4687
4688         if (tg3_readphy(tp, MII_LPA, rmtadv))
4689                 return false;
4690
4691         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4692         tp->link_config.rmt_adv = lpeth;
4693
4694         return true;
4695 }
4696
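/* Propagate a link state change to the stack: toggle the carrier,
 * print the link report, and return true only if the state actually
 * changed.
 */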
4697 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4698 {
4699         if (curr_link_up != tp->link_up) {
4700                 if (curr_link_up) {
4701                         netif_carrier_on(tp->dev);
4702                 } else {
4703                         netif_carrier_off(tp->dev);
4704                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4705                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706                 }
4707
4708                 tg3_link_report(tp);
4709                 return true;
4710         }
4711
4712         return false;
4713 }
4714
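/* Mask MAC event interrupts and ack any latched status-change bits
 * before the link is reconfigured.
 */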
4715 static void tg3_clear_mac_status(struct tg3 *tp)
4716 {
4717         tw32(MAC_EVENT, 0);
4718
4719         tw32_f(MAC_STATUS,
4720                MAC_STATUS_SYNC_CHANGED |
4721                MAC_STATUS_CFG_CHANGED |
4722                MAC_STATUS_MI_COMPLETION |
4723                MAC_STATUS_LNKSTATE_CHANGED);
4724         udelay(40);
4725 }
4726
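/* Program the CPMU EEE link-idle, mode and debounce timer registers
 * from the settings cached in tp->eee.  The entire mode word is gated
 * on tp->eee.eee_enabled.
 */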
4727 static void tg3_setup_eee(struct tg3 *tp)
4728 {
4729         u32 val;
4730
4731         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4732               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4733         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4734                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4735
4736         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4737
4738         tw32_f(TG3_CPMU_EEE_CTRL,
4739                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4740
4741         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4742               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4743               TG3_CPMU_EEEMD_LPI_IN_RX |
4744               TG3_CPMU_EEEMD_EEE_ENABLE;
4745
4746         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4747                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4748
4749         if (tg3_flag(tp, ENABLE_APE))
4750                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4751
4752         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4753
4754         tw32_f(TG3_CPMU_EEE_DBTMR1,
4755                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4756                (tp->eee.tx_lpi_timer & 0xffff));
4757
4758         tw32_f(TG3_CPMU_EEE_DBTMR2,
4759                TG3_CPMU_DBTMR2_APE_TX_2047US |
4760                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4761 }
4762
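/* Main link setup path for copper PHYs: apply per-chip PHY
 * workarounds, poll BMSR for link, derive speed/duplex from the AUX
 * status register, sanity-check the autoneg results, and finally
 * program MAC_MODE, the LEDs and flow control to match.
 */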
4763 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4764 {
4765         bool current_link_up;
4766         u32 bmsr, val;
4767         u32 lcl_adv, rmt_adv;
4768         u32 current_speed;
4769         u8 current_duplex;
4770         int i, err;
4771
4772         tg3_clear_mac_status(tp);
4773
4774         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4775                 tw32_f(MAC_MI_MODE,
4776                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4777                 udelay(80);
4778         }
4779
4780         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4781
4782         /* Some third-party PHYs need to be reset on link going
4783          * down.
4784          */
4785         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4786              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4787              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4788             tp->link_up) {
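                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */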
4789                 tg3_readphy(tp, MII_BMSR, &bmsr);
4790                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4791                     !(bmsr & BMSR_LSTATUS))
4792                         force_reset = true;
4793         }
4794         if (force_reset)
4795                 tg3_phy_reset(tp);
4796
4797         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4798                 tg3_readphy(tp, MII_BMSR, &bmsr);
4799                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4800                     !tg3_flag(tp, INIT_COMPLETE))
4801                         bmsr = 0;
4802
4803                 if (!(bmsr & BMSR_LSTATUS)) {
4804                         err = tg3_init_5401phy_dsp(tp);
4805                         if (err)
4806                                 return err;
4807
4808                         tg3_readphy(tp, MII_BMSR, &bmsr);
4809                         for (i = 0; i < 1000; i++) {
4810                                 udelay(10);
4811                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812                                     (bmsr & BMSR_LSTATUS)) {
4813                                         udelay(40);
4814                                         break;
4815                                 }
4816                         }
4817
4818                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4819                             TG3_PHY_REV_BCM5401_B0 &&
4820                             !(bmsr & BMSR_LSTATUS) &&
4821                             tp->link_config.active_speed == SPEED_1000) {
4822                                 err = tg3_phy_reset(tp);
4823                                 if (!err)
4824                                         err = tg3_init_5401phy_dsp(tp);
4825                                 if (err)
4826                                         return err;
4827                         }
4828                 }
4829         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4830                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4831                 /* 5701 {A0,B0} CRC bug workaround */
4832                 tg3_writephy(tp, 0x15, 0x0a75);
4833                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4835                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836         }
4837
4838         /* Clear pending interrupts... */
4839         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841
4842         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4843                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4844         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4845                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4846
4847         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4848             tg3_asic_rev(tp) == ASIC_REV_5701) {
4849                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4850                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4851                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4852                 else
4853                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4854         }
4855
4856         current_link_up = false;
4857         current_speed = SPEED_UNKNOWN;
4858         current_duplex = DUPLEX_UNKNOWN;
4859         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4860         tp->link_config.rmt_adv = 0;
4861
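        /* On PHYs flagged for capacitive coupling, make sure bit 10 of
         * the MISCTEST shadow register is set; if it was not, set it
         * and restart link bring-up.
         */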
4862         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4863                 err = tg3_phy_auxctl_read(tp,
4864                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4865                                           &val);
4866                 if (!err && !(val & (1 << 10))) {
4867                         tg3_phy_auxctl_write(tp,
4868                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869                                              val | (1 << 10));
4870                         goto relink;
4871                 }
4872         }
4873
4874         bmsr = 0;
4875         for (i = 0; i < 100; i++) {
4876                 tg3_readphy(tp, MII_BMSR, &bmsr);
4877                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4878                     (bmsr & BMSR_LSTATUS))
4879                         break;
4880                 udelay(40);
4881         }
4882
4883         if (bmsr & BMSR_LSTATUS) {
4884                 u32 aux_stat, bmcr;
4885
4886                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4887                 for (i = 0; i < 2000; i++) {
4888                         udelay(10);
4889                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4890                             aux_stat)
4891                                 break;
4892                 }
4893
4894                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4895                                              &current_speed,
4896                                              &current_duplex);
4897
4898                 bmcr = 0;
4899                 for (i = 0; i < 200; i++) {
4900                         tg3_readphy(tp, MII_BMCR, &bmcr);
4901                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4902                                 continue;
4903                         if (bmcr && bmcr != 0x7fff)
4904                                 break;
4905                         udelay(10);
4906                 }
4907
4908                 lcl_adv = 0;
4909                 rmt_adv = 0;
4910
4911                 tp->link_config.active_speed = current_speed;
4912                 tp->link_config.active_duplex = current_duplex;
4913
4914                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4915                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4916
4917                         if ((bmcr & BMCR_ANENABLE) &&
4918                             eee_config_ok &&
4919                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4920                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4921                                 current_link_up = true;
4922
4923                         /* EEE setting changes take effect only after a PHY
4924                          * reset.  If we have skipped a reset due to Link Flap
4925                          * Avoidance being enabled, do it now.
4926                          */
4927                         if (!eee_config_ok &&
4928                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4929                             !force_reset) {
4930                                 tg3_setup_eee(tp);
4931                                 tg3_phy_reset(tp);
4932                         }
4933                 } else {
4934                         if (!(bmcr & BMCR_ANENABLE) &&
4935                             tp->link_config.speed == current_speed &&
4936                             tp->link_config.duplex == current_duplex) {
4937                                 current_link_up = true;
4938                         }
4939                 }
4940
4941                 if (current_link_up &&
4942                     tp->link_config.active_duplex == DUPLEX_FULL) {
4943                         u32 reg, bit;
4944
4945                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4946                                 reg = MII_TG3_FET_GEN_STAT;
4947                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4948                         } else {
4949                                 reg = MII_TG3_EXT_STAT;
4950                                 bit = MII_TG3_EXT_STAT_MDIX;
4951                         }
4952
4953                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4954                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4955
4956                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4957                 }
4958         }
4959
4960 relink:
4961         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4962                 tg3_phy_copper_begin(tp);
4963
4964                 if (tg3_flag(tp, ROBOSWITCH)) {
4965                         current_link_up = true;
4966                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is in use */
4967                         current_speed = SPEED_1000;
4968                         current_duplex = DUPLEX_FULL;
4969                         tp->link_config.active_speed = current_speed;
4970                         tp->link_config.active_duplex = current_duplex;
4971                 }
4972
4973                 tg3_readphy(tp, MII_BMSR, &bmsr);
4974                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4975                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4976                         current_link_up = true;
4977         }
4978
4979         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4980         if (current_link_up) {
4981                 if (tp->link_config.active_speed == SPEED_100 ||
4982                     tp->link_config.active_speed == SPEED_10)
4983                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4984                 else
4985                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4987                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4988         else
4989                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4990
4991         /* In order for the 5750 core in the BCM4785 chip to work properly
4992          * in RGMII mode, the LED Control Register must be set up.
4993          */
4994         if (tg3_flag(tp, RGMII_MODE)) {
4995                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4996                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4997
4998                 if (tp->link_config.active_speed == SPEED_10)
4999                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5000                 else if (tp->link_config.active_speed == SPEED_100)
5001                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002                                      LED_CTRL_100MBPS_ON);
5003                 else if (tp->link_config.active_speed == SPEED_1000)
5004                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005                                      LED_CTRL_1000MBPS_ON);
5006
5007                 tw32(MAC_LED_CTRL, led_ctrl);
5008                 udelay(40);
5009         }
5010
5011         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012         if (tp->link_config.active_duplex == DUPLEX_HALF)
5013                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014
5015         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5016                 if (current_link_up &&
5017                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5018                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5019                 else
5020                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021         }
5022
5023         /* ??? Without this setting Netgear GA302T PHY does not
5024          * ??? send/receive packets...
5025          */
5026         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5027             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5028                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5029                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5030                 udelay(80);
5031         }
5032
5033         tw32_f(MAC_MODE, tp->mac_mode);
5034         udelay(40);
5035
5036         tg3_phy_eee_adjust(tp, current_link_up);
5037
5038         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5039                 /* Polled via timer. */
5040                 tw32_f(MAC_EVENT, 0);
5041         } else {
5042                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5043         }
5044         udelay(40);
5045
5046         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5047             current_link_up &&
5048             tp->link_config.active_speed == SPEED_1000 &&
5049             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050                 udelay(120);
5051                 tw32_f(MAC_STATUS,
5052                      (MAC_STATUS_SYNC_CHANGED |
5053                       MAC_STATUS_CFG_CHANGED));
5054                 udelay(40);
5055                 tg3_write_mem(tp,
5056                               NIC_SRAM_FIRMWARE_MBOX,
5057                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058         }
5059
5060         /* Prevent send BD (buffer descriptor) corruption. */
5061         if (tg3_flag(tp, CLKREQ_BUG)) {
5062                 if (tp->link_config.active_speed == SPEED_100 ||
5063                     tp->link_config.active_speed == SPEED_10)
5064                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5065                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5066                 else
5067                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5068                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5069         }
5070
5071         tg3_test_and_report_link_chg(tp, current_link_up);
5072
5073         return 0;
5074 }
5075
5076 struct tg3_fiber_aneginfo {
5077         int state;
5078 #define ANEG_STATE_UNKNOWN              0
5079 #define ANEG_STATE_AN_ENABLE            1
5080 #define ANEG_STATE_RESTART_INIT         2
5081 #define ANEG_STATE_RESTART              3
5082 #define ANEG_STATE_DISABLE_LINK_OK      4
5083 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5084 #define ANEG_STATE_ABILITY_DETECT       6
5085 #define ANEG_STATE_ACK_DETECT_INIT      7
5086 #define ANEG_STATE_ACK_DETECT           8
5087 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5088 #define ANEG_STATE_COMPLETE_ACK         10
5089 #define ANEG_STATE_IDLE_DETECT_INIT     11
5090 #define ANEG_STATE_IDLE_DETECT          12
5091 #define ANEG_STATE_LINK_OK              13
5092 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5093 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5094
5095         u32 flags;
5096 #define MR_AN_ENABLE            0x00000001
5097 #define MR_RESTART_AN           0x00000002
5098 #define MR_AN_COMPLETE          0x00000004
5099 #define MR_PAGE_RX              0x00000008
5100 #define MR_NP_LOADED            0x00000010
5101 #define MR_TOGGLE_TX            0x00000020
5102 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5103 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5104 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5105 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5106 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5107 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5108 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5109 #define MR_TOGGLE_RX            0x00002000
5110 #define MR_NP_RX                0x00004000
5111
5112 #define MR_LINK_OK              0x80000000
5113
5114         unsigned long link_time, cur_time;
5115
5116         u32 ability_match_cfg;
5117         int ability_match_count;
5118
5119         char ability_match, idle_match, ack_match;
5120
5121         u32 txconfig, rxconfig;
5122 #define ANEG_CFG_NP             0x00000080
5123 #define ANEG_CFG_ACK            0x00000040
5124 #define ANEG_CFG_RF2            0x00000020
5125 #define ANEG_CFG_RF1            0x00000010
5126 #define ANEG_CFG_PS2            0x00000001
5127 #define ANEG_CFG_PS1            0x00008000
5128 #define ANEG_CFG_HD             0x00004000
5129 #define ANEG_CFG_FD             0x00002000
5130 #define ANEG_CFG_INVAL          0x00001f06
5131
5132 };
5133 #define ANEG_OK         0
5134 #define ANEG_DONE       1
5135 #define ANEG_TIMER_ENAB 2
5136 #define ANEG_FAILED     -1
5137
5138 #define ANEG_STATE_SETTLE_TIME  10000
5139
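/* Software 1000BASE-X autonegotiation arbitration state machine,
 * essentially the IEEE 802.3 clause 37 state diagram.  It is stepped
 * roughly once per microsecond from fiber_autoneg() below and returns
 * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */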
5140 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5141                                    struct tg3_fiber_aneginfo *ap)
5142 {
5143         u16 flowctrl;
5144         unsigned long delta;
5145         u32 rx_cfg_reg;
5146         int ret;
5147
5148         if (ap->state == ANEG_STATE_UNKNOWN) {
5149                 ap->rxconfig = 0;
5150                 ap->link_time = 0;
5151                 ap->cur_time = 0;
5152                 ap->ability_match_cfg = 0;
5153                 ap->ability_match_count = 0;
5154                 ap->ability_match = 0;
5155                 ap->idle_match = 0;
5156                 ap->ack_match = 0;
5157         }
5158         ap->cur_time++;
5159
5160         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5161                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5162
5163                 if (rx_cfg_reg != ap->ability_match_cfg) {
5164                         ap->ability_match_cfg = rx_cfg_reg;
5165                         ap->ability_match = 0;
5166                         ap->ability_match_count = 0;
5167                 } else {
5168                         if (++ap->ability_match_count > 1) {
5169                                 ap->ability_match = 1;
5170                                 ap->ability_match_cfg = rx_cfg_reg;
5171                         }
5172                 }
5173                 if (rx_cfg_reg & ANEG_CFG_ACK)
5174                         ap->ack_match = 1;
5175                 else
5176                         ap->ack_match = 0;
5177
5178                 ap->idle_match = 0;
5179         } else {
5180                 ap->idle_match = 1;
5181                 ap->ability_match_cfg = 0;
5182                 ap->ability_match_count = 0;
5183                 ap->ability_match = 0;
5184                 ap->ack_match = 0;
5185
5186                 rx_cfg_reg = 0;
5187         }
5188
5189         ap->rxconfig = rx_cfg_reg;
5190         ret = ANEG_OK;
5191
5192         switch (ap->state) {
5193         case ANEG_STATE_UNKNOWN:
5194                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5195                         ap->state = ANEG_STATE_AN_ENABLE;
5196
5197                 fallthrough;
5198         case ANEG_STATE_AN_ENABLE:
5199                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5200                 if (ap->flags & MR_AN_ENABLE) {
5201                         ap->link_time = 0;
5202                         ap->cur_time = 0;
5203                         ap->ability_match_cfg = 0;
5204                         ap->ability_match_count = 0;
5205                         ap->ability_match = 0;
5206                         ap->idle_match = 0;
5207                         ap->ack_match = 0;
5208
5209                         ap->state = ANEG_STATE_RESTART_INIT;
5210                 } else {
5211                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5212                 }
5213                 break;
5214
5215         case ANEG_STATE_RESTART_INIT:
5216                 ap->link_time = ap->cur_time;
5217                 ap->flags &= ~(MR_NP_LOADED);
5218                 ap->txconfig = 0;
5219                 tw32(MAC_TX_AUTO_NEG, 0);
5220                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5221                 tw32_f(MAC_MODE, tp->mac_mode);
5222                 udelay(40);
5223
5224                 ret = ANEG_TIMER_ENAB;
5225                 ap->state = ANEG_STATE_RESTART;
5226
5227                 fallthrough;
5228         case ANEG_STATE_RESTART:
5229                 delta = ap->cur_time - ap->link_time;
5230                 if (delta > ANEG_STATE_SETTLE_TIME)
5231                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5232                 else
5233                         ret = ANEG_TIMER_ENAB;
5234                 break;
5235
5236         case ANEG_STATE_DISABLE_LINK_OK:
5237                 ret = ANEG_DONE;
5238                 break;
5239
5240         case ANEG_STATE_ABILITY_DETECT_INIT:
5241                 ap->flags &= ~(MR_TOGGLE_TX);
5242                 ap->txconfig = ANEG_CFG_FD;
5243                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5244                 if (flowctrl & ADVERTISE_1000XPAUSE)
5245                         ap->txconfig |= ANEG_CFG_PS1;
5246                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5247                         ap->txconfig |= ANEG_CFG_PS2;
5248                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5249                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5250                 tw32_f(MAC_MODE, tp->mac_mode);
5251                 udelay(40);
5252
5253                 ap->state = ANEG_STATE_ABILITY_DETECT;
5254                 break;
5255
5256         case ANEG_STATE_ABILITY_DETECT:
5257                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5258                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259                 break;
5260
5261         case ANEG_STATE_ACK_DETECT_INIT:
5262                 ap->txconfig |= ANEG_CFG_ACK;
5263                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265                 tw32_f(MAC_MODE, tp->mac_mode);
5266                 udelay(40);
5267
5268                 ap->state = ANEG_STATE_ACK_DETECT;
5269
5270                 fallthrough;
5271         case ANEG_STATE_ACK_DETECT:
5272                 if (ap->ack_match != 0) {
5273                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5274                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5275                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5276                         } else {
5277                                 ap->state = ANEG_STATE_AN_ENABLE;
5278                         }
5279                 } else if (ap->ability_match != 0 &&
5280                            ap->rxconfig == 0) {
5281                         ap->state = ANEG_STATE_AN_ENABLE;
5282                 }
5283                 break;
5284
5285         case ANEG_STATE_COMPLETE_ACK_INIT:
5286                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5287                         ret = ANEG_FAILED;
5288                         break;
5289                 }
5290                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5291                                MR_LP_ADV_HALF_DUPLEX |
5292                                MR_LP_ADV_SYM_PAUSE |
5293                                MR_LP_ADV_ASYM_PAUSE |
5294                                MR_LP_ADV_REMOTE_FAULT1 |
5295                                MR_LP_ADV_REMOTE_FAULT2 |
5296                                MR_LP_ADV_NEXT_PAGE |
5297                                MR_TOGGLE_RX |
5298                                MR_NP_RX);
5299                 if (ap->rxconfig & ANEG_CFG_FD)
5300                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5301                 if (ap->rxconfig & ANEG_CFG_HD)
5302                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5303                 if (ap->rxconfig & ANEG_CFG_PS1)
5304                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5305                 if (ap->rxconfig & ANEG_CFG_PS2)
5306                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5307                 if (ap->rxconfig & ANEG_CFG_RF1)
5308                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5309                 if (ap->rxconfig & ANEG_CFG_RF2)
5310                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5311                 if (ap->rxconfig & ANEG_CFG_NP)
5312                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5313
5314                 ap->link_time = ap->cur_time;
5315
5316                 ap->flags ^= (MR_TOGGLE_TX);
5317                 if (ap->rxconfig & 0x0008)
5318                         ap->flags |= MR_TOGGLE_RX;
5319                 if (ap->rxconfig & ANEG_CFG_NP)
5320                         ap->flags |= MR_NP_RX;
5321                 ap->flags |= MR_PAGE_RX;
5322
5323                 ap->state = ANEG_STATE_COMPLETE_ACK;
5324                 ret = ANEG_TIMER_ENAB;
5325                 break;
5326
5327         case ANEG_STATE_COMPLETE_ACK:
5328                 if (ap->ability_match != 0 &&
5329                     ap->rxconfig == 0) {
5330                         ap->state = ANEG_STATE_AN_ENABLE;
5331                         break;
5332                 }
5333                 delta = ap->cur_time - ap->link_time;
5334                 if (delta > ANEG_STATE_SETTLE_TIME) {
5335                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5336                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5337                         } else {
5338                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5339                                     !(ap->flags & MR_NP_RX)) {
5340                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341                                 } else {
5342                                         ret = ANEG_FAILED;
5343                                 }
5344                         }
5345                 }
5346                 break;
5347
5348         case ANEG_STATE_IDLE_DETECT_INIT:
5349                 ap->link_time = ap->cur_time;
5350                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5351                 tw32_f(MAC_MODE, tp->mac_mode);
5352                 udelay(40);
5353
5354                 ap->state = ANEG_STATE_IDLE_DETECT;
5355                 ret = ANEG_TIMER_ENAB;
5356                 break;
5357
5358         case ANEG_STATE_IDLE_DETECT:
5359                 if (ap->ability_match != 0 &&
5360                     ap->rxconfig == 0) {
5361                         ap->state = ANEG_STATE_AN_ENABLE;
5362                         break;
5363                 }
5364                 delta = ap->cur_time - ap->link_time;
5365                 if (delta > ANEG_STATE_SETTLE_TIME) {
5366                         /* XXX another gem from the Broadcom driver :( */
5367                         ap->state = ANEG_STATE_LINK_OK;
5368                 }
5369                 break;
5370
5371         case ANEG_STATE_LINK_OK:
5372                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5373                 ret = ANEG_DONE;
5374                 break;
5375
5376         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5377                 /* ??? unimplemented */
5378                 break;
5379
5380         case ANEG_STATE_NEXT_PAGE_WAIT:
5381                 /* ??? unimplemented */
5382                 break;
5383
5384         default:
5385                 ret = ANEG_FAILED;
5386                 break;
5387         }
5388
5389         return ret;
5390 }
5391
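/* Run the software autoneg state machine to completion, bounded at
 * roughly 195 ms (195000 iterations with a 1 us delay).  *txflags
 * receives the transmitted config word and *rxflags the resulting MR_*
 * flags; the return value is nonzero on a successful negotiation.
 */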
5392 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5393 {
5394         int res = 0;
5395         struct tg3_fiber_aneginfo aninfo;
5396         int status = ANEG_FAILED;
5397         unsigned int tick;
5398         u32 tmp;
5399
5400         tw32_f(MAC_TX_AUTO_NEG, 0);
5401
5402         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5403         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404         udelay(40);
5405
5406         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407         udelay(40);
5408
5409         memset(&aninfo, 0, sizeof(aninfo));
5410         aninfo.flags |= MR_AN_ENABLE;
5411         aninfo.state = ANEG_STATE_UNKNOWN;
5412         aninfo.cur_time = 0;
5413         tick = 0;
5414         while (++tick < 195000) {
5415                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5416                 if (status == ANEG_DONE || status == ANEG_FAILED)
5417                         break;
5418
5419                 udelay(1);
5420         }
5421
5422         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5423         tw32_f(MAC_MODE, tp->mac_mode);
5424         udelay(40);
5425
5426         *txflags = aninfo.txconfig;
5427         *rxflags = aninfo.flags;
5428
5429         if (status == ANEG_DONE &&
5430             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5431                              MR_LP_ADV_FULL_DUPLEX)))
5432                 res = 1;
5433
5434         return res;
5435 }
5436
5437 static void tg3_init_bcm8002(struct tg3 *tp)
5438 {
5439         u32 mac_status = tr32(MAC_STATUS);
5440         int i;
5441
5442         /* Reset when initializing for the first time or when we have a link. */
5443         if (tg3_flag(tp, INIT_COMPLETE) &&
5444             !(mac_status & MAC_STATUS_PCS_SYNCED))
5445                 return;
5446
5447         /* Set PLL lock range. */
5448         tg3_writephy(tp, 0x16, 0x8007);
5449
5450         /* SW reset */
5451         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5452
5453         /* Wait for reset to complete. */
5454         /* XXX schedule_timeout() ... */
5455         for (i = 0; i < 500; i++)
5456                 udelay(10);
5457
5458         /* Config mode; select PMA/Ch 1 regs. */
5459         tg3_writephy(tp, 0x10, 0x8411);
5460
5461         /* Enable auto-lock and comdet, select txclk for tx. */
5462         tg3_writephy(tp, 0x11, 0x0a10);
5463
5464         tg3_writephy(tp, 0x18, 0x00a0);
5465         tg3_writephy(tp, 0x16, 0x41ff);
5466
5467         /* Assert and deassert POR. */
5468         tg3_writephy(tp, 0x13, 0x0400);
5469         udelay(40);
5470         tg3_writephy(tp, 0x13, 0x0000);
5471
5472         tg3_writephy(tp, 0x11, 0x0a50);
5473         udelay(40);
5474         tg3_writephy(tp, 0x11, 0x0a10);
5475
5476         /* Wait for signal to stabilize */
5477         /* XXX schedule_timeout() ... */
5478         for (i = 0; i < 15000; i++)
5479                 udelay(10);
5480
5481         /* Deselect the channel register so we can read the PHYID
5482          * later.
5483          */
5484         tg3_writephy(tp, 0x10, 0x8011);
5485 }
5486
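/* Fiber link setup when the SG DIG hardware autoneg block is in use.
 * Applies the MAC_SERDES_CFG workaround needed on everything except
 * 5704 A0/A1, restarts hardware autoneg when the control word is
 * stale, and falls back to parallel detection when the link partner
 * never completes autoneg.
 */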
5487 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 {
5489         u16 flowctrl;
5490         bool current_link_up;
5491         u32 sg_dig_ctrl, sg_dig_status;
5492         u32 serdes_cfg, expected_sg_dig_ctrl;
5493         int workaround, port_a;
5494
5495         serdes_cfg = 0;
5496         workaround = 0;
5497         port_a = 1;
5498         current_link_up = false;
5499
5500         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5501             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5502                 workaround = 1;
5503                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504                         port_a = 0;
5505
5506                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5507                 /* preserve bits 20-23 for voltage regulator */
5508                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509         }
5510
5511         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5512
5513         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5514                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5515                         if (workaround) {
5516                                 u32 val = serdes_cfg;
5517
5518                                 if (port_a)
5519                                         val |= 0xc010000;
5520                                 else
5521                                         val |= 0x4010000;
5522                                 tw32_f(MAC_SERDES_CFG, val);
5523                         }
5524
5525                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5526                 }
5527                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5528                         tg3_setup_flow_control(tp, 0, 0);
5529                         current_link_up = true;
5530                 }
5531                 goto out;
5532         }
5533
5534         /* Want auto-negotiation.  */
5535         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5536
5537         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5538         if (flowctrl & ADVERTISE_1000XPAUSE)
5539                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5540         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5541                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5542
5543         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5544                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5545                     tp->serdes_counter &&
5546                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5547                                     MAC_STATUS_RCVD_CFG)) ==
5548                      MAC_STATUS_PCS_SYNCED)) {
5549                         tp->serdes_counter--;
5550                         current_link_up = true;
5551                         goto out;
5552                 }
5553 restart_autoneg:
5554                 if (workaround)
5555                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5556                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5557                 udelay(5);
5558                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5559
5560                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5561                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5562         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5563                                  MAC_STATUS_SIGNAL_DET)) {
5564                 sg_dig_status = tr32(SG_DIG_STATUS);
5565                 mac_status = tr32(MAC_STATUS);
5566
5567                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5568                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5569                         u32 local_adv = 0, remote_adv = 0;
5570
5571                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5572                                 local_adv |= ADVERTISE_1000XPAUSE;
5573                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5574                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5575
5576                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5577                                 remote_adv |= LPA_1000XPAUSE;
5578                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5579                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5580
5581                         tp->link_config.rmt_adv =
5582                                            mii_adv_to_ethtool_adv_x(remote_adv);
5583
5584                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5585                         current_link_up = true;
5586                         tp->serdes_counter = 0;
5587                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5588                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5589                         if (tp->serdes_counter)
5590                                 tp->serdes_counter--;
5591                         else {
5592                                 if (workaround) {
5593                                         u32 val = serdes_cfg;
5594
5595                                         if (port_a)
5596                                                 val |= 0xc010000;
5597                                         else
5598                                                 val |= 0x4010000;
5599
5600                                         tw32_f(MAC_SERDES_CFG, val);
5601                                 }
5602
5603                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604                                 udelay(40);
5605
5606                                 /* Link parallel detection: link is up only
5607                                  * if we have PCS_SYNC and are not
5608                                  * receiving config code words. */
5609                                 mac_status = tr32(MAC_STATUS);
5610                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5611                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5612                                         tg3_setup_flow_control(tp, 0, 0);
5613                                         current_link_up = true;
5614                                         tp->phy_flags |=
5615                                                 TG3_PHYFLG_PARALLEL_DETECT;
5616                                         tp->serdes_counter =
5617                                                 SERDES_PARALLEL_DET_TIMEOUT;
5618                                 } else
5619                                         goto restart_autoneg;
5620                         }
5621                 }
5622         } else {
5623                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5624                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5625         }
5626
5627 out:
5628         return current_link_up;
5629 }
5630
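/* Fiber link setup without the hardware autoneg block: either run the
 * software autoneg state machine, or simply force a 1000FD link when
 * autonegotiation is disabled.
 */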
5631 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5632 {
5633         bool current_link_up = false;
5634
5635         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636                 goto out;
5637
5638         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5639                 u32 txflags, rxflags;
5640                 int i;
5641
5642                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5643                         u32 local_adv = 0, remote_adv = 0;
5644
5645                         if (txflags & ANEG_CFG_PS1)
5646                                 local_adv |= ADVERTISE_1000XPAUSE;
5647                         if (txflags & ANEG_CFG_PS2)
5648                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5649
5650                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5651                                 remote_adv |= LPA_1000XPAUSE;
5652                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5653                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5654
5655                         tp->link_config.rmt_adv =
5656                                            mii_adv_to_ethtool_adv_x(remote_adv);
5657
5658                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5659
5660                         current_link_up = true;
5661                 }
5662                 for (i = 0; i < 30; i++) {
5663                         udelay(20);
5664                         tw32_f(MAC_STATUS,
5665                                (MAC_STATUS_SYNC_CHANGED |
5666                                 MAC_STATUS_CFG_CHANGED));
5667                         udelay(40);
5668                         if ((tr32(MAC_STATUS) &
5669                              (MAC_STATUS_SYNC_CHANGED |
5670                               MAC_STATUS_CFG_CHANGED)) == 0)
5671                                 break;
5672                 }
5673
5674                 mac_status = tr32(MAC_STATUS);
5675                 if (!current_link_up &&
5676                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5677                     !(mac_status & MAC_STATUS_RCVD_CFG))
5678                         current_link_up = true;
5679         } else {
5680                 tg3_setup_flow_control(tp, 0, 0);
5681
5682                 /* Forcing 1000FD link up. */
5683                 current_link_up = true;
5684
5685                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686                 udelay(40);
5687
5688                 tw32_f(MAC_MODE, tp->mac_mode);
5689                 udelay(40);
5690         }
5691
5692 out:
5693         return current_link_up;
5694 }
5695
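/* Top-level link setup for TBI (fiber) ports.  Bails out early when an
 * established link is still healthy, otherwise reruns hardware or
 * software autoneg and updates the LEDs and link report to match.
 */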
5696 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 {
5698         u32 orig_pause_cfg;
5699         u32 orig_active_speed;
5700         u8 orig_active_duplex;
5701         u32 mac_status;
5702         bool current_link_up;
5703         int i;
5704
5705         orig_pause_cfg = tp->link_config.active_flowctrl;
5706         orig_active_speed = tp->link_config.active_speed;
5707         orig_active_duplex = tp->link_config.active_duplex;
5708
5709         if (!tg3_flag(tp, HW_AUTONEG) &&
5710             tp->link_up &&
5711             tg3_flag(tp, INIT_COMPLETE)) {
5712                 mac_status = tr32(MAC_STATUS);
5713                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5714                                MAC_STATUS_SIGNAL_DET |
5715                                MAC_STATUS_CFG_CHANGED |
5716                                MAC_STATUS_RCVD_CFG);
5717                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5718                                    MAC_STATUS_SIGNAL_DET)) {
5719                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5720                                             MAC_STATUS_CFG_CHANGED));
5721                         return 0;
5722                 }
5723         }
5724
5725         tw32_f(MAC_TX_AUTO_NEG, 0);
5726
5727         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5728         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5729         tw32_f(MAC_MODE, tp->mac_mode);
5730         udelay(40);
5731
5732         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5733                 tg3_init_bcm8002(tp);
5734
5735         /* Enable link change events even when serdes polling is in use.  */
5736         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737         udelay(40);
5738
5739         tp->link_config.rmt_adv = 0;
5740         mac_status = tr32(MAC_STATUS);
5741
5742         if (tg3_flag(tp, HW_AUTONEG))
5743                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744         else
5745                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746
5747         tp->napi[0].hw_status->status =
5748                 (SD_STATUS_UPDATED |
5749                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750
5751         for (i = 0; i < 100; i++) {
5752                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5753                                     MAC_STATUS_CFG_CHANGED));
5754                 udelay(5);
5755                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5756                                          MAC_STATUS_CFG_CHANGED |
5757                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5758                         break;
5759         }
5760
5761         mac_status = tr32(MAC_STATUS);
5762         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5763                 current_link_up = false;
5764                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5765                     tp->serdes_counter == 0) {
5766                         tw32_f(MAC_MODE, (tp->mac_mode |
5767                                           MAC_MODE_SEND_CONFIGS));
5768                         udelay(1);
5769                         tw32_f(MAC_MODE, tp->mac_mode);
5770                 }
5771         }
5772
5773         if (current_link_up) {
5774                 tp->link_config.active_speed = SPEED_1000;
5775                 tp->link_config.active_duplex = DUPLEX_FULL;
5776                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5777                                     LED_CTRL_LNKLED_OVERRIDE |
5778                                     LED_CTRL_1000MBPS_ON));
5779         } else {
5780                 tp->link_config.active_speed = SPEED_UNKNOWN;
5781                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5782                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783                                     LED_CTRL_LNKLED_OVERRIDE |
5784                                     LED_CTRL_TRAFFIC_OVERRIDE));
5785         }
5786
5787         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5788                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5789                 if (orig_pause_cfg != now_pause_cfg ||
5790                     orig_active_speed != tp->link_config.active_speed ||
5791                     orig_active_duplex != tp->link_config.active_duplex)
5792                         tg3_link_report(tp);
5793         }
5794
5795         return 0;
5796 }
5797
5798 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5799 {
5800         int err = 0;
5801         u32 bmsr, bmcr;
5802         u32 current_speed = SPEED_UNKNOWN;
5803         u8 current_duplex = DUPLEX_UNKNOWN;
5804         bool current_link_up = false;
5805         u32 local_adv, remote_adv, sgsr;
5806
5807         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5808              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5809              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5810              (sgsr & SERDES_TG3_SGMII_MODE)) {
5811
5812                 if (force_reset)
5813                         tg3_phy_reset(tp);
5814
5815                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816
5817                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5818                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819                 } else {
5820                         current_link_up = true;
5821                         if (sgsr & SERDES_TG3_SPEED_1000) {
5822                                 current_speed = SPEED_1000;
5823                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5825                                 current_speed = SPEED_100;
5826                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827                         } else {
5828                                 current_speed = SPEED_10;
5829                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830                         }
5831
5832                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5833                                 current_duplex = DUPLEX_FULL;
5834                         else
5835                                 current_duplex = DUPLEX_HALF;
5836                 }
5837
5838                 tw32_f(MAC_MODE, tp->mac_mode);
5839                 udelay(40);
5840
5841                 tg3_clear_mac_status(tp);
5842
5843                 goto fiber_setup_done;
5844         }
5845
5846         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847         tw32_f(MAC_MODE, tp->mac_mode);
5848         udelay(40);
5849
5850         tg3_clear_mac_status(tp);
5851
5852         if (force_reset)
5853                 tg3_phy_reset(tp);
5854
5855         tp->link_config.rmt_adv = 0;
5856
5857         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5860                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5861                         bmsr |= BMSR_LSTATUS;
5862                 else
5863                         bmsr &= ~BMSR_LSTATUS;
5864         }
5865
5866         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867
5868         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5869             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5870                 /* do nothing, just check for link up at the end */
5871         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5872                 u32 adv, newadv;
5873
5874                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5875                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5876                                  ADVERTISE_1000XPAUSE |
5877                                  ADVERTISE_1000XPSE_ASYM |
5878                                  ADVERTISE_SLCT);
5879
5880                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5881                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882
5883                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5884                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5885                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5886                         tg3_writephy(tp, MII_BMCR, bmcr);
5887
5888                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5889                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5890                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5891
5892                         return err;
5893                 }
5894         } else {
5895                 u32 new_bmcr;
5896
5897                 bmcr &= ~BMCR_SPEED1000;
5898                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899
5900                 if (tp->link_config.duplex == DUPLEX_FULL)
5901                         new_bmcr |= BMCR_FULLDPLX;
5902
5903                 if (new_bmcr != bmcr) {
5904                         /* BMCR_SPEED1000 is a reserved bit that needs
5905                          * to be set on write.
5906                          */
5907                         new_bmcr |= BMCR_SPEED1000;
5908
5909                         /* Force a linkdown */
5910                         if (tp->link_up) {
5911                                 u32 adv;
5912
5913                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5914                                 adv &= ~(ADVERTISE_1000XFULL |
5915                                          ADVERTISE_1000XHALF |
5916                                          ADVERTISE_SLCT);
5917                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5918                                 tg3_writephy(tp, MII_BMCR, bmcr |
5919                                                            BMCR_ANRESTART |
5920                                                            BMCR_ANENABLE);
5921                                 udelay(10);
5922                                 tg3_carrier_off(tp);
5923                         }
5924                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5925                         bmcr = new_bmcr;
5926                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5929                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5930                                         bmsr |= BMSR_LSTATUS;
5931                                 else
5932                                         bmsr &= ~BMSR_LSTATUS;
5933                         }
5934                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935                 }
5936         }
5937
5938         if (bmsr & BMSR_LSTATUS) {
5939                 current_speed = SPEED_1000;
5940                 current_link_up = true;
5941                 if (bmcr & BMCR_FULLDPLX)
5942                         current_duplex = DUPLEX_FULL;
5943                 else
5944                         current_duplex = DUPLEX_HALF;
5945
5946                 local_adv = 0;
5947                 remote_adv = 0;
5948
5949                 if (bmcr & BMCR_ANENABLE) {
5950                         u32 common;
5951
5952                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5953                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5954                         common = local_adv & remote_adv;
5955                         if (common & (ADVERTISE_1000XHALF |
5956                                       ADVERTISE_1000XFULL)) {
5957                                 if (common & ADVERTISE_1000XFULL)
5958                                         current_duplex = DUPLEX_FULL;
5959                                 else
5960                                         current_duplex = DUPLEX_HALF;
5961
5962                                 tp->link_config.rmt_adv =
5963                                            mii_adv_to_ethtool_adv_x(remote_adv);
5964                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5965                                 /* Link is up via parallel detect */
5966                         } else {
5967                                 current_link_up = false;
5968                         }
5969                 }
5970         }
5971
5972 fiber_setup_done:
5973         if (current_link_up && current_duplex == DUPLEX_FULL)
5974                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5975
5976         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5977         if (tp->link_config.active_duplex == DUPLEX_HALF)
5978                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979
5980         tw32_f(MAC_MODE, tp->mac_mode);
5981         udelay(40);
5982
5983         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984
5985         tp->link_config.active_speed = current_speed;
5986         tp->link_config.active_duplex = current_duplex;
5987
5988         tg3_test_and_report_link_chg(tp, current_link_up);
5989         return err;
5990 }
5991
5992 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 {
5994         if (tp->serdes_counter) {
5995                 /* Give autoneg time to complete. */
5996                 tp->serdes_counter--;
5997                 return;
5998         }
5999
6000         if (!tp->link_up &&
6001             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002                 u32 bmcr;
6003
6004                 tg3_readphy(tp, MII_BMCR, &bmcr);
6005                 if (bmcr & BMCR_ANENABLE) {
6006                         u32 phy1, phy2;
6007
6008                         /* Select shadow register 0x1f */
6009                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6010                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011
6012                         /* Select expansion interrupt status register */
6013                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6014                                          MII_TG3_DSP_EXP1_INT_STAT);
6015                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017
6018                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6019                                 /* We have signal detect and are not receiving
6020                                  * config code words; the link is up via parallel
6021                                  * detection.
6022                                  */
6023
6024                                 bmcr &= ~BMCR_ANENABLE;
6025                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6026                                 tg3_writephy(tp, MII_BMCR, bmcr);
6027                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028                         }
6029                 }
6030         } else if (tp->link_up &&
6031                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6032                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033                 u32 phy2;
6034
6035                 /* Select expansion interrupt status register */
6036                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037                                  MII_TG3_DSP_EXP1_INT_STAT);
6038                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039                 if (phy2 & 0x20) {
6040                         u32 bmcr;
6041
6042                         /* Config code words received, turn on autoneg. */
6043                         tg3_readphy(tp, MII_BMCR, &bmcr);
6044                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045
6046                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6047
6048                 }
6049         }
6050 }
6051
6052 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6053 {
6054         u32 val;
6055         int err;
6056
6057         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6058                 err = tg3_setup_fiber_phy(tp, force_reset);
6059         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6060                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061         else
6062                 err = tg3_setup_copper_phy(tp, force_reset);
6063
6064         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065                 u32 scale;
6066
6067                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6068                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069                         scale = 65;
6070                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6071                         scale = 6;
6072                 else
6073                         scale = 12;
6074
6075                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6076                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6077                 tw32(GRC_MISC_CFG, val);
6078         }
6079
6080         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6081               (6 << TX_LENGTHS_IPG_SHIFT);
6082         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6083             tg3_asic_rev(tp) == ASIC_REV_5762)
6084                 val |= tr32(MAC_TX_LENGTHS) &
6085                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6086                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6087
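        /* 1000 Mbps half-duplex uses carrier extension with a 4096
         * bit-time slot, which the larger 0xff slot time programmed
         * below appears to account for; every other speed/duplex
         * combination keeps the standard value of 32.
         */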
6088         if (tp->link_config.active_speed == SPEED_1000 &&
6089             tp->link_config.active_duplex == DUPLEX_HALF)
6090                 tw32(MAC_TX_LENGTHS, val |
6091                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092         else
6093                 tw32(MAC_TX_LENGTHS, val |
6094                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095
6096         if (!tg3_flag(tp, 5705_PLUS)) {
6097                 if (tp->link_up) {
6098                         tw32(HOSTCC_STAT_COAL_TICKS,
6099                              tp->coal.stats_block_coalesce_usecs);
6100                 } else {
6101                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102                 }
6103         }
6104
6105         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6106                 val = tr32(PCIE_PWR_MGMT_THRESH);
6107                 if (!tp->link_up)
6108                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109                               tp->pwrmgmt_thresh;
6110                 else
6111                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6112                 tw32(PCIE_PWR_MGMT_THRESH, val);
6113         }
6114
6115         return err;
6116 }
6117
6118 /* tp->lock must be held */
6119 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6120 {
6121         u64 stamp;
6122
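        /* Bracket the device read with system-time snapshots so PHC
         * gettimex callers can bound the register access against the
         * system clock as tightly as possible.
         */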
6123         ptp_read_system_prets(sts);
6124         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6125         ptp_read_system_postts(sts);
6126         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6127
6128         return stamp;
6129 }
6130
6131 /* tp->lock must be held */
6132 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 {
6134         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135
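        /* Stop the counter while loading the two 32-bit halves so it
         * cannot advance between the LSB and MSB writes, then resume.
         */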
6136         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6137         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6138         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6139         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6140 }
6141
6142 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6143 static inline void tg3_full_unlock(struct tg3 *tp);
6144 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6145 {
6146         struct tg3 *tp = netdev_priv(dev);
6147
6148         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6149                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6150                                 SOF_TIMESTAMPING_SOFTWARE;
6151
6152         if (tg3_flag(tp, PTP_CAPABLE)) {
6153                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6154                                         SOF_TIMESTAMPING_RX_HARDWARE |
6155                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6156         }
6157
6158         if (tp->ptp_clock)
6159                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6160         else
6161                 info->phc_index = -1;
6162
6163         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6164
6165         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6167                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6168                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6169         return 0;
6170 }
6171
6172 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6173 {
6174         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6175         u64 correction;
6176         bool neg_adj;
6177
6178         /* Frequency adjustment is performed using hardware with a 24 bit
6179          * accumulator and a programmable correction value. On each clock
6180          * cycle, the correction value is added to the accumulator; when it
6181          * overflows, the time counter is incremented/decremented.
6182          */
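        /* For example, a +1 ppm request (scaled_ppm = 1 << 16) works out
         * to a correction of roughly (1 << 24) / 10^6 ~= 17; adding 17 on
         * each clock overflows the 24-bit accumulator about once per
         * million clocks, i.e. 1 ppm.
         */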
6183         neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6184
6185         tg3_full_lock(tp, 0);
6186
6187         if (correction)
6188                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6189                      TG3_EAV_REF_CLK_CORRECT_EN |
6190                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6191                      ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6192         else
6193                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6194
6195         tg3_full_unlock(tp);
6196
6197         return 0;
6198 }
6199
6200 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6201 {
6202         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6203
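        /* Phase adjustments are accumulated in software and applied when
         * timestamps are read, rather than by rewriting the hardware
         * counter.
         */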
6204         tg3_full_lock(tp, 0);
6205         tp->ptp_adjust += delta;
6206         tg3_full_unlock(tp);
6207
6208         return 0;
6209 }
6210
6211 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6212                             struct ptp_system_timestamp *sts)
6213 {
6214         u64 ns;
6215         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216
6217         tg3_full_lock(tp, 0);
6218         ns = tg3_refclk_read(tp, sts);
6219         ns += tp->ptp_adjust;
6220         tg3_full_unlock(tp);
6221
6222         *ts = ns_to_timespec64(ns);
6223
6224         return 0;
6225 }
6226
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228                            const struct timespec64 *ts)
6229 {
6230         u64 ns;
6231         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232
6233         ns = timespec64_to_ns(ts);
6234
6235         tg3_full_lock(tp, 0);
6236         tg3_refclk_write(tp, ns);
6237         tp->ptp_adjust = 0;
6238         tg3_full_unlock(tp);
6239
6240         return 0;
6241 }
6242
6243 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6244                           struct ptp_clock_request *rq, int on)
6245 {
6246         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247         u32 clock_ctl;
6248         int rval = 0;
6249
6250         switch (rq->type) {
6251         case PTP_CLK_REQ_PEROUT:
6252                 /* Reject requests with unsupported flags */
6253                 if (rq->perout.flags)
6254                         return -EOPNOTSUPP;
6255
6256                 if (rq->perout.index != 0)
6257                         return -EINVAL;
6258
6259                 tg3_full_lock(tp, 0);
6260                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6261                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6262
6263                 if (on) {
6264                         u64 nsec;
6265
6266                         nsec = rq->perout.start.sec * 1000000000ULL +
6267                                rq->perout.start.nsec;
6268
6269                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6270                                 netdev_warn(tp->dev,
6271                                             "Device supports only a one-shot timesync output; period must be 0\n");
6272                                 rval = -EINVAL;
6273                                 goto err_out;
6274                         }
6275
6276                         if (nsec & (1ULL << 63)) {
6277                                 netdev_warn(tp->dev,
6278                                             "Start value (nsec) exceeds the limit; start must fit in 63 bits\n");
6279                                 rval = -EINVAL;
6280                                 goto err_out;
6281                         }
6282
6283                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6284                         tw32(TG3_EAV_WATCHDOG0_MSB,
6285                              TG3_EAV_WATCHDOG0_EN |
6286                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6287
6288                         tw32(TG3_EAV_REF_CLCK_CTL,
6289                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6290                 } else {
6291                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6292                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6293                 }
6294
6295 err_out:
6296                 tg3_full_unlock(tp);
6297                 return rval;
6298
6299         default:
6300                 break;
6301         }
6302
6303         return -EOPNOTSUPP;
6304 }
6305
6306 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6307                                      struct skb_shared_hwtstamps *timestamp)
6308 {
6309         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6310         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6311                                            tp->ptp_adjust);
6312 }
6313
6314 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6315 {
6316         *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6317         *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6318 }
6319
6320 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6321 {
6322         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6323         struct skb_shared_hwtstamps timestamp;
6324         u64 hwclock;
6325
6326         if (tp->ptp_txts_retrycnt > 2)
6327                 goto done;
6328
6329         tg3_read_tx_tstamp(tp, &hwclock);
6330
6331         if (hwclock != tp->pre_tx_ts) {
6332                 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6333                 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6334                 goto done;
6335         }
6336         tp->ptp_txts_retrycnt++;
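        /* A positive return value re-arms the PTP aux worker after that
         * many jiffies; poll for the timestamp again in roughly 100 ms.
         */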
6337         return HZ / 10;
6338 done:
6339         dev_consume_skb_any(tp->tx_tstamp_skb);
6340         tp->tx_tstamp_skb = NULL;
6341         tp->ptp_txts_retrycnt = 0;
6342         tp->pre_tx_ts = 0;
6343         return -1;
6344 }
6345
6346 static const struct ptp_clock_info tg3_ptp_caps = {
6347         .owner          = THIS_MODULE,
6348         .name           = "tg3 clock",
6349         .max_adj        = 250000000,
6350         .n_alarm        = 0,
6351         .n_ext_ts       = 0,
6352         .n_per_out      = 1,
6353         .n_pins         = 0,
6354         .pps            = 0,
6355         .adjfine        = tg3_ptp_adjfine,
6356         .adjtime        = tg3_ptp_adjtime,
6357         .do_aux_work    = tg3_ptp_ts_aux_work,
6358         .gettimex64     = tg3_ptp_gettimex,
6359         .settime64      = tg3_ptp_settime,
6360         .enable         = tg3_ptp_enable,
6361 };
6362
6363 /* tp->lock must be held */
6364 static void tg3_ptp_init(struct tg3 *tp)
6365 {
6366         if (!tg3_flag(tp, PTP_CAPABLE))
6367                 return;
6368
6369         /* Initialize the hardware clock to the system time. */
6370         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6371         tp->ptp_adjust = 0;
6372         tp->ptp_info = tg3_ptp_caps;
6373 }
6374
6375 /* tp->lock must be held */
6376 static void tg3_ptp_resume(struct tg3 *tp)
6377 {
6378         if (!tg3_flag(tp, PTP_CAPABLE))
6379                 return;
6380
6381         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6382         tp->ptp_adjust = 0;
6383 }
6384
6385 static void tg3_ptp_fini(struct tg3 *tp)
6386 {
6387         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6388                 return;
6389
6390         ptp_clock_unregister(tp->ptp_clock);
6391         tp->ptp_clock = NULL;
6392         tp->ptp_adjust = 0;
6393         dev_consume_skb_any(tp->tx_tstamp_skb);
6394         tp->tx_tstamp_skb = NULL;
6395 }
6396
6397 static inline int tg3_irq_sync(struct tg3 *tp)
6398 {
6399         return tp->irq_sync;
6400 }
6401
6402 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6403 {
6404         int i;
6405
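        /* Offset dst by the register offset so each value lands at the
         * buffer position matching its register address; tg3_dump_state()
         * can then print register offsets straight from the array index.
         */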
6406         dst = (u32 *)((u8 *)dst + off);
6407         for (i = 0; i < len; i += sizeof(u32))
6408                 *dst++ = tr32(off + i);
6409 }
6410
6411 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6412 {
6413         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6414         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6415         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6416         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6417         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6418         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6419         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6420         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6421         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6422         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6423         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6424         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6425         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6426         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6427         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6428         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6429         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6430         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6431         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6432
6433         if (tg3_flag(tp, SUPPORT_MSIX))
6434                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6435
6436         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6437         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6438         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6439         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6440         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6441         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6442         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6443         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6444
6445         if (!tg3_flag(tp, 5705_PLUS)) {
6446                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6447                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6448                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6449         }
6450
6451         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6452         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6453         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6454         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6455         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6456
6457         if (tg3_flag(tp, NVRAM))
6458                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6459 }
6460
6461 static void tg3_dump_state(struct tg3 *tp)
6462 {
6463         int i;
6464         u32 *regs;
6465
6466         /* If it is a PCI error, all registers will read 0xffffffff;
6467          * don't dump them out, just report the error and return.
6468          */
6469         if (tp->pdev->error_state != pci_channel_io_normal) {
6470                 netdev_err(tp->dev, "PCI channel ERROR!\n");
6471                 return;
6472         }
6473
6474         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6475         if (!regs)
6476                 return;
6477
6478         if (tg3_flag(tp, PCI_EXPRESS)) {
6479                 /* Read up to but not including private PCI registers */
6480                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6481                         regs[i / sizeof(u32)] = tr32(i);
6482         } else
6483                 tg3_dump_legacy_regs(tp, regs);
6484
6485         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6486                 if (!regs[i + 0] && !regs[i + 1] &&
6487                     !regs[i + 2] && !regs[i + 3])
6488                         continue;
6489
6490                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6491                            i * 4,
6492                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6493         }
6494
6495         kfree(regs);
6496
6497         for (i = 0; i < tp->irq_cnt; i++) {
6498                 struct tg3_napi *tnapi = &tp->napi[i];
6499
6500                 /* SW status block */
6501                 netdev_err(tp->dev,
6502                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6503                            i,
6504                            tnapi->hw_status->status,
6505                            tnapi->hw_status->status_tag,
6506                            tnapi->hw_status->rx_jumbo_consumer,
6507                            tnapi->hw_status->rx_consumer,
6508                            tnapi->hw_status->rx_mini_consumer,
6509                            tnapi->hw_status->idx[0].rx_producer,
6510                            tnapi->hw_status->idx[0].tx_consumer);
6511
6512                 netdev_err(tp->dev,
6513                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6514                            i,
6515                            tnapi->last_tag, tnapi->last_irq_tag,
6516                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6517                            tnapi->rx_rcb_ptr,
6518                            tnapi->prodring.rx_std_prod_idx,
6519                            tnapi->prodring.rx_std_cons_idx,
6520                            tnapi->prodring.rx_jmb_prod_idx,
6521                            tnapi->prodring.rx_jmb_cons_idx);
6522         }
6523 }
6524
6525 /* This is called whenever we suspect that the system chipset is re-
6526  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6527  * is bogus tx completions. We try to recover by setting the
6528  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6529  * in the workqueue.
6530  */
6531 static void tg3_tx_recover(struct tg3 *tp)
6532 {
6533         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6534                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6535
6536         netdev_warn(tp->dev,
6537                     "The system may be re-ordering memory-mapped I/O "
6538                     "cycles to the network device, attempting to recover. "
6539                     "Please report the problem to the driver maintainer "
6540                     "and include system chipset information.\n");
6541
6542         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6543 }
6544
6545 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6546 {
6547         /* Tell compiler to fetch tx indices from memory. */
6548         barrier();
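        /* (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) is the number of
         * descriptors still in flight; the ring size is a power of two,
         * so the mask performs the modulo.
         */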
6549         return tnapi->tx_pending -
6550                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6551 }
6552
6553 /* Tigon3 never reports partial packet sends.  So we do not
6554  * need special logic to handle SKBs that have not had all
6555  * of their frags sent yet, like SunGEM does.
6556  */
6557 static void tg3_tx(struct tg3_napi *tnapi)
6558 {
6559         struct tg3 *tp = tnapi->tp;
6560         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6561         u32 sw_idx = tnapi->tx_cons;
6562         struct netdev_queue *txq;
6563         int index = tnapi - tp->napi;
6564         unsigned int pkts_compl = 0, bytes_compl = 0;
6565
6566         if (tg3_flag(tp, ENABLE_TSS))
6567                 index--;
6568
6569         txq = netdev_get_tx_queue(tp->dev, index);
6570
6571         while (sw_idx != hw_idx) {
6572                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6573                 bool complete_skb_later = false;
6574                 struct sk_buff *skb = ri->skb;
6575                 int i, tx_bug = 0;
6576
6577                 if (unlikely(skb == NULL)) {
6578                         tg3_tx_recover(tp);
6579                         return;
6580                 }
6581
6582                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6583                         struct skb_shared_hwtstamps timestamp;
6584                         u64 hwclock;
6585
6586                         tg3_read_tx_tstamp(tp, &hwclock);
6587                         if (hwclock != tp->pre_tx_ts) {
6588                                 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6589                                 skb_tstamp_tx(skb, &timestamp);
6590                                 tp->pre_tx_ts = 0;
6591                         } else {
6592                                 tp->tx_tstamp_skb = skb;
6593                                 complete_skb_later = true;
6594                         }
6595                 }
6596
6597                 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6598                                  skb_headlen(skb), DMA_TO_DEVICE);
6599
6600                 ri->skb = NULL;
6601
6602                 while (ri->fragmented) {
6603                         ri->fragmented = false;
6604                         sw_idx = NEXT_TX(sw_idx);
6605                         ri = &tnapi->tx_buffers[sw_idx];
6606                 }
6607
6608                 sw_idx = NEXT_TX(sw_idx);
6609
6610                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6611                         ri = &tnapi->tx_buffers[sw_idx];
6612                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6613                                 tx_bug = 1;
6614
6615                         dma_unmap_page(&tp->pdev->dev,
6616                                        dma_unmap_addr(ri, mapping),
6617                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6618                                        DMA_TO_DEVICE);
6619
6620                         while (ri->fragmented) {
6621                                 ri->fragmented = false;
6622                                 sw_idx = NEXT_TX(sw_idx);
6623                                 ri = &tnapi->tx_buffers[sw_idx];
6624                         }
6625
6626                         sw_idx = NEXT_TX(sw_idx);
6627                 }
6628
6629                 pkts_compl++;
6630                 bytes_compl += skb->len;
6631
6632                 if (!complete_skb_later)
6633                         dev_consume_skb_any(skb);
6634                 else
6635                         ptp_schedule_worker(tp->ptp_clock, 0);
6636
6637                 if (unlikely(tx_bug)) {
6638                         tg3_tx_recover(tp);
6639                         return;
6640                 }
6641         }
6642
6643         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6644
6645         tnapi->tx_cons = sw_idx;
6646
6647         /* Need to make the tx_cons update visible to __tg3_start_xmit()
6648          * before checking for netif_queue_stopped().  Without the
6649          * memory barrier, there is a small possibility that __tg3_start_xmit()
6650          * will miss it and cause the queue to be stopped forever.
6651          */
6652         smp_mb();
6653
6654         if (unlikely(netif_tx_queue_stopped(txq) &&
6655                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6656                 __netif_tx_lock(txq, smp_processor_id());
6657                 if (netif_tx_queue_stopped(txq) &&
6658                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6659                         netif_tx_wake_queue(txq);
6660                 __netif_tx_unlock(txq);
6661         }
6662 }
6663
6664 static void tg3_frag_free(bool is_frag, void *data)
6665 {
6666         if (is_frag)
6667                 skb_free_frag(data);
6668         else
6669                 kfree(data);
6670 }
6671
6672 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6673 {
6674         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6675                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6676
6677         if (!ri->data)
6678                 return;
6679
6680         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6681                          DMA_FROM_DEVICE);
6682         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6683         ri->data = NULL;
6684 }
6685
6686
6687 /* Returns size of skb allocated or < 0 on error.
6688  *
6689  * We only need to fill in the address because the other members
6690  * of the RX descriptor are invariant, see tg3_init_rings.
6691  *
6692  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6693  * posting buffers we only dirty the first cache line of the RX
6694  * descriptor (containing the address).  Whereas for the RX status
6695  * buffers the cpu only reads the last cacheline of the RX descriptor
6696  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6697  */
6698 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6699                              u32 opaque_key, u32 dest_idx_unmasked,
6700                              unsigned int *frag_size)
6701 {
6702         struct tg3_rx_buffer_desc *desc;
6703         struct ring_info *map;
6704         u8 *data;
6705         dma_addr_t mapping;
6706         int skb_size, data_size, dest_idx;
6707
6708         switch (opaque_key) {
6709         case RXD_OPAQUE_RING_STD:
6710                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6711                 desc = &tpr->rx_std[dest_idx];
6712                 map = &tpr->rx_std_buffers[dest_idx];
6713                 data_size = tp->rx_pkt_map_sz;
6714                 break;
6715
6716         case RXD_OPAQUE_RING_JUMBO:
6717                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6718                 desc = &tpr->rx_jmb[dest_idx].std;
6719                 map = &tpr->rx_jmb_buffers[dest_idx];
6720                 data_size = TG3_RX_JMB_MAP_SZ;
6721                 break;
6722
6723         default:
6724                 return -EINVAL;
6725         }
6726
6727         /* Do not overwrite any of the map or rp information
6728          * until we are sure we can commit to a new buffer.
6729          *
6730          * Callers depend upon this behavior and assume that
6731          * we leave everything unchanged if we fail.
6732          */
6733         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6734                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6735         if (skb_size <= PAGE_SIZE) {
6736                 data = napi_alloc_frag(skb_size);
6737                 *frag_size = skb_size;
6738         } else {
6739                 data = kmalloc(skb_size, GFP_ATOMIC);
6740                 *frag_size = 0;
6741         }
6742         if (!data)
6743                 return -ENOMEM;
6744
6745         mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6746                                  data_size, DMA_FROM_DEVICE);
6747         if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6748                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6749                 return -EIO;
6750         }
6751
6752         map->data = data;
6753         dma_unmap_addr_set(map, mapping, mapping);
6754
6755         desc->addr_hi = ((u64)mapping >> 32);
6756         desc->addr_lo = ((u64)mapping & 0xffffffff);
6757
6758         return data_size;
6759 }
6760
6761 /* We only need to copy the address over because the other
6762  * members of the RX descriptor are invariant.  See notes above
6763  * tg3_alloc_rx_data for full details.
6764  */
6765 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6766                            struct tg3_rx_prodring_set *dpr,
6767                            u32 opaque_key, int src_idx,
6768                            u32 dest_idx_unmasked)
6769 {
6770         struct tg3 *tp = tnapi->tp;
6771         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6772         struct ring_info *src_map, *dest_map;
6773         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6774         int dest_idx;
6775
6776         switch (opaque_key) {
6777         case RXD_OPAQUE_RING_STD:
6778                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6779                 dest_desc = &dpr->rx_std[dest_idx];
6780                 dest_map = &dpr->rx_std_buffers[dest_idx];
6781                 src_desc = &spr->rx_std[src_idx];
6782                 src_map = &spr->rx_std_buffers[src_idx];
6783                 break;
6784
6785         case RXD_OPAQUE_RING_JUMBO:
6786                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6787                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6788                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6789                 src_desc = &spr->rx_jmb[src_idx].std;
6790                 src_map = &spr->rx_jmb_buffers[src_idx];
6791                 break;
6792
6793         default:
6794                 return;
6795         }
6796
6797         dest_map->data = src_map->data;
6798         dma_unmap_addr_set(dest_map, mapping,
6799                            dma_unmap_addr(src_map, mapping));
6800         dest_desc->addr_hi = src_desc->addr_hi;
6801         dest_desc->addr_lo = src_desc->addr_lo;
6802
6803         /* Ensure that the update to the skb happens after the physical
6804          * addresses have been transferred to the new BD location.
6805          */
6806         smp_wmb();
6807
6808         src_map->data = NULL;
6809 }
6810
6811 /* The RX ring scheme is composed of multiple rings which post fresh
6812  * buffers to the chip, and one special ring the chip uses to report
6813  * status back to the host.
6814  *
6815  * The special ring reports the status of received packets to the
6816  * host.  The chip does not write into the original descriptor the
6817  * RX buffer was obtained from.  The chip simply takes the original
6818  * descriptor as provided by the host, updates the status and length
6819  * field, then writes this into the next status ring entry.
6820  *
6821  * Each ring the host uses to post buffers to the chip is described
6822  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6823  * it is first placed into the on-chip ram.  When the packet's length
6824  * is known, it walks down the TG3_BDINFO entries to select the ring.
6825  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6826  * whose MAXLEN covers the new packet's length is chosen.
6827  *
6828  * The "separate ring for rx status" scheme may sound odd, but it makes
6829  * sense from a cache coherency perspective.  If only the host writes
6830  * to the buffer post rings, and only the chip writes to the rx status
6831  * rings, then cache lines never move beyond shared-modified state.
6832  * If both the host and chip were to write into the same ring, cache line
6833  * eviction could occur since both entities want it in an exclusive state.
6834  */
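/* For example, assuming the standard ring's MAXLEN covers typical
 * 1500-byte frames while the jumbo ring's MAXLEN is larger, a 4000-byte
 * packet falls outside the standard ring's range and is matched to the
 * jumbo ring's buffers.
 */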
6835 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6836 {
6837         struct tg3 *tp = tnapi->tp;
6838         u32 work_mask, rx_std_posted = 0;
6839         u32 std_prod_idx, jmb_prod_idx;
6840         u32 sw_idx = tnapi->rx_rcb_ptr;
6841         u16 hw_idx;
6842         int received;
6843         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6844
6845         hw_idx = *(tnapi->rx_rcb_prod_idx);
6846         /*
6847          * We need to order the read of hw_idx and the read of
6848          * the opaque cookie.
6849          */
6850         rmb();
6851         work_mask = 0;
6852         received = 0;
6853         std_prod_idx = tpr->rx_std_prod_idx;
6854         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6855         while (sw_idx != hw_idx && budget > 0) {
6856                 struct ring_info *ri;
6857                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6858                 unsigned int len;
6859                 struct sk_buff *skb;
6860                 dma_addr_t dma_addr;
6861                 u32 opaque_key, desc_idx, *post_ptr;
6862                 u8 *data;
6863                 u64 tstamp = 0;
6864
6865                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6866                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6867                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6868                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6869                         dma_addr = dma_unmap_addr(ri, mapping);
6870                         data = ri->data;
6871                         post_ptr = &std_prod_idx;
6872                         rx_std_posted++;
6873                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6874                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6875                         dma_addr = dma_unmap_addr(ri, mapping);
6876                         data = ri->data;
6877                         post_ptr = &jmb_prod_idx;
6878                 } else
6879                         goto next_pkt_nopost;
6880
6881                 work_mask |= opaque_key;
6882
6883                 if (desc->err_vlan & RXD_ERR_MASK) {
6884                 drop_it:
6885                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6886                                        desc_idx, *post_ptr);
6887                 drop_it_no_recycle:
6888                         /* The card keeps track of the other statistics. */
6889                         tnapi->rx_dropped++;
6890                         goto next_pkt;
6891                 }
6892
6893                 prefetch(data + TG3_RX_OFFSET(tp));
6894                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6895                       ETH_FCS_LEN;
6896
6897                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6898                      RXD_FLAG_PTPSTAT_PTPV1 ||
6899                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6900                      RXD_FLAG_PTPSTAT_PTPV2) {
6901                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6902                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6903                 }
6904
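                /* Large frames: hand the existing DMA buffer to the stack
                 * and post a freshly allocated replacement.  Small frames:
                 * copy into a new skb and recycle the DMA buffer, which
                 * avoids allocating and mapping a replacement for short
                 * payloads.
                 */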
6905                 if (len > TG3_RX_COPY_THRESH(tp)) {
6906                         int skb_size;
6907                         unsigned int frag_size;
6908
6909                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6910                                                     *post_ptr, &frag_size);
6911                         if (skb_size < 0)
6912                                 goto drop_it;
6913
6914                         dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6915                                          DMA_FROM_DEVICE);
6916
6917                         /* Ensure that the update to the data happens
6918                          * after the usage of the old DMA mapping.
6919                          */
6920                         smp_wmb();
6921
6922                         ri->data = NULL;
6923
6924                         if (frag_size)
6925                                 skb = build_skb(data, frag_size);
6926                         else
6927                                 skb = slab_build_skb(data);
6928                         if (!skb) {
6929                                 tg3_frag_free(frag_size != 0, data);
6930                                 goto drop_it_no_recycle;
6931                         }
6932                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6933                 } else {
6934                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6935                                        desc_idx, *post_ptr);
6936
6937                         skb = netdev_alloc_skb(tp->dev,
6938                                                len + TG3_RAW_IP_ALIGN);
6939                         if (skb == NULL)
6940                                 goto drop_it_no_recycle;
6941
6942                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6943                         dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6944                                                 DMA_FROM_DEVICE);
6945                         memcpy(skb->data,
6946                                data + TG3_RX_OFFSET(tp),
6947                                len);
6948                         dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6949                                                    len, DMA_FROM_DEVICE);
6950                 }
6951
6952                 skb_put(skb, len);
6953                 if (tstamp)
6954                         tg3_hwclock_to_timestamp(tp, tstamp,
6955                                                  skb_hwtstamps(skb));
6956
6957                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6958                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6959                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6960                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6961                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6962                 else
6963                         skb_checksum_none_assert(skb);
6964
6965                 skb->protocol = eth_type_trans(skb, tp->dev);
6966
6967                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6968                     skb->protocol != htons(ETH_P_8021Q) &&
6969                     skb->protocol != htons(ETH_P_8021AD)) {
6970                         dev_kfree_skb_any(skb);
6971                         goto drop_it_no_recycle;
6972                 }
6973
6974                 if (desc->type_flags & RXD_FLAG_VLAN &&
6975                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6976                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6977                                                desc->err_vlan & RXD_VLAN_MASK);
6978
6979                 napi_gro_receive(&tnapi->napi, skb);
6980
6981                 received++;
6982                 budget--;
6983
6984 next_pkt:
6985                 (*post_ptr)++;
6986
6987                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6988                         tpr->rx_std_prod_idx = std_prod_idx &
6989                                                tp->rx_std_ring_mask;
6990                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6991                                      tpr->rx_std_prod_idx);
6992                         work_mask &= ~RXD_OPAQUE_RING_STD;
6993                         rx_std_posted = 0;
6994                 }
6995 next_pkt_nopost:
6996                 sw_idx++;
6997                 sw_idx &= tp->rx_ret_ring_mask;
6998
6999                 /* Refresh hw_idx to see if there is new work */
7000                 if (sw_idx == hw_idx) {
7001                         hw_idx = *(tnapi->rx_rcb_prod_idx);
7002                         rmb();
7003                 }
7004         }
7005
7006         /* ACK the status ring. */
7007         tnapi->rx_rcb_ptr = sw_idx;
7008         tw32_rx_mbox(tnapi->consmbox, sw_idx);
7009
7010         /* Refill RX ring(s). */
7011         if (!tg3_flag(tp, ENABLE_RSS)) {
7012                 /* Sync BD data before updating mailbox */
7013                 wmb();
7014
7015                 if (work_mask & RXD_OPAQUE_RING_STD) {
7016                         tpr->rx_std_prod_idx = std_prod_idx &
7017                                                tp->rx_std_ring_mask;
7018                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7019                                      tpr->rx_std_prod_idx);
7020                 }
7021                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7022                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
7023                                                tp->rx_jmb_ring_mask;
7024                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7025                                      tpr->rx_jmb_prod_idx);
7026                 }
7027         } else if (work_mask) {
7028                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7029                  * updated before the producer indices can be updated.
7030                  */
7031                 smp_wmb();
7032
7033                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7034                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7035
7036                 if (tnapi != &tp->napi[1]) {
7037                         tp->rx_refill = true;
7038                         napi_schedule(&tp->napi[1].napi);
7039                 }
7040         }
7041
7042         return received;
7043 }
7044
7045 static void tg3_poll_link(struct tg3 *tp)
7046 {
7047         /* handle link change and other phy events */
7048         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7049                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7050
7051                 if (sblk->status & SD_STATUS_LINK_CHG) {
7052                         sblk->status = SD_STATUS_UPDATED |
7053                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7054                         spin_lock(&tp->lock);
7055                         if (tg3_flag(tp, USE_PHYLIB)) {
7056                                 tw32_f(MAC_STATUS,
7057                                      (MAC_STATUS_SYNC_CHANGED |
7058                                       MAC_STATUS_CFG_CHANGED |
7059                                       MAC_STATUS_MI_COMPLETION |
7060                                       MAC_STATUS_LNKSTATE_CHANGED));
7061                                 udelay(40);
7062                         } else
7063                                 tg3_setup_phy(tp, false);
7064                         spin_unlock(&tp->lock);
7065                 }
7066         }
7067 }
7068
7069 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7070                                 struct tg3_rx_prodring_set *dpr,
7071                                 struct tg3_rx_prodring_set *spr)
7072 {
7073         u32 si, di, cpycnt, src_prod_idx;
7074         int i, err = 0;
7075
7076         while (1) {
7077                 src_prod_idx = spr->rx_std_prod_idx;
7078
7079                 /* Make sure updates to the rx_std_buffers[] entries and the
7080                  * standard producer index are seen in the correct order.
7081                  */
7082                 smp_rmb();
7083
7084                 if (spr->rx_std_cons_idx == src_prod_idx)
7085                         break;
7086
7087                 if (spr->rx_std_cons_idx < src_prod_idx)
7088                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7089                 else
7090                         cpycnt = tp->rx_std_ring_mask + 1 -
7091                                  spr->rx_std_cons_idx;
7092
7093                 cpycnt = min(cpycnt,
7094                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7095
7096                 si = spr->rx_std_cons_idx;
7097                 di = dpr->rx_std_prod_idx;
7098
7099                 for (i = di; i < di + cpycnt; i++) {
7100                         if (dpr->rx_std_buffers[i].data) {
7101                                 cpycnt = i - di;
7102                                 err = -ENOSPC;
7103                                 break;
7104                         }
7105                 }
7106
7107                 if (!cpycnt)
7108                         break;
7109
7110                 /* Ensure that updates to the rx_std_buffers ring and the
7111                  * shadowed hardware producer ring from tg3_recycle_skb() are
7112                  * ordered correctly WRT the skb check above.
7113                  */
7114                 smp_rmb();
7115
7116                 memcpy(&dpr->rx_std_buffers[di],
7117                        &spr->rx_std_buffers[si],
7118                        cpycnt * sizeof(struct ring_info));
7119
7120                 for (i = 0; i < cpycnt; i++, di++, si++) {
7121                         struct tg3_rx_buffer_desc *sbd, *dbd;
7122                         sbd = &spr->rx_std[si];
7123                         dbd = &dpr->rx_std[di];
7124                         dbd->addr_hi = sbd->addr_hi;
7125                         dbd->addr_lo = sbd->addr_lo;
7126                 }
7127
7128                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7129                                        tp->rx_std_ring_mask;
7130                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7131                                        tp->rx_std_ring_mask;
7132         }
7133
7134         while (1) {
7135                 src_prod_idx = spr->rx_jmb_prod_idx;
7136
7137                 /* Make sure updates to the rx_jmb_buffers[] entries and
7138                  * the jumbo producer index are seen in the correct order.
7139                  */
7140                 smp_rmb();
7141
7142                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7143                         break;
7144
7145                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7146                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7147                 else
7148                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7149                                  spr->rx_jmb_cons_idx;
7150
7151                 cpycnt = min(cpycnt,
7152                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7153
7154                 si = spr->rx_jmb_cons_idx;
7155                 di = dpr->rx_jmb_prod_idx;
7156
7157                 for (i = di; i < di + cpycnt; i++) {
7158                         if (dpr->rx_jmb_buffers[i].data) {
7159                                 cpycnt = i - di;
7160                                 err = -ENOSPC;
7161                                 break;
7162                         }
7163                 }
7164
7165                 if (!cpycnt)
7166                         break;
7167
7168                 /* Ensure that updates to the rx_jmb_buffers ring and the
7169                  * shadowed hardware producer ring from tg3_recycle_skb() are
7170                  * ordered correctly WRT the skb check above.
7171                  */
7172                 smp_rmb();
7173
7174                 memcpy(&dpr->rx_jmb_buffers[di],
7175                        &spr->rx_jmb_buffers[si],
7176                        cpycnt * sizeof(struct ring_info));
7177
7178                 for (i = 0; i < cpycnt; i++, di++, si++) {
7179                         struct tg3_rx_buffer_desc *sbd, *dbd;
7180                         sbd = &spr->rx_jmb[si].std;
7181                         dbd = &dpr->rx_jmb[di].std;
7182                         dbd->addr_hi = sbd->addr_hi;
7183                         dbd->addr_lo = sbd->addr_lo;
7184                 }
7185
7186                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7187                                        tp->rx_jmb_ring_mask;
7188                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7189                                        tp->rx_jmb_ring_mask;
7190         }
7191
7192         return err;
7193 }
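
/* Example of the index arithmetic above, assuming a standard ring of
 * 512 entries (rx_std_ring_mask == 511): with rx_std_cons_idx == 500
 * and rx_std_prod_idx == 10 the producer has wrapped, so the first
 * pass copies 512 - 500 = 12 entries up to the end of the ring, and
 * the next iteration copies the remaining 10 starting at index 0.
 * A destination slot that is still occupied ends the transfer early
 * with -ENOSPC so the caller can retry later.
 */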
7194
7195 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7196 {
7197         struct tg3 *tp = tnapi->tp;
7198
7199         /* run TX completion thread */
7200         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7201                 tg3_tx(tnapi);
7202                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7203                         return work_done;
7204         }
7205
7206         if (!tnapi->rx_rcb_prod_idx)
7207                 return work_done;
7208
7209         /* run RX thread, within the bounds set by NAPI.
7210          * All RX "locking" is done by ensuring outside
7211          * code synchronizes with tg3->napi.poll()
7212          */
7213         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7214                 work_done += tg3_rx(tnapi, budget - work_done);
7215
7216         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7217                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7218                 int i, err = 0;
7219                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7220                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7221
7222                 tp->rx_refill = false;
7223                 for (i = 1; i <= tp->rxq_cnt; i++)
7224                         err |= tg3_rx_prodring_xfer(tp, dpr,
7225                                                     &tp->napi[i].prodring);
7226
7227                 wmb();
7228
7229                 if (std_prod_idx != dpr->rx_std_prod_idx)
7230                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7231                                      dpr->rx_std_prod_idx);
7232
7233                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7234                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7235                                      dpr->rx_jmb_prod_idx);
7236
7237                 if (err)
7238                         tw32_f(HOSTCC_MODE, tp->coal_now);
7239         }
7240
7241         return work_done;
7242 }
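
/* err above is the OR of all per-vector transfer results; if any of
 * them ran out of free destination slots (-ENOSPC), writing
 * tp->coal_now to HOSTCC_MODE forces an immediate coalescence event so
 * this vector is polled again and the remaining buffers can be moved
 * on a later pass.
 */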
7243
7244 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7245 {
7246         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7247                 schedule_work(&tp->reset_task);
7248 }
7249
7250 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7251 {
7252         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7253                 cancel_work_sync(&tp->reset_task);
7254         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7255 }
7256
7257 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7258 {
7259         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7260         struct tg3 *tp = tnapi->tp;
7261         int work_done = 0;
7262         struct tg3_hw_status *sblk = tnapi->hw_status;
7263
7264         while (1) {
7265                 work_done = tg3_poll_work(tnapi, work_done, budget);
7266
7267                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7268                         goto tx_recovery;
7269
7270                 if (unlikely(work_done >= budget))
7271                         break;
7272
7273                 /* tp->last_tag is used in tg3_int_reenable() below
7274                  * to tell the hw how much work has been processed,
7275                  * so we must read it before checking for more work.
7276                  */
7277                 tnapi->last_tag = sblk->status_tag;
7278                 tnapi->last_irq_tag = tnapi->last_tag;
7279                 rmb();
7280
7281                 /* check for RX/TX work to do */
7282                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7283                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7284
7285                         /* This test is not race-free, but it reduces
7286                          * the number of interrupts by looping again.
7287                          */
7288                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7289                                 continue;
7290
7291                         napi_complete_done(napi, work_done);
7292                         /* Reenable interrupts. */
7293                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7294
7295                         /* This test is synchronized by napi_schedule()
7296                          * and napi_complete() to close the race condition.
7297                          */
7298                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7299                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7300                                                   HOSTCC_MODE_ENABLE |
7301                                                   tnapi->coal_now);
7302                         }
7303                         break;
7304                 }
7305         }
7306
7307         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7308         return work_done;
7309
7310 tx_recovery:
7311         /* work_done is guaranteed to be less than budget. */
7312         napi_complete(napi);
7313         tg3_reset_task_schedule(tp);
7314         return work_done;
7315 }
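
/* The two rx_refill tests above close the race against tg3_rx() on the
 * other RX vectors: the first, unsynchronized check just loops instead
 * of taking another interrupt, while the check after
 * napi_complete_done() pairs with napi_schedule() and, if a refill
 * request slipped in, forces a coalescence event so napi[1] runs again.
 */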
7316
7317 static void tg3_process_error(struct tg3 *tp)
7318 {
7319         u32 val;
7320         bool real_error = false;
7321
7322         if (tg3_flag(tp, ERROR_PROCESSED))
7323                 return;
7324
7325         /* Check Flow Attention register */
7326         val = tr32(HOSTCC_FLOW_ATTN);
7327         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7328                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7329                 real_error = true;
7330         }
7331
7332         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7333                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7334                 real_error = true;
7335         }
7336
7337         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7338                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7339                 real_error = true;
7340         }
7341
7342         if (!real_error)
7343                 return;
7344
7345         tg3_dump_state(tp);
7346
7347         tg3_flag_set(tp, ERROR_PROCESSED);
7348         tg3_reset_task_schedule(tp);
7349 }
7350
7351 static int tg3_poll(struct napi_struct *napi, int budget)
7352 {
7353         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7354         struct tg3 *tp = tnapi->tp;
7355         int work_done = 0;
7356         struct tg3_hw_status *sblk = tnapi->hw_status;
7357
7358         while (1) {
7359                 if (sblk->status & SD_STATUS_ERROR)
7360                         tg3_process_error(tp);
7361
7362                 tg3_poll_link(tp);
7363
7364                 work_done = tg3_poll_work(tnapi, work_done, budget);
7365
7366                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7367                         goto tx_recovery;
7368
7369                 if (unlikely(work_done >= budget))
7370                         break;
7371
7372                 if (tg3_flag(tp, TAGGED_STATUS)) {
7373                         /* tp->last_tag is used in tg3_int_reenable() below
7374                          * to tell the hw how much work has been processed,
7375                          * so we must read it before checking for more work.
7376                          */
7377                         tnapi->last_tag = sblk->status_tag;
7378                         tnapi->last_irq_tag = tnapi->last_tag;
7379                         rmb();
7380                 } else
7381                         sblk->status &= ~SD_STATUS_UPDATED;
7382
7383                 if (likely(!tg3_has_work(tnapi))) {
7384                         napi_complete_done(napi, work_done);
7385                         tg3_int_reenable(tnapi);
7386                         break;
7387                 }
7388         }
7389
7390         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7391         return work_done;
7392
7393 tx_recovery:
7394         /* work_done is guaranteed to be less than budget. */
7395         napi_complete(napi);
7396         tg3_reset_task_schedule(tp);
7397         return work_done;
7398 }
7399
7400 static void tg3_napi_disable(struct tg3 *tp)
7401 {
7402         int i;
7403
7404         for (i = tp->irq_cnt - 1; i >= 0; i--)
7405                 napi_disable(&tp->napi[i].napi);
7406 }
7407
7408 static void tg3_napi_enable(struct tg3 *tp)
7409 {
7410         int i;
7411
7412         for (i = 0; i < tp->irq_cnt; i++)
7413                 napi_enable(&tp->napi[i].napi);
7414 }
7415
7416 static void tg3_napi_init(struct tg3 *tp)
7417 {
7418         int i;
7419
7420         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7421         for (i = 1; i < tp->irq_cnt; i++)
7422                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7423 }
7424
7425 static void tg3_napi_fini(struct tg3 *tp)
7426 {
7427         int i;
7428
7429         for (i = 0; i < tp->irq_cnt; i++)
7430                 netif_napi_del(&tp->napi[i].napi);
7431 }
7432
7433 static inline void tg3_netif_stop(struct tg3 *tp)
7434 {
7435         netif_trans_update(tp->dev);    /* prevent tx timeout */
7436         tg3_napi_disable(tp);
7437         netif_carrier_off(tp->dev);
7438         netif_tx_disable(tp->dev);
7439 }
7440
7441 /* tp->lock must be held */
7442 static inline void tg3_netif_start(struct tg3 *tp)
7443 {
7444         tg3_ptp_resume(tp);
7445
7446         /* NOTE: unconditional netif_tx_wake_all_queues is only
7447          * appropriate so long as all callers are assured to
7448          * have free tx slots (such as after tg3_init_hw)
7449          */
7450         netif_tx_wake_all_queues(tp->dev);
7451
7452         if (tp->link_up)
7453                 netif_carrier_on(tp->dev);
7454
7455         tg3_napi_enable(tp);
7456         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7457         tg3_enable_ints(tp);
7458 }
7459
7460 static void tg3_irq_quiesce(struct tg3 *tp)
7461         __releases(tp->lock)
7462         __acquires(tp->lock)
7463 {
7464         int i;
7465
7466         BUG_ON(tp->irq_sync);
7467
7468         tp->irq_sync = 1;
7469         smp_mb();
7470
7471         spin_unlock_bh(&tp->lock);
7472
7473         for (i = 0; i < tp->irq_cnt; i++)
7474                 synchronize_irq(tp->napi[i].irq_vec);
7475
7476         spin_lock_bh(&tp->lock);
7477 }
7478
7479 /* Fully shut down all tg3 driver activity elsewhere in the system.
7480  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
7481  * as well.  This is usually only necessary when shutting down the
7482  * device.
7483  */
7484 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7485 {
7486         spin_lock_bh(&tp->lock);
7487         if (irq_sync)
7488                 tg3_irq_quiesce(tp);
7489 }
7490
7491 static inline void tg3_full_unlock(struct tg3 *tp)
7492 {
7493         spin_unlock_bh(&tp->lock);
7494 }
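
/* A typical call sequence for the helpers above, e.g. from a slow-path
 * configuration change that must also keep the interrupt handlers out:
 *
 *	tg3_full_lock(tp, 1);
 *	...reprogram the chip...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 takes only tp->lock with bottom halves
 * disabled, without waiting for in-flight IRQ handlers.
 */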
7495
7496 /* One-shot MSI handler - Chip automatically disables interrupt
7497  * after sending the MSI, so the driver doesn't have to do it.
7498  */
7499 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7500 {
7501         struct tg3_napi *tnapi = dev_id;
7502         struct tg3 *tp = tnapi->tp;
7503
7504         prefetch(tnapi->hw_status);
7505         if (tnapi->rx_rcb)
7506                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7507
7508         if (likely(!tg3_irq_sync(tp)))
7509                 napi_schedule(&tnapi->napi);
7510
7511         return IRQ_HANDLED;
7512 }
7513
7514 /* MSI ISR - No need to check for interrupt sharing and no need to
7515  * flush status block and interrupt mailbox. PCI ordering rules
7516  * guarantee that MSI will arrive after the status block.
7517  */
7518 static irqreturn_t tg3_msi(int irq, void *dev_id)
7519 {
7520         struct tg3_napi *tnapi = dev_id;
7521         struct tg3 *tp = tnapi->tp;
7522
7523         prefetch(tnapi->hw_status);
7524         if (tnapi->rx_rcb)
7525                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7526         /*
7527          * Writing any value to intr-mbox-0 clears PCI INTA# and
7528          * chip-internal interrupt pending events.
7529          * Writing non-zero to intr-mbox-0 additionally tells the
7530          * NIC to stop sending us irqs, engaging "in-intr-handler"
7531          * event coalescing.
7532          */
7533         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7534         if (likely(!tg3_irq_sync(tp)))
7535                 napi_schedule(&tnapi->napi);
7536
7537         return IRQ_RETVAL(1);
7538 }
7539
7540 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7541 {
7542         struct tg3_napi *tnapi = dev_id;
7543         struct tg3 *tp = tnapi->tp;
7544         struct tg3_hw_status *sblk = tnapi->hw_status;
7545         unsigned int handled = 1;
7546
7547         /* In INTx mode, the interrupt can arrive at the CPU before the
7548          * status block posted just prior to it is visible in memory.
7549          * Reading the PCI State register will confirm whether the
7550          * interrupt is ours and will flush the status block.
7551          */
7552         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7553                 if (tg3_flag(tp, CHIP_RESETTING) ||
7554                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555                         handled = 0;
7556                         goto out;
7557                 }
7558         }
7559
7560         /*
7561          * Writing any value to intr-mbox-0 clears PCI INTA# and
7562          * chip-internal interrupt pending events.
7563          * Writing non-zero to intr-mbox-0 additionally tells the
7564          * NIC to stop sending us irqs, engaging "in-intr-handler"
7565          * event coalescing.
7566          *
7567          * Flush the mailbox to de-assert the IRQ immediately to prevent
7568          * spurious interrupts.  The flush impacts performance but
7569          * excessive spurious interrupts can be worse in some cases.
7570          */
7571         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7572         if (tg3_irq_sync(tp))
7573                 goto out;
7574         sblk->status &= ~SD_STATUS_UPDATED;
7575         if (likely(tg3_has_work(tnapi))) {
7576                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7577                 napi_schedule(&tnapi->napi);
7578         } else {
7579                 /* No work; shared interrupt, perhaps?  Re-enable
7580                  * interrupts, and flush that PCI write.
7581                  */
7582                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7583                                0x00000000);
7584         }
7585 out:
7586         return IRQ_RETVAL(handled);
7587 }
7588
7589 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7590 {
7591         struct tg3_napi *tnapi = dev_id;
7592         struct tg3 *tp = tnapi->tp;
7593         struct tg3_hw_status *sblk = tnapi->hw_status;
7594         unsigned int handled = 1;
7595
7596         /* In INTx mode, the interrupt can arrive at the CPU before the
7597          * status block posted just prior to it is visible in memory.
7598          * Reading the PCI State register will confirm whether the
7599          * interrupt is ours and will flush the status block.
7600          */
7601         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7602                 if (tg3_flag(tp, CHIP_RESETTING) ||
7603                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7604                         handled = 0;
7605                         goto out;
7606                 }
7607         }
7608
7609         /*
7610          * Writing any value to intr-mbox-0 clears PCI INTA# and
7611          * chip-internal interrupt pending events.
7612          * Writing non-zero to intr-mbox-0 additionally tells the
7613          * NIC to stop sending us irqs, engaging "in-intr-handler"
7614          * event coalescing.
7615          *
7616          * Flush the mailbox to de-assert the IRQ immediately to prevent
7617          * spurious interrupts.  The flush impacts performance but
7618          * excessive spurious interrupts can be worse in some cases.
7619          */
7620         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7621
7622         /*
7623          * In a shared interrupt configuration, sometimes other devices'
7624          * interrupts will scream.  We record the current status tag here
7625          * so that the above check can report that the screaming interrupts
7626          * are unhandled.  Eventually they will be silenced.
7627          */
7628         tnapi->last_irq_tag = sblk->status_tag;
7629
7630         if (tg3_irq_sync(tp))
7631                 goto out;
7632
7633         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7634
7635         napi_schedule(&tnapi->napi);
7636
7637 out:
7638         return IRQ_RETVAL(handled);
7639 }
7640
7641 /* ISR for interrupt test */
7642 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7643 {
7644         struct tg3_napi *tnapi = dev_id;
7645         struct tg3 *tp = tnapi->tp;
7646         struct tg3_hw_status *sblk = tnapi->hw_status;
7647
7648         if ((sblk->status & SD_STATUS_UPDATED) ||
7649             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7650                 tg3_disable_ints(tp);
7651                 return IRQ_RETVAL(1);
7652         }
7653         return IRQ_RETVAL(0);
7654 }
7655
7656 #ifdef CONFIG_NET_POLL_CONTROLLER
7657 static void tg3_poll_controller(struct net_device *dev)
7658 {
7659         int i;
7660         struct tg3 *tp = netdev_priv(dev);
7661
7662         if (tg3_irq_sync(tp))
7663                 return;
7664
7665         for (i = 0; i < tp->irq_cnt; i++)
7666                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7667 }
7668 #endif
7669
7670 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7671 {
7672         struct tg3 *tp = netdev_priv(dev);
7673
7674         if (netif_msg_tx_err(tp)) {
7675                 netdev_err(dev, "transmit timed out, resetting\n");
7676                 tg3_dump_state(tp);
7677         }
7678
7679         tg3_reset_task_schedule(tp);
7680 }
7681
7682 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7683 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7684 {
7685         u32 base = (u32) mapping & 0xffffffff;
7686
7687         return base + len + 8 < base;
7688 }
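
/* For example, base == 0xffffff00 and len == 0x100 gives
 * base + len + 8 == 0x8 once the u32 addition wraps, which is less
 * than base, so the test returns true and the buffer is flagged as
 * crossing a 4GB boundary (note the 8 bytes of slack included past
 * the end of the buffer).
 */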
7689
7690 /* Test for TSO DMA buffers that cross into regions within MSS bytes
7691  * of any 4GB boundary: 4G, 8G, etc.
7692  */
7693 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7694                                            u32 len, u32 mss)
7695 {
7696         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7697                 u32 base = (u32) mapping & 0xffffffff;
7698
7699                 return ((base + len + (mss & 0x3fff)) < base);
7700         }
7701         return 0;
7702 }
7703
7704 /* Test for DMA addresses > 40-bit */
7705 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7706                                           int len)
7707 {
7708 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7709         if (tg3_flag(tp, 40BIT_DMA_BUG))
7710                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7711         return 0;
7712 #else
7713         return 0;
7714 #endif
7715 }
7716
7717 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7718                                  dma_addr_t mapping, u32 len, u32 flags,
7719                                  u32 mss, u32 vlan)
7720 {
7721         txbd->addr_hi = ((u64) mapping >> 32);
7722         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7723         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7724         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7725 }
7726
7727 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7728                             dma_addr_t map, u32 len, u32 flags,
7729                             u32 mss, u32 vlan)
7730 {
7731         struct tg3 *tp = tnapi->tp;
7732         bool hwbug = false;
7733
7734         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7735                 hwbug = true;
7736
7737         if (tg3_4g_overflow_test(map, len))
7738                 hwbug = true;
7739
7740         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7741                 hwbug = true;
7742
7743         if (tg3_40bit_overflow_test(tp, map, len))
7744                 hwbug = true;
7745
7746         if (tp->dma_limit) {
7747                 u32 prvidx = *entry;
7748                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7749                 while (len > tp->dma_limit && *budget) {
7750                         u32 frag_len = tp->dma_limit;
7751                         len -= tp->dma_limit;
7752
7753                         /* Avoid the 8-byte DMA problem */
7754                         if (len <= 8) {
7755                                 len += tp->dma_limit / 2;
7756                                 frag_len = tp->dma_limit / 2;
7757                         }
7758
7759                         tnapi->tx_buffers[*entry].fragmented = true;
7760
7761                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7762                                       frag_len, tmp_flag, mss, vlan);
7763                         *budget -= 1;
7764                         prvidx = *entry;
7765                         *entry = NEXT_TX(*entry);
7766
7767                         map += frag_len;
7768                 }
7769
7770                 if (len) {
7771                         if (*budget) {
7772                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7773                                               len, flags, mss, vlan);
7774                                 *budget -= 1;
7775                                 *entry = NEXT_TX(*entry);
7776                         } else {
7777                                 hwbug = true;
7778                                 tnapi->tx_buffers[prvidx].fragmented = false;
7779                         }
7780                 }
7781         } else {
7782                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7783                               len, flags, mss, vlan);
7784                 *entry = NEXT_TX(*entry);
7785         }
7786
7787         return hwbug;
7788 }
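
/* Sketch of the dma_limit splitting above, assuming dma_limit == 4096
 * and a 9000-byte mapping: two 4096-byte BDs are emitted, followed by
 * a final 808-byte BD.  Had a pass left a remainder of 8 bytes or
 * less, the fragment just emitted would have been shortened to
 * dma_limit / 2 instead, so that no BD is left with the <= 8 byte
 * length that SHORT_DMA_BUG hardware mishandles.
 */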
7789
7790 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7791 {
7792         int i;
7793         struct sk_buff *skb;
7794         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7795
7796         skb = txb->skb;
7797         txb->skb = NULL;
7798
7799         dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7800                          skb_headlen(skb), DMA_TO_DEVICE);
7801
7802         while (txb->fragmented) {
7803                 txb->fragmented = false;
7804                 entry = NEXT_TX(entry);
7805                 txb = &tnapi->tx_buffers[entry];
7806         }
7807
7808         for (i = 0; i <= last; i++) {
7809                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7810
7811                 entry = NEXT_TX(entry);
7812                 txb = &tnapi->tx_buffers[entry];
7813
7814                 dma_unmap_page(&tnapi->tp->pdev->dev,
7815                                dma_unmap_addr(txb, mapping),
7816                                skb_frag_size(frag), DMA_TO_DEVICE);
7817
7818                 while (txb->fragmented) {
7819                         txb->fragmented = false;
7820                         entry = NEXT_TX(entry);
7821                         txb = &tnapi->tx_buffers[entry];
7822                 }
7823         }
7824 }
7825
7826 /* Work around 4GB and 40-bit hardware DMA bugs. */
7827 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7828                                        struct sk_buff **pskb,
7829                                        u32 *entry, u32 *budget,
7830                                        u32 base_flags, u32 mss, u32 vlan)
7831 {
7832         struct tg3 *tp = tnapi->tp;
7833         struct sk_buff *new_skb, *skb = *pskb;
7834         dma_addr_t new_addr = 0;
7835         int ret = 0;
7836
7837         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7838                 new_skb = skb_copy(skb, GFP_ATOMIC);
7839         else {
7840                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7841
7842                 new_skb = skb_copy_expand(skb,
7843                                           skb_headroom(skb) + more_headroom,
7844                                           skb_tailroom(skb), GFP_ATOMIC);
7845         }
7846
7847         if (!new_skb) {
7848                 ret = -1;
7849         } else {
7850                 /* New SKB is guaranteed to be linear. */
7851                 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7852                                           new_skb->len, DMA_TO_DEVICE);
7853                 /* Make sure the mapping succeeded */
7854                 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7855                         dev_kfree_skb_any(new_skb);
7856                         ret = -1;
7857                 } else {
7858                         u32 save_entry = *entry;
7859
7860                         base_flags |= TXD_FLAG_END;
7861
7862                         tnapi->tx_buffers[*entry].skb = new_skb;
7863                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7864                                            mapping, new_addr);
7865
7866                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7867                                             new_skb->len, base_flags,
7868                                             mss, vlan)) {
7869                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7870                                 dev_kfree_skb_any(new_skb);
7871                                 ret = -1;
7872                         }
7873                 }
7874         }
7875
7876         dev_consume_skb_any(skb);
7877         *pskb = new_skb;
7878         return ret;
7879 }
7880
7881 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7882 {
7883         /* Check if we will never have enough descriptors,
7884          * as gso_segs can be more than current ring size
7885          * as gso_segs can be more than the current ring size.
7886         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7887 }
7888
7889 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7890
7891 /* Use GSO to work around all TSO packets that meet the HW bug
7892  * conditions indicated in tg3_tx_frag_set().
7893  */
7894 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7895                        struct netdev_queue *txq, struct sk_buff *skb)
7896 {
7897         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7898         struct sk_buff *segs, *seg, *next;
7899
7900         /* Estimate the number of fragments in the worst case */
7901         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7902                 netif_tx_stop_queue(txq);
7903
7904                 /* netif_tx_stop_queue() must be done before checking
7905                  * tx index in tg3_tx_avail() below, because in
7906                  * tg3_tx(), we update tx index before checking for
7907                  * netif_tx_queue_stopped().
7908                  */
7909                 smp_mb();
7910                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7911                         return NETDEV_TX_BUSY;
7912
7913                 netif_tx_wake_queue(txq);
7914         }
7915
7916         segs = skb_gso_segment(skb, tp->dev->features &
7917                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7918         if (IS_ERR(segs) || !segs) {
7919                 tnapi->tx_dropped++;
7920                 goto tg3_tso_bug_end;
7921         }
7922
7923         skb_list_walk_safe(segs, seg, next) {
7924                 skb_mark_not_on_list(seg);
7925                 __tg3_start_xmit(seg, tp->dev);
7926         }
7927
7928 tg3_tso_bug_end:
7929         dev_consume_skb_any(skb);
7930
7931         return NETDEV_TX_OK;
7932 }
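
/* frag_cnt_est above (gso_segs * 3) is the worst-case descriptor count
 * for the resegmented packet; if even that cannot be guaranteed, the
 * queue is stopped and NETDEV_TX_BUSY returned rather than submitting
 * a partial packet.  Each skb produced by skb_gso_segment() is then
 * re-submitted individually through __tg3_start_xmit().
 */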
7933
7934 /* hard_start_xmit for all devices */
7935 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7936 {
7937         struct tg3 *tp = netdev_priv(dev);
7938         u32 len, entry, base_flags, mss, vlan = 0;
7939         u32 budget;
7940         int i = -1, would_hit_hwbug;
7941         dma_addr_t mapping;
7942         struct tg3_napi *tnapi;
7943         struct netdev_queue *txq;
7944         unsigned int last;
7945         struct iphdr *iph = NULL;
7946         struct tcphdr *tcph = NULL;
7947         __sum16 tcp_csum = 0, ip_csum = 0;
7948         __be16 ip_tot_len = 0;
7949
7950         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7951         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7952         if (tg3_flag(tp, ENABLE_TSS))
7953                 tnapi++;
7954
7955         budget = tg3_tx_avail(tnapi);
7956
7957         /* We are running in BH disabled context with netif_tx_lock
7958          * and TX reclaim runs via tp->napi.poll inside of a software
7959          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7960          * no IRQ context deadlocks to worry about either.  Rejoice!
7961          */
7962         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7963                 if (!netif_tx_queue_stopped(txq)) {
7964                         netif_tx_stop_queue(txq);
7965
7966                         /* This is a hard error, log it. */
7967                         netdev_err(dev,
7968                                    "BUG! Tx Ring full when queue awake!\n");
7969                 }
7970                 return NETDEV_TX_BUSY;
7971         }
7972
7973         entry = tnapi->tx_prod;
7974         base_flags = 0;
7975
7976         mss = skb_shinfo(skb)->gso_size;
7977         if (mss) {
7978                 u32 tcp_opt_len, hdr_len;
7979
7980                 if (skb_cow_head(skb, 0))
7981                         goto drop;
7982
7983                 iph = ip_hdr(skb);
7984                 tcp_opt_len = tcp_optlen(skb);
7985
7986                 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7987
7988                 /* HW/FW can not correctly segment packets that have been
7989                  * vlan encapsulated.
7990                  */
7991                 if (skb->protocol == htons(ETH_P_8021Q) ||
7992                     skb->protocol == htons(ETH_P_8021AD)) {
7993                         if (tg3_tso_bug_gso_check(tnapi, skb))
7994                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7995                         goto drop;
7996                 }
7997
7998                 if (!skb_is_gso_v6(skb)) {
7999                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8000                             tg3_flag(tp, TSO_BUG)) {
8001                                 if (tg3_tso_bug_gso_check(tnapi, skb))
8002                                         return tg3_tso_bug(tp, tnapi, txq, skb);
8003                                 goto drop;
8004                         }
8005                         ip_csum = iph->check;
8006                         ip_tot_len = iph->tot_len;
8007                         iph->check = 0;
8008                         iph->tot_len = htons(mss + hdr_len);
8009                 }
8010
8011                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8012                                TXD_FLAG_CPU_POST_DMA);
8013
8014                 tcph = tcp_hdr(skb);
8015                 tcp_csum = tcph->check;
8016
8017                 if (tg3_flag(tp, HW_TSO_1) ||
8018                     tg3_flag(tp, HW_TSO_2) ||
8019                     tg3_flag(tp, HW_TSO_3)) {
8020                         tcph->check = 0;
8021                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8022                 } else {
8023                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8024                                                          0, IPPROTO_TCP, 0);
8025                 }
8026
8027                 if (tg3_flag(tp, HW_TSO_3)) {
8028                         mss |= (hdr_len & 0xc) << 12;
8029                         if (hdr_len & 0x10)
8030                                 base_flags |= 0x00000010;
8031                         base_flags |= (hdr_len & 0x3e0) << 5;
8032                 } else if (tg3_flag(tp, HW_TSO_2))
8033                         mss |= hdr_len << 9;
8034                 else if (tg3_flag(tp, HW_TSO_1) ||
8035                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8036                         if (tcp_opt_len || iph->ihl > 5) {
8037                                 int tsflags;
8038
8039                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8040                                 mss |= (tsflags << 11);
8041                         }
8042                 } else {
8043                         if (tcp_opt_len || iph->ihl > 5) {
8044                                 int tsflags;
8045
8046                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8047                                 base_flags |= tsflags << 12;
8048                         }
8049                 }
8050         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8051                 /* HW/FW can not correctly checksum packets that have been
8052                  * vlan encapsulated.
8053                  */
8054                 if (skb->protocol == htons(ETH_P_8021Q) ||
8055                     skb->protocol == htons(ETH_P_8021AD)) {
8056                         if (skb_checksum_help(skb))
8057                                 goto drop;
8058                 } else  {
8059                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8060                 }
8061         }
8062
8063         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8064             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8065                 base_flags |= TXD_FLAG_JMB_PKT;
8066
8067         if (skb_vlan_tag_present(skb)) {
8068                 base_flags |= TXD_FLAG_VLAN;
8069                 vlan = skb_vlan_tag_get(skb);
8070         }
8071
8072         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8073             tg3_flag(tp, TX_TSTAMP_EN)) {
8074                 tg3_full_lock(tp, 0);
8075                 if (!tp->pre_tx_ts) {
8076                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8077                         base_flags |= TXD_FLAG_HWTSTAMP;
8078                         tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8079                 }
8080                 tg3_full_unlock(tp);
8081         }
8082
8083         len = skb_headlen(skb);
8084
8085         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8086                                  DMA_TO_DEVICE);
8087         if (dma_mapping_error(&tp->pdev->dev, mapping))
8088                 goto drop;
8089
8090
8091         tnapi->tx_buffers[entry].skb = skb;
8092         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8093
8094         would_hit_hwbug = 0;
8095
8096         if (tg3_flag(tp, 5701_DMA_BUG))
8097                 would_hit_hwbug = 1;
8098
8099         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8100                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8101                             mss, vlan)) {
8102                 would_hit_hwbug = 1;
8103         } else if (skb_shinfo(skb)->nr_frags > 0) {
8104                 u32 tmp_mss = mss;
8105
8106                 if (!tg3_flag(tp, HW_TSO_1) &&
8107                     !tg3_flag(tp, HW_TSO_2) &&
8108                     !tg3_flag(tp, HW_TSO_3))
8109                         tmp_mss = 0;
8110
8111                 /* Now loop through additional data
8112                  * fragments, and queue them.
8113                  */
8114                 last = skb_shinfo(skb)->nr_frags - 1;
8115                 for (i = 0; i <= last; i++) {
8116                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8117
8118                         len = skb_frag_size(frag);
8119                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8120                                                    len, DMA_TO_DEVICE);
8121
8122                         tnapi->tx_buffers[entry].skb = NULL;
8123                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8124                                            mapping);
8125                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8126                                 goto dma_error;
8127
8128                         if (!budget ||
8129                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8130                                             len, base_flags |
8131                                             ((i == last) ? TXD_FLAG_END : 0),
8132                                             tmp_mss, vlan)) {
8133                                 would_hit_hwbug = 1;
8134                                 break;
8135                         }
8136                 }
8137         }
8138
8139         if (would_hit_hwbug) {
8140                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8141
8142                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8143                         /* If it's a TSO packet, do GSO instead of
8144                          * allocating and copying to a large linear SKB
8145                          */
8146                         if (ip_tot_len) {
8147                                 iph->check = ip_csum;
8148                                 iph->tot_len = ip_tot_len;
8149                         }
8150                         tcph->check = tcp_csum;
8151                         return tg3_tso_bug(tp, tnapi, txq, skb);
8152                 }
8153
8154                 /* If the workaround fails due to memory/mapping
8155                  * failure, silently drop this packet.
8156                  */
8157                 entry = tnapi->tx_prod;
8158                 budget = tg3_tx_avail(tnapi);
8159                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8160                                                 base_flags, mss, vlan))
8161                         goto drop_nofree;
8162         }
8163
8164         skb_tx_timestamp(skb);
8165         netdev_tx_sent_queue(txq, skb->len);
8166
8167         /* Sync BD data before updating mailbox */
8168         wmb();
8169
8170         tnapi->tx_prod = entry;
8171         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8172                 netif_tx_stop_queue(txq);
8173
8174                 /* netif_tx_stop_queue() must be done before checking
8175                  * tx index in tg3_tx_avail() below, because in
8176                  * tg3_tx(), we update tx index before checking for
8177                  * netif_tx_queue_stopped().
8178                  */
8179                 smp_mb();
8180                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8181                         netif_tx_wake_queue(txq);
8182         }
8183
8184         return NETDEV_TX_OK;
8185
8186 dma_error:
8187         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8188         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8189 drop:
8190         dev_kfree_skb_any(skb);
8191 drop_nofree:
8192         tnapi->tx_dropped++;
8193         return NETDEV_TX_OK;
8194 }
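
/* Recovery order in __tg3_start_xmit() when a BD layout would trip a
 * hardware bug: first try the GSO fallback (after restoring the IP and
 * TCP header fields that were rewritten for TSO), then fall back to
 * copying into a freshly allocated linear skb; if that also fails, the
 * packet is dropped and accounted in tx_dropped.
 */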
8195
8196 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8197 {
8198         struct netdev_queue *txq;
8199         u16 skb_queue_mapping;
8200         netdev_tx_t ret;
8201
8202         skb_queue_mapping = skb_get_queue_mapping(skb);
8203         txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8204
8205         ret = __tg3_start_xmit(skb, dev);
8206
8207         /* Notify the hardware that packets are ready by updating the TX ring
8208          * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8209          * the hardware for every packet. To guarantee forward progress the TX
8210          * ring must be drained when it is full as indicated by
8211          * netif_xmit_stopped(). This needs to happen even when the current
8212          * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8213          * queued by previous __tg3_start_xmit() calls might get stuck in
8214          * the queue forever.
8215          */
8216         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8217                 struct tg3_napi *tnapi;
8218                 struct tg3 *tp;
8219
8220                 tp = netdev_priv(dev);
8221                 tnapi = &tp->napi[skb_queue_mapping];
8222
8223                 if (tg3_flag(tp, ENABLE_TSS))
8224                         tnapi++;
8225
8226                 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8227         }
8228
8229         return ret;
8230 }
8231
8232 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8233 {
8234         if (enable) {
8235                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8236                                   MAC_MODE_PORT_MODE_MASK);
8237
8238                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8239
8240                 if (!tg3_flag(tp, 5705_PLUS))
8241                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8242
8243                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8244                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8245                 else
8246                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8247         } else {
8248                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8249
8250                 if (tg3_flag(tp, 5705_PLUS) ||
8251                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8252                     tg3_asic_rev(tp) == ASIC_REV_5700)
8253                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8254         }
8255
8256         tw32(MAC_MODE, tp->mac_mode);
8257         udelay(40);
8258 }
8259
8260 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8261 {
8262         u32 val, bmcr, mac_mode, ptest = 0;
8263
8264         tg3_phy_toggle_apd(tp, false);
8265         tg3_phy_toggle_automdix(tp, false);
8266
8267         if (extlpbk && tg3_phy_set_extloopbk(tp))
8268                 return -EIO;
8269
8270         bmcr = BMCR_FULLDPLX;
8271         switch (speed) {
8272         case SPEED_10:
8273                 break;
8274         case SPEED_100:
8275                 bmcr |= BMCR_SPEED100;
8276                 break;
8277         case SPEED_1000:
8278         default:
8279                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8280                         speed = SPEED_100;
8281                         bmcr |= BMCR_SPEED100;
8282                 } else {
8283                         speed = SPEED_1000;
8284                         bmcr |= BMCR_SPEED1000;
8285                 }
8286         }
8287
8288         if (extlpbk) {
8289                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8290                         tg3_readphy(tp, MII_CTRL1000, &val);
8291                         val |= CTL1000_AS_MASTER |
8292                                CTL1000_ENABLE_MASTER;
8293                         tg3_writephy(tp, MII_CTRL1000, val);
8294                 } else {
8295                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8296                                 MII_TG3_FET_PTEST_TRIM_2;
8297                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8298                 }
8299         } else
8300                 bmcr |= BMCR_LOOPBACK;
8301
8302         tg3_writephy(tp, MII_BMCR, bmcr);
8303
8304         /* The write needs to be flushed for the FETs */
8305         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8306                 tg3_readphy(tp, MII_BMCR, &bmcr);
8307
8308         udelay(40);
8309
8310         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8311             tg3_asic_rev(tp) == ASIC_REV_5785) {
8312                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8313                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8314                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8315
8316                 /* The write needs to be flushed for the AC131 */
8317                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8318         }
8319
8320         /* Reset to prevent losing 1st rx packet intermittently */
8321         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8322             tg3_flag(tp, 5780_CLASS)) {
8323                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8324                 udelay(10);
8325                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8326         }
8327
8328         mac_mode = tp->mac_mode &
8329                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8330         if (speed == SPEED_1000)
8331                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8332         else
8333                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8334
8335         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8336                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8337
8338                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8339                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8340                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8341                         mac_mode |= MAC_MODE_LINK_POLARITY;
8342
8343                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8344                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8345         }
8346
8347         tw32(MAC_MODE, mac_mode);
8348         udelay(40);
8349
8350         return 0;
8351 }
8352
8353 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8354 {
8355         struct tg3 *tp = netdev_priv(dev);
8356
8357         if (features & NETIF_F_LOOPBACK) {
8358                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8359                         return;
8360
8361                 spin_lock_bh(&tp->lock);
8362                 tg3_mac_loopback(tp, true);
8363                 netif_carrier_on(tp->dev);
8364                 spin_unlock_bh(&tp->lock);
8365                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8366         } else {
8367                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8368                         return;
8369
8370                 spin_lock_bh(&tp->lock);
8371                 tg3_mac_loopback(tp, false);
8372                 /* Force link status check */
8373                 tg3_setup_phy(tp, true);
8374                 spin_unlock_bh(&tp->lock);
8375                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8376         }
8377 }
8378
8379 static netdev_features_t tg3_fix_features(struct net_device *dev,
8380         netdev_features_t features)
8381 {
8382         struct tg3 *tp = netdev_priv(dev);
8383
8384         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8385                 features &= ~NETIF_F_ALL_TSO;
8386
8387         return features;
8388 }
8389
8390 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8391 {
8392         netdev_features_t changed = dev->features ^ features;
8393
8394         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8395                 tg3_set_loopback(dev, features);
8396
8397         return 0;
8398 }
8399
8400 static void tg3_rx_prodring_free(struct tg3 *tp,
8401                                  struct tg3_rx_prodring_set *tpr)
8402 {
8403         int i;
8404
8405         if (tpr != &tp->napi[0].prodring) {
8406                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8407                      i = (i + 1) & tp->rx_std_ring_mask)
8408                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8409                                         tp->rx_pkt_map_sz);
8410
8411                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8412                         for (i = tpr->rx_jmb_cons_idx;
8413                              i != tpr->rx_jmb_prod_idx;
8414                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8415                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8416                                                 TG3_RX_JMB_MAP_SZ);
8417                         }
8418                 }
8419
8420                 return;
8421         }
8422
8423         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8424                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8425                                 tp->rx_pkt_map_sz);
8426
8427         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8428                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8429                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8430                                         TG3_RX_JMB_MAP_SZ);
8431         }
8432 }
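
/* Only the default prodring (napi[0]) owns buffers across the whole
 * ring, so it is freed from index 0 through the ring mask above; the
 * per-vector rings used with RSS hold buffers only in their current
 * cons..prod window, which is all the first branch walks.
 */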
8433
8434 /* Initialize rx rings for packet processing.
8435  *
8436  * The chip has been shut down and the driver detached from
8437  * the networking stack, so no interrupts or new tx packets will
8438  * end up in the driver.  tp->{tx,}lock are held and thus
8439  * we may not sleep.
8440  */
8441 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8442                                  struct tg3_rx_prodring_set *tpr)
8443 {
8444         u32 i, rx_pkt_dma_sz;
8445
8446         tpr->rx_std_cons_idx = 0;
8447         tpr->rx_std_prod_idx = 0;
8448         tpr->rx_jmb_cons_idx = 0;
8449         tpr->rx_jmb_prod_idx = 0;
8450
8451         if (tpr != &tp->napi[0].prodring) {
8452                 memset(&tpr->rx_std_buffers[0], 0,
8453                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8454                 if (tpr->rx_jmb_buffers)
8455                         memset(&tpr->rx_jmb_buffers[0], 0,
8456                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8457                 goto done;
8458         }
8459
8460         /* Zero out all descriptors. */
8461         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8462
8463         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8464         if (tg3_flag(tp, 5780_CLASS) &&
8465             tp->dev->mtu > ETH_DATA_LEN)
8466                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8467         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8468
8469         /* Initialize invariants of the rings; we only set this
8470          * stuff once.  This works because the card does not
8471          * write into the rx buffer posting rings.
8472          */
8473         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8474                 struct tg3_rx_buffer_desc *rxd;
8475
8476                 rxd = &tpr->rx_std[i];
8477                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8478                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8479                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8480                                (i << RXD_OPAQUE_INDEX_SHIFT));
8481         }
8482
8483         /* Now allocate fresh SKBs for each rx ring. */
8484         for (i = 0; i < tp->rx_pending; i++) {
8485                 unsigned int frag_size;
8486
8487                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8488                                       &frag_size) < 0) {
8489                         netdev_warn(tp->dev,
8490                                     "Using a smaller RX standard ring. Only "
8491                                     "%d out of %d buffers were allocated "
8492                                     "successfully\n", i, tp->rx_pending);
8493                         if (i == 0)
8494                                 goto initfail;
8495                         tp->rx_pending = i;
8496                         break;
8497                 }
8498         }
8499
8500         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8501                 goto done;
8502
8503         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8504
8505         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8506                 goto done;
8507
8508         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8509                 struct tg3_rx_buffer_desc *rxd;
8510
8511                 rxd = &tpr->rx_jmb[i].std;
8512                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8513                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8514                                   RXD_FLAG_JUMBO;
8515                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8516                        (i << RXD_OPAQUE_INDEX_SHIFT));
8517         }
8518
8519         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8520                 unsigned int frag_size;
8521
8522                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8523                                       &frag_size) < 0) {
8524                         netdev_warn(tp->dev,
8525                                     "Using a smaller RX jumbo ring. Only %d "
8526                                     "out of %d buffers were allocated "
8527                                     "successfully\n", i, tp->rx_jumbo_pending);
8528                         if (i == 0)
8529                                 goto initfail;
8530                         tp->rx_jumbo_pending = i;
8531                         break;
8532                 }
8533         }
8534
8535 done:
8536         return 0;
8537
8538 initfail:
8539         tg3_rx_prodring_free(tp, tpr);
8540         return -ENOMEM;
8541 }
8542
8543 static void tg3_rx_prodring_fini(struct tg3 *tp,
8544                                  struct tg3_rx_prodring_set *tpr)
8545 {
8546         kfree(tpr->rx_std_buffers);
8547         tpr->rx_std_buffers = NULL;
8548         kfree(tpr->rx_jmb_buffers);
8549         tpr->rx_jmb_buffers = NULL;
8550         if (tpr->rx_std) {
8551                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8552                                   tpr->rx_std, tpr->rx_std_mapping);
8553                 tpr->rx_std = NULL;
8554         }
8555         if (tpr->rx_jmb) {
8556                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8557                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8558                 tpr->rx_jmb = NULL;
8559         }
8560 }
8561
8562 static int tg3_rx_prodring_init(struct tg3 *tp,
8563                                 struct tg3_rx_prodring_set *tpr)
8564 {
8565         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8566                                       GFP_KERNEL);
8567         if (!tpr->rx_std_buffers)
8568                 return -ENOMEM;
8569
8570         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8571                                          TG3_RX_STD_RING_BYTES(tp),
8572                                          &tpr->rx_std_mapping,
8573                                          GFP_KERNEL);
8574         if (!tpr->rx_std)
8575                 goto err_out;
8576
8577         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8578                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8579                                               GFP_KERNEL);
8580                 if (!tpr->rx_jmb_buffers)
8581                         goto err_out;
8582
8583                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8584                                                  TG3_RX_JMB_RING_BYTES(tp),
8585                                                  &tpr->rx_jmb_mapping,
8586                                                  GFP_KERNEL);
8587                 if (!tpr->rx_jmb)
8588                         goto err_out;
8589         }
8590
8591         return 0;
8592
8593 err_out:
8594         tg3_rx_prodring_fini(tp, tpr);
8595         return -ENOMEM;
8596 }
8597
8598 /* Free up pending packets in all rx/tx rings.
8599  *
8600  * The chip has been shut down and the driver detached from
8601  * the networking stack, so no interrupts or new tx packets will
8602  * end up in the driver.  tp->{tx,}lock is not held and we are not
8603  * in an interrupt context and thus may sleep.
8604  */
8605 static void tg3_free_rings(struct tg3 *tp)
8606 {
8607         int i, j;
8608
8609         for (j = 0; j < tp->irq_cnt; j++) {
8610                 struct tg3_napi *tnapi = &tp->napi[j];
8611
8612                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8613
8614                 if (!tnapi->tx_buffers)
8615                         continue;
8616
8617                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8618                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8619
8620                         if (!skb)
8621                                 continue;
8622
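                        /* Drop the DMA mappings for the head and all of the
                         * skb's fragments; the last argument is the index of
                         * the final fragment.
                         */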
8623                         tg3_tx_skb_unmap(tnapi, i,
8624                                          skb_shinfo(skb)->nr_frags - 1);
8625
8626                         dev_consume_skb_any(skb);
8627                 }
8628                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8629         }
8630 }
8631
8632 /* Initialize tx/rx rings for packet processing.
8633  *
8634  * The chip has been shut down and the driver detached from
8635  * the networking stack, so no interrupts or new tx packets will
8636  * end up in the driver.  tp->{tx,}lock are held and thus
8637  * we may not sleep.
8638  */
8639 static int tg3_init_rings(struct tg3 *tp)
8640 {
8641         int i;
8642
8643         /* Free up all the SKBs. */
8644         tg3_free_rings(tp);
8645
8646         for (i = 0; i < tp->irq_cnt; i++) {
8647                 struct tg3_napi *tnapi = &tp->napi[i];
8648
8649                 tnapi->last_tag = 0;
8650                 tnapi->last_irq_tag = 0;
8651                 tnapi->hw_status->status = 0;
8652                 tnapi->hw_status->status_tag = 0;
8653                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8654
8655                 tnapi->tx_prod = 0;
8656                 tnapi->tx_cons = 0;
8657                 if (tnapi->tx_ring)
8658                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8659
8660                 tnapi->rx_rcb_ptr = 0;
8661                 if (tnapi->rx_rcb)
8662                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8663
8664                 if (tnapi->prodring.rx_std &&
8665                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8666                         tg3_free_rings(tp);
8667                         return -ENOMEM;
8668                 }
8669         }
8670
8671         return 0;
8672 }
8673
8674 static void tg3_mem_tx_release(struct tg3 *tp)
8675 {
8676         int i;
8677
8678         for (i = 0; i < tp->irq_max; i++) {
8679                 struct tg3_napi *tnapi = &tp->napi[i];
8680
8681                 if (tnapi->tx_ring) {
8682                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8683                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8684                         tnapi->tx_ring = NULL;
8685                 }
8686
8687                 kfree(tnapi->tx_buffers);
8688                 tnapi->tx_buffers = NULL;
8689         }
8690 }
8691
8692 static int tg3_mem_tx_acquire(struct tg3 *tp)
8693 {
8694         int i;
8695         struct tg3_napi *tnapi = &tp->napi[0];
8696
8697         /* If multivector TSS is enabled, vector 0 does not handle
8698          * tx interrupts.  Don't allocate any resources for it.
8699          */
8700         if (tg3_flag(tp, ENABLE_TSS))
8701                 tnapi++;
8702
8703         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8704                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8705                                             sizeof(struct tg3_tx_ring_info),
8706                                             GFP_KERNEL);
8707                 if (!tnapi->tx_buffers)
8708                         goto err_out;
8709
8710                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8711                                                     TG3_TX_RING_BYTES,
8712                                                     &tnapi->tx_desc_mapping,
8713                                                     GFP_KERNEL);
8714                 if (!tnapi->tx_ring)
8715                         goto err_out;
8716         }
8717
8718         return 0;
8719
8720 err_out:
8721         tg3_mem_tx_release(tp);
8722         return -ENOMEM;
8723 }
8724
8725 static void tg3_mem_rx_release(struct tg3 *tp)
8726 {
8727         int i;
8728
8729         for (i = 0; i < tp->irq_max; i++) {
8730                 struct tg3_napi *tnapi = &tp->napi[i];
8731
8732                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8733
8734                 if (!tnapi->rx_rcb)
8735                         continue;
8736
8737                 dma_free_coherent(&tp->pdev->dev,
8738                                   TG3_RX_RCB_RING_BYTES(tp),
8739                                   tnapi->rx_rcb,
8740                                   tnapi->rx_rcb_mapping);
8741                 tnapi->rx_rcb = NULL;
8742         }
8743 }
8744
8745 static int tg3_mem_rx_acquire(struct tg3 *tp)
8746 {
8747         unsigned int i, limit;
8748
8749         limit = tp->rxq_cnt;
8750
8751         /* If RSS is enabled, we need a (dummy) producer ring
8752          * set on vector zero.  This is the true hw prodring.
8753          */
8754         if (tg3_flag(tp, ENABLE_RSS))
8755                 limit++;
8756
8757         for (i = 0; i < limit; i++) {
8758                 struct tg3_napi *tnapi = &tp->napi[i];
8759
8760                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8761                         goto err_out;
8762
8763                 /* If multivector RSS is enabled, vector 0
8764                  * does not handle rx or tx interrupts.
8765                  * Don't allocate any resources for it.
8766                  */
8767                 if (!i && tg3_flag(tp, ENABLE_RSS))
8768                         continue;
8769
8770                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8771                                                    TG3_RX_RCB_RING_BYTES(tp),
8772                                                    &tnapi->rx_rcb_mapping,
8773                                                    GFP_KERNEL);
8774                 if (!tnapi->rx_rcb)
8775                         goto err_out;
8776         }
8777
8778         return 0;
8779
8780 err_out:
8781         tg3_mem_rx_release(tp);
8782         return -ENOMEM;
8783 }
8784
8785 /*
8786  * Must not be invoked with interrupt sources disabled and
8787  * the hardware shut down.
8788  */
8789 static void tg3_free_consistent(struct tg3 *tp)
8790 {
8791         int i;
8792
8793         for (i = 0; i < tp->irq_cnt; i++) {
8794                 struct tg3_napi *tnapi = &tp->napi[i];
8795
8796                 if (tnapi->hw_status) {
8797                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8798                                           tnapi->hw_status,
8799                                           tnapi->status_mapping);
8800                         tnapi->hw_status = NULL;
8801                 }
8802         }
8803
8804         tg3_mem_rx_release(tp);
8805         tg3_mem_tx_release(tp);
8806
8807         /* tp->hw_stats can be referenced safely:
8808          *     1. under rtnl_lock
8809          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8810          */
8811         if (tp->hw_stats) {
8812                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8813                                   tp->hw_stats, tp->stats_mapping);
8814                 tp->hw_stats = NULL;
8815         }
8816 }
8817
8818 /*
8819  * Must not be invoked with interrupt sources disabled and
8820  * the hardware shut down.  Can sleep.
8821  */
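/* Status blocks are allocated first so that each vector's
 * rx_rcb_prod_idx pointer can be aimed into its own status block;
 * TX and RX ring memory follows.  Every failure path funnels through
 * tg3_free_consistent(), which copes with partially-completed
 * allocations.
 */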
8822 static int tg3_alloc_consistent(struct tg3 *tp)
8823 {
8824         int i;
8825
8826         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8827                                           sizeof(struct tg3_hw_stats),
8828                                           &tp->stats_mapping, GFP_KERNEL);
8829         if (!tp->hw_stats)
8830                 goto err_out;
8831
8832         for (i = 0; i < tp->irq_cnt; i++) {
8833                 struct tg3_napi *tnapi = &tp->napi[i];
8834                 struct tg3_hw_status *sblk;
8835
8836                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8837                                                       TG3_HW_STATUS_SIZE,
8838                                                       &tnapi->status_mapping,
8839                                                       GFP_KERNEL);
8840                 if (!tnapi->hw_status)
8841                         goto err_out;
8842
8843                 sblk = tnapi->hw_status;
8844
8845                 if (tg3_flag(tp, ENABLE_RSS)) {
8846                         u16 *prodptr = NULL;
8847
8848                         /*
8849                          * When RSS is enabled, the status block format changes
8850                          * slightly.  The "rx_jumbo_consumer", "reserved",
8851                          * and "rx_mini_consumer" members get mapped to the
8852                          * other three rx return ring producer indexes.
8853                          */
8854                         switch (i) {
8855                         case 1:
8856                                 prodptr = &sblk->idx[0].rx_producer;
8857                                 break;
8858                         case 2:
8859                                 prodptr = &sblk->rx_jumbo_consumer;
8860                                 break;
8861                         case 3:
8862                                 prodptr = &sblk->reserved;
8863                                 break;
8864                         case 4:
8865                                 prodptr = &sblk->rx_mini_consumer;
8866                                 break;
8867                         }
8868                         tnapi->rx_rcb_prod_idx = prodptr;
8869                 } else {
8870                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8871                 }
8872         }
8873
8874         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8875                 goto err_out;
8876
8877         return 0;
8878
8879 err_out:
8880         tg3_free_consistent(tp);
8881         return -ENOMEM;
8882 }
8883
8884 #define MAX_WAIT_CNT 1000
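/* At 100 usec per poll, MAX_WAIT_CNT bounds each of the wait loops below
 * to roughly 1000 * 100 usec = 100 msec before giving up.
 */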
8885
8886 /* To stop a block, clear the enable bit and poll until it
8887  * clears.  tp->lock is held.
8888  */
8889 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8890 {
8891         unsigned int i;
8892         u32 val;
8893
8894         if (tg3_flag(tp, 5705_PLUS)) {
8895                 switch (ofs) {
8896                 case RCVLSC_MODE:
8897                 case DMAC_MODE:
8898                 case MBFREE_MODE:
8899                 case BUFMGR_MODE:
8900                 case MEMARB_MODE:
8901                         /* We can't enable/disable these bits on the
8902                          * 5705/5750; just report success.
8903                          */
8904                         return 0;
8905
8906                 default:
8907                         break;
8908                 }
8909         }
8910
8911         val = tr32(ofs);
8912         val &= ~enable_bit;
8913         tw32_f(ofs, val);
8914
8915         for (i = 0; i < MAX_WAIT_CNT; i++) {
8916                 if (pci_channel_offline(tp->pdev)) {
8917                         dev_err(&tp->pdev->dev,
8918                                 "tg3_stop_block device offline, "
8919                                 "ofs=%lx enable_bit=%x\n",
8920                                 ofs, enable_bit);
8921                         return -ENODEV;
8922                 }
8923
8924                 udelay(100);
8925                 val = tr32(ofs);
8926                 if ((val & enable_bit) == 0)
8927                         break;
8928         }
8929
8930         if (i == MAX_WAIT_CNT && !silent) {
8931                 dev_err(&tp->pdev->dev,
8932                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8933                         ofs, enable_bit);
8934                 return -ENODEV;
8935         }
8936
8937         return 0;
8938 }
8939
8940 /* tp->lock is held. */
8941 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8942 {
8943         int i, err;
8944
8945         tg3_disable_ints(tp);
8946
8947         if (pci_channel_offline(tp->pdev)) {
8948                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8949                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8950                 err = -ENODEV;
8951                 goto err_no_dev;
8952         }
8953
8954         tp->rx_mode &= ~RX_MODE_ENABLE;
8955         tw32_f(MAC_RX_MODE, tp->rx_mode);
8956         udelay(10);
8957
8958         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8959         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8960         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8961         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8962         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8963         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8964
8965         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8966         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8967         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8968         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8969         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8970         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8971         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8972
8973         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8974         tw32_f(MAC_MODE, tp->mac_mode);
8975         udelay(40);
8976
8977         tp->tx_mode &= ~TX_MODE_ENABLE;
8978         tw32_f(MAC_TX_MODE, tp->tx_mode);
8979
8980         for (i = 0; i < MAX_WAIT_CNT; i++) {
8981                 udelay(100);
8982                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8983                         break;
8984         }
8985         if (i >= MAX_WAIT_CNT) {
8986                 dev_err(&tp->pdev->dev,
8987                         "%s timed out, TX_MODE_ENABLE will not clear "
8988                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8989                 err |= -ENODEV;
8990         }
8991
8992         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8993         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8994         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8995
8996         tw32(FTQ_RESET, 0xffffffff);
8997         tw32(FTQ_RESET, 0x00000000);
8998
8999         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9000         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9001
9002 err_no_dev:
9003         for (i = 0; i < tp->irq_cnt; i++) {
9004                 struct tg3_napi *tnapi = &tp->napi[i];
9005                 if (tnapi->hw_status)
9006                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9007         }
9008
9009         return err;
9010 }
9011
9012 /* Save PCI command register before chip reset */
9013 static void tg3_save_pci_state(struct tg3 *tp)
9014 {
9015         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9016 }
9017
9018 /* Restore PCI state after chip reset */
9019 static void tg3_restore_pci_state(struct tg3 *tp)
9020 {
9021         u32 val;
9022
9023         /* Re-enable indirect register accesses. */
9024         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9025                                tp->misc_host_ctrl);
9026
9027         /* Set MAX PCI retry to zero. */
9028         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9029         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9030             tg3_flag(tp, PCIX_MODE))
9031                 val |= PCISTATE_RETRY_SAME_DMA;
9032         /* Allow reads and writes to the APE register and memory space. */
9033         if (tg3_flag(tp, ENABLE_APE))
9034                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9035                        PCISTATE_ALLOW_APE_SHMEM_WR |
9036                        PCISTATE_ALLOW_APE_PSPACE_WR;
9037         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9038
9039         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9040
9041         if (!tg3_flag(tp, PCI_EXPRESS)) {
9042                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9043                                       tp->pci_cacheline_sz);
9044                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9045                                       tp->pci_lat_timer);
9046         }
9047
9048         /* Make sure PCI-X relaxed ordering bit is clear. */
9049         if (tg3_flag(tp, PCIX_MODE)) {
9050                 u16 pcix_cmd;
9051
9052                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9053                                      &pcix_cmd);
9054                 pcix_cmd &= ~PCI_X_CMD_ERO;
9055                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9056                                       pcix_cmd);
9057         }
9058
9059         if (tg3_flag(tp, 5780_CLASS)) {
9060
9061                 /* Chip reset on 5780 will reset MSI enable bit,
9062                  * so we need to restore it.
9063                  */
9064                 if (tg3_flag(tp, USING_MSI)) {
9065                         u16 ctrl;
9066
9067                         pci_read_config_word(tp->pdev,
9068                                              tp->msi_cap + PCI_MSI_FLAGS,
9069                                              &ctrl);
9070                         pci_write_config_word(tp->pdev,
9071                                               tp->msi_cap + PCI_MSI_FLAGS,
9072                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9073                         val = tr32(MSGINT_MODE);
9074                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9075                 }
9076         }
9077 }
9078
9079 static void tg3_override_clk(struct tg3 *tp)
9080 {
9081         u32 val;
9082
9083         switch (tg3_asic_rev(tp)) {
9084         case ASIC_REV_5717:
9085                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9086                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9087                      TG3_CPMU_MAC_ORIDE_ENABLE);
9088                 break;
9089
9090         case ASIC_REV_5719:
9091         case ASIC_REV_5720:
9092                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9093                 break;
9094
9095         default:
9096                 return;
9097         }
9098 }
9099
9100 static void tg3_restore_clk(struct tg3 *tp)
9101 {
9102         u32 val;
9103
9104         switch (tg3_asic_rev(tp)) {
9105         case ASIC_REV_5717:
9106                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9107                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9108                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9109                 break;
9110
9111         case ASIC_REV_5719:
9112         case ASIC_REV_5720:
9113                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9114                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9115                 break;
9116
9117         default:
9118                 return;
9119         }
9120 }
9121
9122 /* tp->lock is held. */
9123 static int tg3_chip_reset(struct tg3 *tp)
9124         __releases(tp->lock)
9125         __acquires(tp->lock)
9126 {
9127         u32 val;
9128         void (*write_op)(struct tg3 *, u32, u32);
9129         int i, err;
9130
9131         if (!pci_device_is_present(tp->pdev))
9132                 return -ENODEV;
9133
9134         tg3_nvram_lock(tp);
9135
9136         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9137
9138         /* No matching tg3_nvram_unlock() after this because
9139          * chip reset below will undo the nvram lock.
9140          */
9141         tp->nvram_lock_cnt = 0;
9142
9143         /* GRC_MISC_CFG core clock reset will clear the memory
9144          * enable bit in PCI register 4 and the MSI enable bit
9145          * on some chips, so we save relevant registers here.
9146          */
9147         tg3_save_pci_state(tp);
9148
9149         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9150             tg3_flag(tp, 5755_PLUS))
9151                 tw32(GRC_FASTBOOT_PC, 0);
9152
9153         /*
9154          * We must avoid the readl() that normally takes place.
9155          * It locks up machines, causes machine checks, and other
9156          * fun things.  So, temporarily disable the 5701
9157          * hardware workaround while we do the reset.
9158          */
9159         write_op = tp->write32;
9160         if (write_op == tg3_write_flush_reg32)
9161                 tp->write32 = tg3_write32;
9162
9163         /* Prevent the irq handler from reading or writing PCI registers
9164          * during chip reset when the memory enable bit in the PCI command
9165          * register may be cleared.  The chip does not generate interrupts
9166          * at this time, but the irq handler may still be called due to irq
9167          * sharing or irqpoll.
9168          */
9169         tg3_flag_set(tp, CHIP_RESETTING);
9170         for (i = 0; i < tp->irq_cnt; i++) {
9171                 struct tg3_napi *tnapi = &tp->napi[i];
9172                 if (tnapi->hw_status) {
9173                         tnapi->hw_status->status = 0;
9174                         tnapi->hw_status->status_tag = 0;
9175                 }
9176                 tnapi->last_tag = 0;
9177                 tnapi->last_irq_tag = 0;
9178         }
9179         smp_mb();
9180
9181         tg3_full_unlock(tp);
9182
9183         for (i = 0; i < tp->irq_cnt; i++)
9184                 synchronize_irq(tp->napi[i].irq_vec);
9185
9186         tg3_full_lock(tp, 0);
9187
9188         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9189                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9190                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9191         }
9192
9193         /* do the reset */
9194         val = GRC_MISC_CFG_CORECLK_RESET;
9195
9196         if (tg3_flag(tp, PCI_EXPRESS)) {
9197                 /* Force PCIe 1.0a mode */
9198                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9199                     !tg3_flag(tp, 57765_PLUS) &&
9200                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9201                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9202                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9203
9204                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9205                         tw32(GRC_MISC_CFG, (1 << 29));
9206                         val |= (1 << 29);
9207                 }
9208         }
9209
9210         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9211                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9212                 tw32(GRC_VCPU_EXT_CTRL,
9213                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9214         }
9215
9216         /* Set the clock to the highest frequency to avoid timeouts. With link
9217          * aware mode, the clock speed could be slow and the bootcode might not
9218          * complete within the expected time. Override the clock to allow the
9219          * bootcode to finish sooner and then restore it.
9220          */
9221         tg3_override_clk(tp);
9222
9223         /* Manage gphy power for all CPMU absent PCIe devices. */
9224         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9225                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9226
9227         tw32(GRC_MISC_CFG, val);
9228
9229         /* restore 5701 hardware bug workaround write method */
9230         tp->write32 = write_op;
9231
9232         /* Unfortunately, we have to delay before the PCI read back.
9233          * Some 575X chips will not even respond to a PCI cfg access
9234          * when the reset command is given to the chip.
9235          *
9236          * How do these hardware designers expect things to work
9237          * properly if the PCI write is posted for a long period
9238          * of time?  It is always necessary to have some method by
9239          * which a register read back can occur to push the write
9240          * out which does the reset.
9241          *
9242          * For most tg3 variants the trick below was working.
9243          * Ho hum...
9244          */
9245         udelay(120);
9246
9247         /* Flush PCI posted writes.  The normal MMIO registers
9248          * are inaccessible at this time so this is the only
9249          * way to do this reliably (actually, this is no longer
9250          * the case, see above).  I tried to use indirect
9251          * register read/write but this upset some 5701 variants.
9252          */
9253         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9254
9255         udelay(120);
9256
9257         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9258                 u16 val16;
9259
9260                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9261                         int j;
9262                         u32 cfg_val;
9263
9264                         /* Wait for link training to complete.  */
9265                         for (j = 0; j < 5000; j++)
9266                                 udelay(100);
9267
9268                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9269                         pci_write_config_dword(tp->pdev, 0xc4,
9270                                                cfg_val | (1 << 15));
9271                 }
9272
9273                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9274                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9275                 /*
9276                  * Older PCIe devices only support the 128 byte
9277                  * MPS setting.  Enforce the restriction.
9278                  */
9279                 if (!tg3_flag(tp, CPMU_PRESENT))
9280                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9281                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9282
9283                 /* Clear error status */
9284                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9285                                       PCI_EXP_DEVSTA_CED |
9286                                       PCI_EXP_DEVSTA_NFED |
9287                                       PCI_EXP_DEVSTA_FED |
9288                                       PCI_EXP_DEVSTA_URD);
9289         }
9290
9291         tg3_restore_pci_state(tp);
9292
9293         tg3_flag_clear(tp, CHIP_RESETTING);
9294         tg3_flag_clear(tp, ERROR_PROCESSED);
9295
9296         val = 0;
9297         if (tg3_flag(tp, 5780_CLASS))
9298                 val = tr32(MEMARB_MODE);
9299         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9300
9301         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9302                 tg3_stop_fw(tp);
9303                 tw32(0x5000, 0x400);
9304         }
9305
9306         if (tg3_flag(tp, IS_SSB_CORE)) {
9307                 /*
9308                  * BCM4785: In order to avoid repercussions from using
9309                  * potentially defective internal ROM, stop the Rx RISC CPU,
9310                  * which is not required for normal operation.
9311                  */
9312                 tg3_stop_fw(tp);
9313                 tg3_halt_cpu(tp, RX_CPU_BASE);
9314         }
9315
9316         err = tg3_poll_fw(tp);
9317         if (err)
9318                 return err;
9319
9320         tw32(GRC_MODE, tp->grc_mode);
9321
9322         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9323                 val = tr32(0xc4);
9324
9325                 tw32(0xc4, val | (1 << 15));
9326         }
9327
9328         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9329             tg3_asic_rev(tp) == ASIC_REV_5705) {
9330                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9331                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9332                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9333                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9334         }
9335
9336         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9337                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9338                 val = tp->mac_mode;
9339         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9340                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9341                 val = tp->mac_mode;
9342         } else
9343                 val = 0;
9344
9345         tw32_f(MAC_MODE, val);
9346         udelay(40);
9347
9348         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9349
9350         tg3_mdio_start(tp);
9351
9352         if (tg3_flag(tp, PCI_EXPRESS) &&
9353             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9354             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9355             !tg3_flag(tp, 57765_PLUS)) {
9356                 val = tr32(0x7c00);
9357
9358                 tw32(0x7c00, val | (1 << 25));
9359         }
9360
9361         tg3_restore_clk(tp);
9362
9363         /* Increase the core clock speed to fix a tx timeout issue on the
9364          * 5762 at 100Mbps link speed.
9365          */
9366         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9367                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9368                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9369                      TG3_CPMU_MAC_ORIDE_ENABLE);
9370         }
9371
9372         /* Reprobe ASF enable state.  */
9373         tg3_flag_clear(tp, ENABLE_ASF);
9374         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9375                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9376
9377         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9378         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9379         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9380                 u32 nic_cfg;
9381
9382                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9383                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9384                         tg3_flag_set(tp, ENABLE_ASF);
9385                         tp->last_event_jiffies = jiffies;
9386                         if (tg3_flag(tp, 5750_PLUS))
9387                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9388
9389                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9390                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9391                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9392                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9393                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9394                 }
9395         }
9396
9397         return 0;
9398 }
9399
9400 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9401 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9402 static void __tg3_set_rx_mode(struct net_device *);
9403
9404 /* tp->lock is held. */
9405 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9406 {
9407         int err, i;
9408
9409         tg3_stop_fw(tp);
9410
9411         tg3_write_sig_pre_reset(tp, kind);
9412
9413         tg3_abort_hw(tp, silent);
9414         err = tg3_chip_reset(tp);
9415
9416         __tg3_set_mac_addr(tp, false);
9417
9418         tg3_write_sig_legacy(tp, kind);
9419         tg3_write_sig_post_reset(tp, kind);
9420
9421         if (tp->hw_stats) {
9422                 /* Save the stats across chip resets... */
9423                 tg3_get_nstats(tp, &tp->net_stats_prev);
9424                 tg3_get_estats(tp, &tp->estats_prev);
9425
9426                 /* And make sure the next sample is new data */
9427                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9428
9429                 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9430                         struct tg3_napi *tnapi = &tp->napi[i];
9431
9432                         tnapi->rx_dropped = 0;
9433                         tnapi->tx_dropped = 0;
9434                 }
9435         }
9436
9437         return err;
9438 }
9439
9440 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9441 {
9442         struct tg3 *tp = netdev_priv(dev);
9443         struct sockaddr *addr = p;
9444         int err = 0;
9445         bool skip_mac_1 = false;
9446
9447         if (!is_valid_ether_addr(addr->sa_data))
9448                 return -EADDRNOTAVAIL;
9449
9450         eth_hw_addr_set(dev, addr->sa_data);
9451
9452         if (!netif_running(dev))
9453                 return 0;
9454
9455         if (tg3_flag(tp, ENABLE_ASF)) {
9456                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9457
9458                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9459                 addr0_low = tr32(MAC_ADDR_0_LOW);
9460                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9461                 addr1_low = tr32(MAC_ADDR_1_LOW);
9462
9463                 /* Skip MAC addr 1 if ASF is using it. */
9464                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9465                     !(addr1_high == 0 && addr1_low == 0))
9466                         skip_mac_1 = true;
9467         }
9468         spin_lock_bh(&tp->lock);
9469         __tg3_set_mac_addr(tp, skip_mac_1);
9470         __tg3_set_rx_mode(dev);
9471         spin_unlock_bh(&tp->lock);
9472
9473         return err;
9474 }
9475
9476 /* tp->lock is held. */
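/* A BDINFO block in NIC SRAM holds, in order: the 64-bit host DMA address
 * of the ring (high word first), a combined maxlen/flags word and, on
 * pre-5705 parts, a NIC-local ring address.  Callers such as
 * tg3_tx_rcbs_init() pass the ring size shifted by
 * BDINFO_FLAGS_MAXLEN_SHIFT as maxlen_flags.
 */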
9477 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9478                            dma_addr_t mapping, u32 maxlen_flags,
9479                            u32 nic_addr)
9480 {
9481         tg3_write_mem(tp,
9482                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9483                       ((u64) mapping >> 32));
9484         tg3_write_mem(tp,
9485                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9486                       ((u64) mapping & 0xffffffff));
9487         tg3_write_mem(tp,
9488                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9489                        maxlen_flags);
9490
9491         if (!tg3_flag(tp, 5705_PLUS))
9492                 tg3_write_mem(tp,
9493                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9494                               nic_addr);
9495 }
9496
9497
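/* Program TX host coalescing.  Without TSS only the global HOSTCC_TX*
 * registers are used; with TSS those are zeroed and each vector is
 * programmed through its own register set, spaced 0x18 bytes apart
 * starting at the *_VEC1 offsets.  Vectors beyond txq_cnt are zeroed so
 * they are left with coalescing disabled.
 */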
9498 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9499 {
9500         int i = 0;
9501
9502         if (!tg3_flag(tp, ENABLE_TSS)) {
9503                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9504                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9505                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9506         } else {
9507                 tw32(HOSTCC_TXCOL_TICKS, 0);
9508                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9509                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9510
9511                 for (; i < tp->txq_cnt; i++) {
9512                         u32 reg;
9513
9514                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9515                         tw32(reg, ec->tx_coalesce_usecs);
9516                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9517                         tw32(reg, ec->tx_max_coalesced_frames);
9518                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9519                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9520                 }
9521         }
9522
9523         for (; i < tp->irq_max - 1; i++) {
9524                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9525                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9526                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9527         }
9528 }
9529
9530 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9531 {
9532         int i = 0;
9533         u32 limit = tp->rxq_cnt;
9534
9535         if (!tg3_flag(tp, ENABLE_RSS)) {
9536                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9537                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9538                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9539                 limit--;
9540         } else {
9541                 tw32(HOSTCC_RXCOL_TICKS, 0);
9542                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9543                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9544         }
9545
9546         for (; i < limit; i++) {
9547                 u32 reg;
9548
9549                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9550                 tw32(reg, ec->rx_coalesce_usecs);
9551                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9552                 tw32(reg, ec->rx_max_coalesced_frames);
9553                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9554                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9555         }
9556
9557         for (; i < tp->irq_max - 1; i++) {
9558                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9559                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9560                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9561         }
9562 }
9563
9564 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9565 {
9566         tg3_coal_tx_init(tp, ec);
9567         tg3_coal_rx_init(tp, ec);
9568
9569         if (!tg3_flag(tp, 5705_PLUS)) {
9570                 u32 val = ec->stats_block_coalesce_usecs;
9571
9572                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9573                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9574
9575                 if (!tp->link_up)
9576                         val = 0;
9577
9578                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9579         }
9580 }
9581
9582 /* tp->lock is held. */
9583 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9584 {
9585         u32 txrcb, limit;
9586
9587         /* Disable all transmit rings but the first. */
9588         if (!tg3_flag(tp, 5705_PLUS))
9589                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9590         else if (tg3_flag(tp, 5717_PLUS))
9591                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9592         else if (tg3_flag(tp, 57765_CLASS) ||
9593                  tg3_asic_rev(tp) == ASIC_REV_5762)
9594                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9595         else
9596                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9597
9598         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9599              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9600                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9601                               BDINFO_FLAGS_DISABLED);
9602 }
9603
9604 /* tp->lock is held. */
9605 static void tg3_tx_rcbs_init(struct tg3 *tp)
9606 {
9607         int i = 0;
9608         u32 txrcb = NIC_SRAM_SEND_RCB;
9609
9610         if (tg3_flag(tp, ENABLE_TSS))
9611                 i++;
9612
9613         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9614                 struct tg3_napi *tnapi = &tp->napi[i];
9615
9616                 if (!tnapi->tx_ring)
9617                         continue;
9618
9619                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9620                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9621                                NIC_SRAM_TX_BUFFER_DESC);
9622         }
9623 }
9624
9625 /* tp->lock is held. */
9626 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9627 {
9628         u32 rxrcb, limit;
9629
9630         /* Disable all receive return rings but the first. */
9631         if (tg3_flag(tp, 5717_PLUS))
9632                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9633         else if (!tg3_flag(tp, 5705_PLUS))
9634                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9635         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9636                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9637                  tg3_flag(tp, 57765_CLASS))
9638                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9639         else
9640                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9641
9642         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9643              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9644                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9645                               BDINFO_FLAGS_DISABLED);
9646 }
9647
9648 /* tp->lock is held. */
9649 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9650 {
9651         int i = 0;
9652         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9653
9654         if (tg3_flag(tp, ENABLE_RSS))
9655                 i++;
9656
9657         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9658                 struct tg3_napi *tnapi = &tp->napi[i];
9659
9660                 if (!tnapi->rx_rcb)
9661                         continue;
9662
9663                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9664                                (tp->rx_ret_ring_mask + 1) <<
9665                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9666         }
9667 }
9668
9669 /* tp->lock is held. */
9670 static void tg3_rings_reset(struct tg3 *tp)
9671 {
9672         int i;
9673         u32 stblk;
9674         struct tg3_napi *tnapi = &tp->napi[0];
9675
9676         tg3_tx_rcbs_disable(tp);
9677
9678         tg3_rx_ret_rcbs_disable(tp);
9679
9680         /* Disable interrupts */
9681         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9682         tp->napi[0].chk_msi_cnt = 0;
9683         tp->napi[0].last_rx_cons = 0;
9684         tp->napi[0].last_tx_cons = 0;
9685
9686         /* Zero mailbox registers. */
9687         if (tg3_flag(tp, SUPPORT_MSIX)) {
9688                 for (i = 1; i < tp->irq_max; i++) {
9689                         tp->napi[i].tx_prod = 0;
9690                         tp->napi[i].tx_cons = 0;
9691                         if (tg3_flag(tp, ENABLE_TSS))
9692                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9693                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9694                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9695                         tp->napi[i].chk_msi_cnt = 0;
9696                         tp->napi[i].last_rx_cons = 0;
9697                         tp->napi[i].last_tx_cons = 0;
9698                 }
9699                 if (!tg3_flag(tp, ENABLE_TSS))
9700                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9701         } else {
9702                 tp->napi[0].tx_prod = 0;
9703                 tp->napi[0].tx_cons = 0;
9704                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9705                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9706         }
9707
9708         /* Make sure the NIC-based send BD rings are disabled. */
9709         if (!tg3_flag(tp, 5705_PLUS)) {
9710                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9711                 for (i = 0; i < 16; i++)
9712                         tw32_tx_mbox(mbox + i * 8, 0);
9713         }
9714
9715         /* Clear status block in ram. */
9716         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9717
9718         /* Set status block DMA address */
9719         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9720              ((u64) tnapi->status_mapping >> 32));
9721         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9722              ((u64) tnapi->status_mapping & 0xffffffff));
9723
9724         stblk = HOSTCC_STATBLCK_RING1;
9725
9726         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9727                 u64 mapping = (u64)tnapi->status_mapping;
9728                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9729                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9730                 stblk += 8;
9731
9732                 /* Clear status block in ram. */
9733                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9734         }
9735
9736         tg3_tx_rcbs_init(tp);
9737         tg3_rx_ret_rcbs_init(tp);
9738 }
9739
9740 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9741 {
9742         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9743
9744         if (!tg3_flag(tp, 5750_PLUS) ||
9745             tg3_flag(tp, 5780_CLASS) ||
9746             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9747             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9748             tg3_flag(tp, 57765_PLUS))
9749                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9750         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9751                  tg3_asic_rev(tp) == ASIC_REV_5787)
9752                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9753         else
9754                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9755
9756         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9757         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9758
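        /* Worked example with hypothetical numbers: rx_pending = 200 gives
         * host_rep_thresh = max(200 / 8, 1) = 25; a BD cache of 8 entries
         * gives nic_rep_thresh = min(8 / 2, rx_std_max_post), so the value
         * programmed below is min(4, 25) = 4 whenever rx_std_max_post >= 4.
         */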
9759         val = min(nic_rep_thresh, host_rep_thresh);
9760         tw32(RCVBDI_STD_THRESH, val);
9761
9762         if (tg3_flag(tp, 57765_PLUS))
9763                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9764
9765         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9766                 return;
9767
9768         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9769
9770         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9771
9772         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9773         tw32(RCVBDI_JUMBO_THRESH, val);
9774
9775         if (tg3_flag(tp, 57765_PLUS))
9776                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9777 }
9778
9779 static inline u32 calc_crc(unsigned char *buf, int len)
9780 {
9781         u32 reg;
9782         u32 tmp;
9783         int j, k;
9784
9785         reg = 0xffffffff;
9786
9787         for (j = 0; j < len; j++) {
9788                 reg ^= buf[j];
9789
9790                 for (k = 0; k < 8; k++) {
9791                         tmp = reg & 0x01;
9792
9793                         reg >>= 1;
9794
9795                         if (tmp)
9796                                 reg ^= CRC32_POLY_LE;
9797                 }
9798         }
9799
9800         return ~reg;
9801 }
9802
9803 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9804 {
9805         /* accept or reject all multicast frames */
9806         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9807         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9808         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9809         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9810 }
9811
9812 static void __tg3_set_rx_mode(struct net_device *dev)
9813 {
9814         struct tg3 *tp = netdev_priv(dev);
9815         u32 rx_mode;
9816
9817         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9818                                   RX_MODE_KEEP_VLAN_TAG);
9819
9820 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9821         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9822          * flag clear.
9823          */
9824         if (!tg3_flag(tp, ENABLE_ASF))
9825                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9826 #endif
9827
9828         if (dev->flags & IFF_PROMISC) {
9829                 /* Promiscuous mode. */
9830                 rx_mode |= RX_MODE_PROMISC;
9831         } else if (dev->flags & IFF_ALLMULTI) {
9832                 /* Accept all multicast. */
9833                 tg3_set_multi(tp, 1);
9834         } else if (netdev_mc_empty(dev)) {
9835                 /* Reject all multicast. */
9836                 tg3_set_multi(tp, 0);
9837         } else {
9838                 /* Accept one or more multicast(s). */
9839                 struct netdev_hw_addr *ha;
9840                 u32 mc_filter[4] = { 0, };
9841                 u32 regidx;
9842                 u32 bit;
9843                 u32 crc;
9844
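                /* calc_crc() computes a bit-serial CRC-32 over the
                 * little-endian polynomial; the filter keeps the low
                 * 7 bits of the inverted CRC and spreads them across
                 * the four hash registers.  E.g. (hypothetical value)
                 * ~crc & 0x7f == 0x43 gives regidx = (0x43 & 0x60) >> 5 = 2
                 * and bit = 0x43 & 0x1f = 3, i.e. bit 3 of MAC_HASH_REG_2.
                 */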
9845                 netdev_for_each_mc_addr(ha, dev) {
9846                         crc = calc_crc(ha->addr, ETH_ALEN);
9847                         bit = ~crc & 0x7f;
9848                         regidx = (bit & 0x60) >> 5;
9849                         bit &= 0x1f;
9850                         mc_filter[regidx] |= (1 << bit);
9851                 }
9852
9853                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9854                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9855                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9856                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9857         }
9858
9859         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9860                 rx_mode |= RX_MODE_PROMISC;
9861         } else if (!(dev->flags & IFF_PROMISC)) {
9862                 /* Add all entries to the MAC address filter list */
9863                 int i = 0;
9864                 struct netdev_hw_addr *ha;
9865
9866                 netdev_for_each_uc_addr(ha, dev) {
9867                         __tg3_set_one_mac_addr(tp, ha->addr,
9868                                                i + TG3_UCAST_ADDR_IDX(tp));
9869                         i++;
9870                 }
9871         }
9872
9873         if (rx_mode != tp->rx_mode) {
9874                 tp->rx_mode = rx_mode;
9875                 tw32_f(MAC_RX_MODE, rx_mode);
9876                 udelay(10);
9877         }
9878 }
9879
9880 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9881 {
9882         int i;
9883
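        /* ethtool_rxfh_indir_default() spreads table entries round-robin
         * across the queues, so with qcnt == 4 the table reads
         * 0, 1, 2, 3, 0, 1, 2, 3, ...
         */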
9884         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9885                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9886 }
9887
9888 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9889 {
9890         int i;
9891
9892         if (!tg3_flag(tp, SUPPORT_MSIX))
9893                 return;
9894
9895         if (tp->rxq_cnt == 1) {
9896                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9897                 return;
9898         }
9899
9900         /* Validate table against current IRQ count */
9901         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9902                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9903                         break;
9904         }
9905
9906         if (i != TG3_RSS_INDIR_TBL_SIZE)
9907                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9908 }
9909
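/* Eight 4-bit indirection entries are packed per 32-bit register, first
 * entry in the most significant nibble: e.g. entries 1,2,3,0,1,2,3,0 are
 * written as 0x12301230, starting at MAC_RSS_INDIR_TBL_0 and advancing
 * 4 bytes per register.
 */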
9910 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9911 {
9912         int i = 0;
9913         u32 reg = MAC_RSS_INDIR_TBL_0;
9914
9915         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9916                 u32 val = tp->rss_ind_tbl[i];
9917                 i++;
9918                 for (; i % 8; i++) {
9919                         val <<= 4;
9920                         val |= tp->rss_ind_tbl[i];
9921                 }
9922                 tw32(reg, val);
9923                 reg += 4;
9924         }
9925 }
9926
9927 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9928 {
9929         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9930                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9931         else
9932                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9933 }
9934
9935 /* tp->lock is held. */
9936 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9937 {
9938         u32 val, rdmac_mode;
9939         int i, err, limit;
9940         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9941
9942         tg3_disable_ints(tp);
9943
9944         tg3_stop_fw(tp);
9945
9946         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9947
9948         if (tg3_flag(tp, INIT_COMPLETE))
9949                 tg3_abort_hw(tp, 1);
9950
9951         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9952             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9953                 tg3_phy_pull_config(tp);
9954                 tg3_eee_pull_config(tp, NULL);
9955                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9956         }
9957
9958         /* Enable MAC control of LPI */
9959         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9960                 tg3_setup_eee(tp);
9961
9962         if (reset_phy)
9963                 tg3_phy_reset(tp);
9964
9965         err = tg3_chip_reset(tp);
9966         if (err)
9967                 return err;
9968
9969         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9970
9971         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9972                 val = tr32(TG3_CPMU_CTRL);
9973                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9974                 tw32(TG3_CPMU_CTRL, val);
9975
9976                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9977                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9978                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9979                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9980
9981                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9982                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9983                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9984                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9985
9986                 val = tr32(TG3_CPMU_HST_ACC);
9987                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9988                 val |= CPMU_HST_ACC_MACCLK_6_25;
9989                 tw32(TG3_CPMU_HST_ACC, val);
9990         }
9991
9992         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9993                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9994                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9995                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9996                 tw32(PCIE_PWR_MGMT_THRESH, val);
9997
9998                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9999                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10000
10001                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10002
10003                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10004                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10005         }
10006
10007         if (tg3_flag(tp, L1PLLPD_EN)) {
10008                 u32 grc_mode = tr32(GRC_MODE);
10009
10010                 /* Access the lower 1K of PL PCIE block registers. */
10011                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10012                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10013
10014                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10015                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10016                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10017
10018                 tw32(GRC_MODE, grc_mode);
10019         }
10020
10021         if (tg3_flag(tp, 57765_CLASS)) {
10022                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10023                         u32 grc_mode = tr32(GRC_MODE);
10024
10025                         /* Access the lower 1K of PL PCIE block registers. */
10026                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10027                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10028
10029                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10030                                    TG3_PCIE_PL_LO_PHYCTL5);
10031                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10032                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10033
10034                         tw32(GRC_MODE, grc_mode);
10035                 }
10036
10037                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10038                         u32 grc_mode;
10039
10040                         /* Fix transmit hangs */
10041                         val = tr32(TG3_CPMU_PADRNG_CTL);
10042                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10043                         tw32(TG3_CPMU_PADRNG_CTL, val);
10044
10045                         grc_mode = tr32(GRC_MODE);
10046
10047                         /* Access the lower 1K of DL PCIE block registers. */
10048                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10049                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10050
10051                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10052                                    TG3_PCIE_DL_LO_FTSMAX);
10053                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10054                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10055                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10056
10057                         tw32(GRC_MODE, grc_mode);
10058                 }
10059
10060                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10061                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10062                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10063                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10064         }
10065
10066         /* This works around an issue with Athlon chipsets on
10067          * B3 tigon3 silicon.  This bit has no effect on any
10068          * other revision.  But do not set this on PCI Express
10069          * chips and don't even touch the clocks if the CPMU is present.
10070          */
10071         if (!tg3_flag(tp, CPMU_PRESENT)) {
10072                 if (!tg3_flag(tp, PCI_EXPRESS))
10073                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10074                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10075         }
10076
10077         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10078             tg3_flag(tp, PCIX_MODE)) {
10079                 val = tr32(TG3PCI_PCISTATE);
10080                 val |= PCISTATE_RETRY_SAME_DMA;
10081                 tw32(TG3PCI_PCISTATE, val);
10082         }
10083
10084         if (tg3_flag(tp, ENABLE_APE)) {
10085                 /* Allow reads and writes to the
10086                  * APE register and memory space.
10087                  */
10088                 val = tr32(TG3PCI_PCISTATE);
10089                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10090                        PCISTATE_ALLOW_APE_SHMEM_WR |
10091                        PCISTATE_ALLOW_APE_PSPACE_WR;
10092                 tw32(TG3PCI_PCISTATE, val);
10093         }
10094
10095         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10096                 /* Enable some hw fixes.  */
10097                 val = tr32(TG3PCI_MSI_DATA);
10098                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10099                 tw32(TG3PCI_MSI_DATA, val);
10100         }
10101
10102         /* Descriptor ring init may make accesses to the
10103          * NIC SRAM area to set up the TX descriptors, so we
10104          * can only do this after the hardware has been
10105          * successfully reset.
10106          */
10107         err = tg3_init_rings(tp);
10108         if (err)
10109                 return err;
10110
10111         if (tg3_flag(tp, 57765_PLUS)) {
10112                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10113                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10114                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10115                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10116                 if (!tg3_flag(tp, 57765_CLASS) &&
10117                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10118                     tg3_asic_rev(tp) != ASIC_REV_5762)
10119                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10120                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10121         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10122                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10123                 /* This value is determined during the probe-time DMA
10124                  * engine test, tg3_test_dma.
10125                  */
10126                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10127         }
10128
10129         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10130                           GRC_MODE_4X_NIC_SEND_RINGS |
10131                           GRC_MODE_NO_TX_PHDR_CSUM |
10132                           GRC_MODE_NO_RX_PHDR_CSUM);
10133         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10134
10135         /* Pseudo-header checksum is done by hardware logic and not
10136          * the offload processors, so make the chip do the pseudo-
10137          * header checksums on receive.  For transmit it is more
10138          * convenient to do the pseudo-header checksum in software
10139          * as Linux does that on transmit for us in all cases.
10140          */
10141         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10142
10143         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10144         if (tp->rxptpctl)
10145                 tw32(TG3_RX_PTP_CTL,
10146                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10147
10148         if (tg3_flag(tp, PTP_CAPABLE))
10149                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10150
10151         tw32(GRC_MODE, tp->grc_mode | val);
10152
10153         /* On one of the AMD platforms, the MRRS is restricted to 4000
10154          * because of a south bridge limitation. As a workaround, the
10155          * driver sets the MRRS to 2048 instead of the default 4096.
10156          */
10157         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10158             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10159                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10160                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10161         }
10162
10163         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10164         val = tr32(GRC_MISC_CFG);
10165         val &= ~0xff;
10166         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10167         tw32(GRC_MISC_CFG, val);
10168
10169         /* Initialize MBUF/DESC pool. */
10170         if (tg3_flag(tp, 5750_PLUS)) {
10171                 /* Do nothing.  */
10172         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10173                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10174                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10175                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10176                 else
10177                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10178                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10179                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10180         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10181                 int fw_len;
10182
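                /* Round the firmware length up to a 128-byte boundary;
                 * the MBUF pool is then placed just past the TSO
                 * firmware image in NIC SRAM.
                 */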
10183                 fw_len = tp->fw_len;
10184                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10185                 tw32(BUFMGR_MB_POOL_ADDR,
10186                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10187                 tw32(BUFMGR_MB_POOL_SIZE,
10188                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10189         }
10190
10191         if (tp->dev->mtu <= ETH_DATA_LEN) {
10192                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10193                      tp->bufmgr_config.mbuf_read_dma_low_water);
10194                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10195                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10196                 tw32(BUFMGR_MB_HIGH_WATER,
10197                      tp->bufmgr_config.mbuf_high_water);
10198         } else {
10199                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10200                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10201                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10202                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10203                 tw32(BUFMGR_MB_HIGH_WATER,
10204                      tp->bufmgr_config.mbuf_high_water_jumbo);
10205         }
10206         tw32(BUFMGR_DMA_LOW_WATER,
10207              tp->bufmgr_config.dma_low_water);
10208         tw32(BUFMGR_DMA_HIGH_WATER,
10209              tp->bufmgr_config.dma_high_water);
10210
10211         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10212         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10213                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10214         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10215             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10216             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10217             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10218                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10219         tw32(BUFMGR_MODE, val);
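        /* Poll up to ~20 ms (2000 x 10 us) for the buffer manager to
         * report itself enabled.
         */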
10220         for (i = 0; i < 2000; i++) {
10221                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10222                         break;
10223                 udelay(10);
10224         }
10225         if (i >= 2000) {
10226                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10227                 return -ENODEV;
10228         }
10229
10230         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10231                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10232
10233         tg3_setup_rxbd_thresholds(tp);
10234
10235         /* Initialize TG3_BDINFO's at:
10236          *  RCVDBDI_STD_BD:     standard eth size rx ring
10237          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10238          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10239          *
10240          * like so:
10241          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10242          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10243          *                              ring attribute flags
10244          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10245          *
10246          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10247          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10248          *
10249          * The size of each ring is fixed in the firmware, but the location is
10250          * configurable.
10251          */
10252         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10253              ((u64) tpr->rx_std_mapping >> 32));
10254         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10255              ((u64) tpr->rx_std_mapping & 0xffffffff));
10256         if (!tg3_flag(tp, 5717_PLUS))
10257                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10258                      NIC_SRAM_RX_BUFFER_DESC);
10259
10260         /* Disable the mini ring */
10261         if (!tg3_flag(tp, 5705_PLUS))
10262                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10263                      BDINFO_FLAGS_DISABLED);
10264
10265         /* Program the jumbo buffer descriptor ring control
10266          * blocks on those devices that have them.
10267          */
10268         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10269             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10270
10271                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10272                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10273                              ((u64) tpr->rx_jmb_mapping >> 32));
10274                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10275                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10276                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10277                               BDINFO_FLAGS_MAXLEN_SHIFT;
10278                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10279                              val | BDINFO_FLAGS_USE_EXT_RECV);
10280                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10281                             tg3_flag(tp, 57765_CLASS) ||
10282                             tg3_asic_rev(tp) == ASIC_REV_5762)
10283                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10284                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10285                 } else {
10286                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10287                              BDINFO_FLAGS_DISABLED);
10288                 }
10289
10290                 if (tg3_flag(tp, 57765_PLUS)) {
10291                         val = TG3_RX_STD_RING_SIZE(tp);
10292                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10293                         val |= (TG3_RX_STD_DMA_SZ << 2);
10294                 } else
10295                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10296         } else
10297                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10298
10299         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10300
10301         tpr->rx_std_prod_idx = tp->rx_pending;
10302         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10303
10304         tpr->rx_jmb_prod_idx =
10305                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10306         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10307
10308         tg3_rings_reset(tp);
10309
10310         /* Initialize MAC address and backoff seed. */
10311         __tg3_set_mac_addr(tp, false);
10312
10313         /* MTU + ethernet header + FCS + optional VLAN tag */
10314         tw32(MAC_RX_MTU_SIZE,
10315              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10316
10317         /* The slot time is changed by tg3_setup_phy if we
10318          * run at gigabit with half duplex.
10319          */
10320         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10321               (6 << TX_LENGTHS_IPG_SHIFT) |
10322               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10323
10324         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10325             tg3_asic_rev(tp) == ASIC_REV_5762)
10326                 val |= tr32(MAC_TX_LENGTHS) &
10327                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10328                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10329
10330         tw32(MAC_TX_LENGTHS, val);
10331
10332         /* Receive rules. */
10333         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10334         tw32(RCVLPC_CONFIG, 0x0181);
10335
10336         /* Calculate the RDMAC_MODE setting early; we need it to determine
10337          * the RCVLPC_STATE_ENABLE mask.
10338          */
10339         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10340                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10341                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10342                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10343                       RDMAC_MODE_LNGREAD_ENAB);
10344
10345         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10346                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10347
10348         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10349             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10350             tg3_asic_rev(tp) == ASIC_REV_57780)
10351                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10352                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10353                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10354
10355         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10356             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10357                 if (tg3_flag(tp, TSO_CAPABLE)) {
10358                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10359                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10360                            !tg3_flag(tp, IS_5788)) {
10361                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10362                 }
10363         }
10364
10365         if (tg3_flag(tp, PCI_EXPRESS))
10366                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10367
10368         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10369                 tp->dma_limit = 0;
10370                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10371                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10372                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10373                 }
10374         }
10375
10376         if (tg3_flag(tp, HW_TSO_1) ||
10377             tg3_flag(tp, HW_TSO_2) ||
10378             tg3_flag(tp, HW_TSO_3))
10379                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10380
10381         if (tg3_flag(tp, 57765_PLUS) ||
10382             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10383             tg3_asic_rev(tp) == ASIC_REV_57780)
10384                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10385
10386         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10387             tg3_asic_rev(tp) == ASIC_REV_5762)
10388                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10389
10390         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10391             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10392             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10393             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10394             tg3_flag(tp, 57765_PLUS)) {
10395                 u32 tgtreg;
10396
10397                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10398                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10399                 else
10400                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10401
10402                 val = tr32(tgtreg);
10403                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10404                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10405                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10406                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10407                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10408                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10409                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10410                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10411                 }
10412                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10413         }
10414
10415         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10416             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10417             tg3_asic_rev(tp) == ASIC_REV_5762) {
10418                 u32 tgtreg;
10419
10420                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10421                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10422                 else
10423                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10424
10425                 val = tr32(tgtreg);
10426                 tw32(tgtreg, val |
10427                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10428                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10429         }
10430
10431         /* Receive/send statistics. */
10432         if (tg3_flag(tp, 5750_PLUS)) {
10433                 val = tr32(RCVLPC_STATS_ENABLE);
10434                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10435                 tw32(RCVLPC_STATS_ENABLE, val);
10436         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10437                    tg3_flag(tp, TSO_CAPABLE)) {
10438                 val = tr32(RCVLPC_STATS_ENABLE);
10439                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10440                 tw32(RCVLPC_STATS_ENABLE, val);
10441         } else {
10442                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10443         }
10444         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10445         tw32(SNDDATAI_STATSENAB, 0xffffff);
10446         tw32(SNDDATAI_STATSCTRL,
10447              (SNDDATAI_SCTRL_ENABLE |
10448               SNDDATAI_SCTRL_FASTUPD));
10449
10450         /* Set up the host coalescing engine. */
10451         tw32(HOSTCC_MODE, 0);
10452         for (i = 0; i < 2000; i++) {
10453                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10454                         break;
10455                 udelay(10);
10456         }
10457
10458         __tg3_set_coalesce(tp, &tp->coal);
10459
10460         if (!tg3_flag(tp, 5705_PLUS)) {
10461                 /* Status/statistics block address.  See tg3_timer,
10462                  * the tg3_periodic_fetch_stats call there, and
10463                  * tg3_get_stats to see how this works for 5705/5750 chips.
10464                  */
10465                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10466                      ((u64) tp->stats_mapping >> 32));
10467                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10468                      ((u64) tp->stats_mapping & 0xffffffff));
10469                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10470
10471                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10472
10473                 /* Clear statistics and status block memory areas */
10474                 for (i = NIC_SRAM_STATS_BLK;
10475                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10476                      i += sizeof(u32)) {
10477                         tg3_write_mem(tp, i, 0);
10478                         udelay(40);
10479                 }
10480         }
10481
10482         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10483
10484         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10485         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10486         if (!tg3_flag(tp, 5705_PLUS))
10487                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10488
10489         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10490                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10491                 /* reset to prevent losing 1st rx packet intermittently */
10492                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10493                 udelay(10);
10494         }
10495
10496         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10497                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10498                         MAC_MODE_FHDE_ENABLE;
10499         if (tg3_flag(tp, ENABLE_APE))
10500                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10501         if (!tg3_flag(tp, 5705_PLUS) &&
10502             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10503             tg3_asic_rev(tp) != ASIC_REV_5700)
10504                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10505         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10506         udelay(40);
10507
10508         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10509          * If TG3_FLAG_IS_NIC is zero, we should read the
10510          * register to preserve the GPIO settings for LOMs. The GPIOs,
10511          * whether used as inputs or outputs, are set by boot code after
10512          * reset.
10513          */
10514         if (!tg3_flag(tp, IS_NIC)) {
10515                 u32 gpio_mask;
10516
10517                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10518                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10519                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10520
10521                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10522                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10523                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10524
10525                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10526                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10527
10528                 tp->grc_local_ctrl &= ~gpio_mask;
10529                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10530
10531                 /* GPIO1 must be driven high for eeprom write protect */
10532                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10533                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10534                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10535         }
10536         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10537         udelay(100);
10538
10539         if (tg3_flag(tp, USING_MSIX)) {
10540                 val = tr32(MSGINT_MODE);
10541                 val |= MSGINT_MODE_ENABLE;
10542                 if (tp->irq_cnt > 1)
10543                         val |= MSGINT_MODE_MULTIVEC_EN;
10544                 if (!tg3_flag(tp, 1SHOT_MSI))
10545                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10546                 tw32(MSGINT_MODE, val);
10547         }
10548
10549         if (!tg3_flag(tp, 5705_PLUS)) {
10550                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10551                 udelay(40);
10552         }
10553
10554         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10555                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10556                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10557                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10558                WDMAC_MODE_LNGREAD_ENAB);
10559
10560         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10561             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10562                 if (tg3_flag(tp, TSO_CAPABLE) &&
10563                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10564                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10565                         /* nothing */
10566                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10567                            !tg3_flag(tp, IS_5788)) {
10568                         val |= WDMAC_MODE_RX_ACCEL;
10569                 }
10570         }
10571
10572         /* Enable host coalescing bug fix */
10573         if (tg3_flag(tp, 5755_PLUS))
10574                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10575
10576         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10577                 val |= WDMAC_MODE_BURST_ALL_DATA;
10578
10579         tw32_f(WDMAC_MODE, val);
10580         udelay(40);
10581
10582         if (tg3_flag(tp, PCIX_MODE)) {
10583                 u16 pcix_cmd;
10584
10585                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10586                                      &pcix_cmd);
10587                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10588                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10589                         pcix_cmd |= PCI_X_CMD_READ_2K;
10590                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10591                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10592                         pcix_cmd |= PCI_X_CMD_READ_2K;
10593                 }
10594                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10595                                       pcix_cmd);
10596         }
10597
10598         tw32_f(RDMAC_MODE, rdmac_mode);
10599         udelay(40);
10600
10601         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10602             tg3_asic_rev(tp) == ASIC_REV_5720) {
10603                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10604                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10605                                 break;
10606                 }
10607                 if (i < TG3_NUM_RDMA_CHANNELS) {
10608                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10609                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10610                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10611                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10612                 }
10613         }
10614
10615         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10616         if (!tg3_flag(tp, 5705_PLUS))
10617                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10618
10619         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10620                 tw32(SNDDATAC_MODE,
10621                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10622         else
10623                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10624
10625         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10626         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10627         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10628         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10629                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10630         tw32(RCVDBDI_MODE, val);
10631         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10632         if (tg3_flag(tp, HW_TSO_1) ||
10633             tg3_flag(tp, HW_TSO_2) ||
10634             tg3_flag(tp, HW_TSO_3))
10635                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10636         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10637         if (tg3_flag(tp, ENABLE_TSS))
10638                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10639         tw32(SNDBDI_MODE, val);
10640         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10641
10642         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10643                 err = tg3_load_5701_a0_firmware_fix(tp);
10644                 if (err)
10645                         return err;
10646         }
10647
10648         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10649                 /* Ignore any errors from the firmware download. If the
10650                  * download fails, the device will operate with EEE disabled.
10651                  */
10652                 tg3_load_57766_firmware(tp);
10653         }
10654
10655         if (tg3_flag(tp, TSO_CAPABLE)) {
10656                 err = tg3_load_tso_firmware(tp);
10657                 if (err)
10658                         return err;
10659         }
10660
10661         tp->tx_mode = TX_MODE_ENABLE;
10662
10663         if (tg3_flag(tp, 5755_PLUS) ||
10664             tg3_asic_rev(tp) == ASIC_REV_5906)
10665                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10666
10667         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10668             tg3_asic_rev(tp) == ASIC_REV_5762) {
10669                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10670                 tp->tx_mode &= ~val;
10671                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10672         }
10673
10674         tw32_f(MAC_TX_MODE, tp->tx_mode);
10675         udelay(100);
10676
10677         if (tg3_flag(tp, ENABLE_RSS)) {
10678                 u32 rss_key[10];
10679
10680                 tg3_rss_write_indir_tbl(tp);
10681
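                /* Program a random 40-byte hash key into the ten 32-bit
                 * RSS hash key registers.
                 */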
10682                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10683
10684                 for (i = 0; i < 10; i++)
10685                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10686         }
10687
10688         tp->rx_mode = RX_MODE_ENABLE;
10689         if (tg3_flag(tp, 5755_PLUS))
10690                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10691
10692         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10693                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10694
10695         if (tg3_flag(tp, ENABLE_RSS))
10696                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10697                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10698                                RX_MODE_RSS_IPV6_HASH_EN |
10699                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10700                                RX_MODE_RSS_IPV4_HASH_EN |
10701                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10702
10703         tw32_f(MAC_RX_MODE, tp->rx_mode);
10704         udelay(10);
10705
10706         tw32(MAC_LED_CTRL, tp->led_ctrl);
10707
10708         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10709         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10710                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10711                 udelay(10);
10712         }
10713         tw32_f(MAC_RX_MODE, tp->rx_mode);
10714         udelay(10);
10715
10716         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10717                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10718                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10719                         /* Set drive transmission level to 1.2V, but only
10720                          * if the signal pre-emphasis bit is not set.  */
10721                         val = tr32(MAC_SERDES_CFG);
10722                         val &= 0xfffff000;
10723                         val |= 0x880;
10724                         tw32(MAC_SERDES_CFG, val);
10725                 }
10726                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10727                         tw32(MAC_SERDES_CFG, 0x616000);
10728         }
10729
10730         /* Prevent chip from dropping frames when flow control
10731          * is enabled.
10732          */
10733         if (tg3_flag(tp, 57765_CLASS))
10734                 val = 1;
10735         else
10736                 val = 2;
10737         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10738
10739         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10740             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10741                 /* Use hardware link auto-negotiation */
10742                 tg3_flag_set(tp, HW_AUTONEG);
10743         }
10744
10745         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10746             tg3_asic_rev(tp) == ASIC_REV_5714) {
10747                 u32 tmp;
10748
10749                 tmp = tr32(SERDES_RX_CTRL);
10750                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10751                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10752                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10753                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10754         }
10755
10756         if (!tg3_flag(tp, USE_PHYLIB)) {
10757                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10758                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10759
10760                 err = tg3_setup_phy(tp, false);
10761                 if (err)
10762                         return err;
10763
10764                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10765                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10766                         u32 tmp;
10767
10768                         /* Clear CRC stats. */
10769                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10770                                 tg3_writephy(tp, MII_TG3_TEST1,
10771                                              tmp | MII_TG3_TEST1_CRC_EN);
10772                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10773                         }
10774                 }
10775         }
10776
10777         __tg3_set_rx_mode(tp->dev);
10778
10779         /* Initialize receive rules. */
10780         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10781         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10782         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10783         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10784
10785         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10786                 limit = 8;
10787         else
10788                 limit = 16;
10789         if (tg3_flag(tp, ENABLE_ASF))
10790                 limit -= 4;
10791         switch (limit) {
10792         case 16:
10793                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10794                 fallthrough;
10795         case 15:
10796                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10797                 fallthrough;
10798         case 14:
10799                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10800                 fallthrough;
10801         case 13:
10802                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10803                 fallthrough;
10804         case 12:
10805                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10806                 fallthrough;
10807         case 11:
10808                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10809                 fallthrough;
10810         case 10:
10811                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10812                 fallthrough;
10813         case 9:
10814                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10815                 fallthrough;
10816         case 8:
10817                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10818                 fallthrough;
10819         case 7:
10820                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10821                 fallthrough;
10822         case 6:
10823                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10824                 fallthrough;
10825         case 5:
10826                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10827                 fallthrough;
10828         case 4:
10829                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10830         case 3:
10831                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10832         case 2:
10833         case 1:
10834
10835         default:
10836                 break;
10837         }
10838
10839         if (tg3_flag(tp, ENABLE_APE))
10840                 /* Write our heartbeat update interval to APE. */
10841                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10842                                 APE_HOST_HEARTBEAT_INT_5SEC);
10843
10844         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10845
10846         return 0;
10847 }
10848
10849 /* Called at device open time to get the chip ready for
10850  * packet processing.  Invoked with tp->lock held.
10851  */
10852 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10853 {
10854         /* Chip may have been just powered on. If so, the boot code may still
10855          * be running initialization. Wait for it to finish to avoid races in
10856          * accessing the hardware.
10857          */
10858         tg3_enable_register_access(tp);
10859         tg3_poll_fw(tp);
10860
10861         tg3_switch_clocks(tp);
10862
10863         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10864
10865         return tg3_reset_hw(tp, reset_phy);
10866 }
10867
10868 #ifdef CONFIG_TIGON3_HWMON
10869 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10870 {
10871         u32 off, len = TG3_OCIR_LEN;
10872         int i;
10873
10874         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10875                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10876
10877                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10878                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10879                         memset(ocir, 0, len);
10880         }
10881 }
10882
10883 /* sysfs attributes for hwmon */
10884 static ssize_t tg3_show_temp(struct device *dev,
10885                              struct device_attribute *devattr, char *buf)
10886 {
10887         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10888         struct tg3 *tp = dev_get_drvdata(dev);
10889         u32 temperature;
10890
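        /* The APE reports the temperature in whole degrees; the hwmon
         * sysfs ABI expects millidegrees Celsius, hence the scaling by
         * 1000 below.
         */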
10891         spin_lock_bh(&tp->lock);
10892         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10893                                 sizeof(temperature));
10894         spin_unlock_bh(&tp->lock);
10895         return sprintf(buf, "%u\n", temperature * 1000);
10896 }
10897
10899 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10900                           TG3_TEMP_SENSOR_OFFSET);
10901 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10902                           TG3_TEMP_CAUTION_OFFSET);
10903 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10904                           TG3_TEMP_MAX_OFFSET);
10905
10906 static struct attribute *tg3_attrs[] = {
10907         &sensor_dev_attr_temp1_input.dev_attr.attr,
10908         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10909         &sensor_dev_attr_temp1_max.dev_attr.attr,
10910         NULL
10911 };
10912 ATTRIBUTE_GROUPS(tg3);
10913
10914 static void tg3_hwmon_close(struct tg3 *tp)
10915 {
10916         if (tp->hwmon_dev) {
10917                 hwmon_device_unregister(tp->hwmon_dev);
10918                 tp->hwmon_dev = NULL;
10919         }
10920 }
10921
10922 static void tg3_hwmon_open(struct tg3 *tp)
10923 {
10924         int i;
10925         u32 size = 0;
10926         struct pci_dev *pdev = tp->pdev;
10927         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10928
10929         tg3_sd_scan_scratchpad(tp, ocirs);
10930
10931         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10932                 if (!ocirs[i].src_data_length)
10933                         continue;
10934
10935                 size += ocirs[i].src_hdr_length;
10936                 size += ocirs[i].src_data_length;
10937         }
10938
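        /* Register with hwmon only when at least one sensor record
         * advertised data.
         */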
10939         if (!size)
10940                 return;
10941
10942         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10943                                                           tp, tg3_groups);
10944         if (IS_ERR(tp->hwmon_dev)) {
10945                 tp->hwmon_dev = NULL;
10946                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10947         }
10948 }
10949 #else
10950 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10951 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10952 #endif /* CONFIG_TIGON3_HWMON */
10953
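/* Accumulate a 32-bit hardware counter into a 64-bit software counter:
 * the low word is summed, and a carry propagates into the high word
 * whenever the addition wraps (detected by low < addend).
 */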
10955 #define TG3_STAT_ADD32(PSTAT, REG) \
10956 do {    u32 __val = tr32(REG); \
10957         (PSTAT)->low += __val; \
10958         if ((PSTAT)->low < __val) \
10959                 (PSTAT)->high += 1; \
10960 } while (0)
10961
10962 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10963 {
10964         struct tg3_hw_stats *sp = tp->hw_stats;
10965
10966         if (!tp->link_up)
10967                 return;
10968
10969         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10970         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10971         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10972         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10973         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10974         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10975         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10976         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10977         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10978         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10979         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10980         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10981         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
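        /* Once the combined TX packet count exceeds the number of RDMA
         * channels, clear the 5719/5720 LSO read DMA workaround bit that
         * tg3_reset_hw set.
         */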
10982         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10983                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10984                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10985                 u32 val;
10986
10987                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10988                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10989                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10990                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10991         }
10992
10993         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10994         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10995         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10996         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10997         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10998         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10999         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11000         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11001         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11002         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11003         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11004         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11005         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11006         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11007
11008         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11009         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11010             tg3_asic_rev(tp) != ASIC_REV_5762 &&
11011             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11012             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11013                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11014         } else {
11015                 u32 val = tr32(HOSTCC_FLOW_ATTN);
11016                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11017                 if (val) {
11018                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11019                         sp->rx_discards.low += val;
11020                         if (sp->rx_discards.low < val)
11021                                 sp->rx_discards.high += 1;
11022                 }
11023                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11024         }
11025         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11026 }
11027
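/* Watchdog for lost MSIs: if a NAPI context reports pending work but its
 * consumer indices have not advanced since the previous timer tick, the
 * interrupt was presumably missed, so invoke the handler by hand after
 * allowing one tick of grace.
 */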
11028 static void tg3_chk_missed_msi(struct tg3 *tp)
11029 {
11030         u32 i;
11031
11032         for (i = 0; i < tp->irq_cnt; i++) {
11033                 struct tg3_napi *tnapi = &tp->napi[i];
11034
11035                 if (tg3_has_work(tnapi)) {
11036                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11037                             tnapi->last_tx_cons == tnapi->tx_cons) {
11038                                 if (tnapi->chk_msi_cnt < 1) {
11039                                         tnapi->chk_msi_cnt++;
11040                                         return;
11041                                 }
11042                                 tg3_msi(0, tnapi);
11043                         }
11044                 }
11045                 tnapi->chk_msi_cnt = 0;
11046                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11047                 tnapi->last_tx_cons = tnapi->tx_cons;
11048         }
11049 }
11050
11051 static void tg3_timer(struct timer_list *t)
11052 {
11053         struct tg3 *tp = from_timer(tp, t, timer);
11054
11055         spin_lock(&tp->lock);
11056
11057         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11058                 spin_unlock(&tp->lock);
11059                 goto restart_timer;
11060         }
11061
11062         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11063             tg3_flag(tp, 57765_CLASS))
11064                 tg3_chk_missed_msi(tp);
11065
11066         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11067                 /* BCM4785: Flush posted writes from GbE to host memory. */
11068                 tr32(HOSTCC_MODE);
11069         }
11070
11071         if (!tg3_flag(tp, TAGGED_STATUS)) {
11072                 /* All of this garbage is because, when using non-tagged
11073                  * IRQ status, the mailbox/status_block protocol the chip
11074                  * uses with the CPU is race prone.
11075                  */
11076                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11077                         tw32(GRC_LOCAL_CTRL,
11078                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11079                 } else {
11080                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11081                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11082                 }
11083
11084                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11085                         spin_unlock(&tp->lock);
11086                         tg3_reset_task_schedule(tp);
11087                         goto restart_timer;
11088                 }
11089         }
11090
11091         /* This part only runs once per second. */
11092         if (!--tp->timer_counter) {
11093                 if (tg3_flag(tp, 5705_PLUS))
11094                         tg3_periodic_fetch_stats(tp);
11095
11096                 if (tp->setlpicnt && !--tp->setlpicnt)
11097                         tg3_phy_eee_enable(tp);
11098
11099                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11100                         u32 mac_stat;
11101                         int phy_event;
11102
11103                         mac_stat = tr32(MAC_STATUS);
11104
11105                         phy_event = 0;
11106                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11107                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11108                                         phy_event = 1;
11109                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11110                                 phy_event = 1;
11111
11112                         if (phy_event)
11113                                 tg3_setup_phy(tp, false);
11114                 } else if (tg3_flag(tp, POLL_SERDES)) {
11115                         u32 mac_stat = tr32(MAC_STATUS);
11116                         int need_setup = 0;
11117
11118                         if (tp->link_up &&
11119                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11120                                 need_setup = 1;
11121                         }
11122                         if (!tp->link_up &&
11123                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11124                                          MAC_STATUS_SIGNAL_DET))) {
11125                                 need_setup = 1;
11126                         }
11127                         if (need_setup) {
11128                                 if (!tp->serdes_counter) {
11129                                         tw32_f(MAC_MODE,
11130                                              (tp->mac_mode &
11131                                               ~MAC_MODE_PORT_MODE_MASK));
11132                                         udelay(40);
11133                                         tw32_f(MAC_MODE, tp->mac_mode);
11134                                         udelay(40);
11135                                 }
11136                                 tg3_setup_phy(tp, false);
11137                         }
11138                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11139                            tg3_flag(tp, 5780_CLASS)) {
11140                         tg3_serdes_parallel_detect(tp);
11141                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11142                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11143                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11144                                          TG3_CPMU_STATUS_LINK_MASK);
11145
11146                         if (link_up != tp->link_up)
11147                                 tg3_setup_phy(tp, false);
11148                 }
11149
11150                 tp->timer_counter = tp->timer_multiplier;
11151         }
11152
11153         /* Heartbeat is only sent once every 2 seconds.
11154          *
11155          * The heartbeat is to tell the ASF firmware that the host
11156          * driver is still alive.  In the event that the OS crashes,
11157          * ASF needs to reset the hardware to free up the FIFO space
11158          * that may be filled with rx packets destined for the host.
11159          * If the FIFO is full, ASF will no longer function properly.
11160          *
11161          * Unintended resets have been reported on real-time kernels
11162          * where the timer doesn't run on time.  Netpoll will also have
11163          * the same problem.
11164          *
11165          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11166          * to check the ring condition when the heartbeat is expiring
11167          * before doing the reset.  This will prevent most unintended
11168          * resets.
11169          */
11170         if (!--tp->asf_counter) {
11171                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11172                         tg3_wait_for_event_ack(tp);
11173
11174                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11175                                       FWCMD_NICDRV_ALIVE3);
11176                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11177                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11178                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11179
11180                         tg3_generate_fw_event(tp);
11181                 }
11182                 tp->asf_counter = tp->asf_multiplier;
11183         }
11184
11185         /* Update the APE heartbeat every 5 seconds. */
11186         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11187
11188         spin_unlock(&tp->lock);
11189
11190 restart_timer:
11191         tp->timer.expires = jiffies + tp->timer_offset;
11192         add_timer(&tp->timer);
11193 }
11194
11195 static void tg3_timer_init(struct tg3 *tp)
11196 {
11197         if (tg3_flag(tp, TAGGED_STATUS) &&
11198             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11199             !tg3_flag(tp, 57765_CLASS))
11200                 tp->timer_offset = HZ;
11201         else
11202                 tp->timer_offset = HZ / 10;
11203
11204         BUG_ON(tp->timer_offset > HZ);
11205
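        /* The timer fires every timer_offset jiffies, so timer_multiplier
         * ticks span one second and asf_multiplier ticks span
         * TG3_FW_UPDATE_FREQ_SEC seconds for the ASF heartbeat.
         */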
11206         tp->timer_multiplier = (HZ / tp->timer_offset);
11207         tp->asf_multiplier = (HZ / tp->timer_offset) *
11208                              TG3_FW_UPDATE_FREQ_SEC;
11209
11210         timer_setup(&tp->timer, tg3_timer, 0);
11211 }
11212
11213 static void tg3_timer_start(struct tg3 *tp)
11214 {
11215         tp->asf_counter   = tp->asf_multiplier;
11216         tp->timer_counter = tp->timer_multiplier;
11217
11218         tp->timer.expires = jiffies + tp->timer_offset;
11219         add_timer(&tp->timer);
11220 }
11221
11222 static void tg3_timer_stop(struct tg3 *tp)
11223 {
11224         del_timer_sync(&tp->timer);
11225 }
11226
11227 /* Restart hardware after configuration changes, self-test, etc.
11228  * Invoked with tp->lock held.
11229  */
11230 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11231         __releases(tp->lock)
11232         __acquires(tp->lock)
11233 {
11234         int err;
11235
11236         err = tg3_init_hw(tp, reset_phy);
11237         if (err) {
11238                 netdev_err(tp->dev,
11239                            "Failed to re-initialize device, aborting\n");
11240                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11241                 tg3_full_unlock(tp);
11242                 tg3_timer_stop(tp);
11243                 tp->irq_sync = 0;
11244                 tg3_napi_enable(tp);
11245                 dev_close(tp->dev);
11246                 tg3_full_lock(tp, 0);
11247         }
11248         return err;
11249 }
11250
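/* Deferred reset worker.  Performs a full halt/reinit of the chip under
 * the RTNL and tp->lock, bailing out early if the device has gone away
 * or PCI error recovery is in progress.
 */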
11251 static void tg3_reset_task(struct work_struct *work)
11252 {
11253         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11254         int err;
11255
11256         rtnl_lock();
11257         tg3_full_lock(tp, 0);
11258
11259         if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11260             tp->pdev->error_state != pci_channel_io_normal) {
11261                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11262                 tg3_full_unlock(tp);
11263                 rtnl_unlock();
11264                 return;
11265         }
11266
11267         tg3_full_unlock(tp);
11268
11269         tg3_phy_stop(tp);
11270
11271         tg3_netif_stop(tp);
11272
11273         tg3_full_lock(tp, 1);
11274
11275         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11276                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11277                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11278                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11279                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11280         }
11281
11282         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11283         err = tg3_init_hw(tp, true);
11284         if (err) {
11285                 tg3_full_unlock(tp);
11286                 tp->irq_sync = 0;
11287                 tg3_napi_enable(tp);
11288                 /* Clear this flag so that tg3_reset_task_cancel() will not
11289                  * call cancel_work_sync() and wait forever.
11290                  */
11291                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11292                 dev_close(tp->dev);
11293                 goto out;
11294         }
11295
11296         tg3_netif_start(tp);
11297         tg3_full_unlock(tp);
11298         tg3_phy_start(tp);
11299         tg3_flag_clear(tp, RESET_TASK_PENDING);
11300 out:
11301         rtnl_unlock();
11302 }
11303
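/* Request the IRQ for one NAPI context.  With multiple vectors, the
 * name encodes what the vector services (txrx/tx/rx), and the handler
 * is picked to match the interrupt scheme: MSI/MSI-X (optionally
 * one-shot) or shared legacy INTx, with or without tagged status.
 */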
11304 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11305 {
11306         irq_handler_t fn;
11307         unsigned long flags;
11308         char *name;
11309         struct tg3_napi *tnapi = &tp->napi[irq_num];
11310
11311         if (tp->irq_cnt == 1)
11312                 name = tp->dev->name;
11313         else {
11314                 name = &tnapi->irq_lbl[0];
11315                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11316                         snprintf(name, IFNAMSIZ,
11317                                  "%s-txrx-%d", tp->dev->name, irq_num);
11318                 else if (tnapi->tx_buffers)
11319                         snprintf(name, IFNAMSIZ,
11320                                  "%s-tx-%d", tp->dev->name, irq_num);
11321                 else if (tnapi->rx_rcb)
11322                         snprintf(name, IFNAMSIZ,
11323                                  "%s-rx-%d", tp->dev->name, irq_num);
11324                 else
11325                         snprintf(name, IFNAMSIZ,
11326                                  "%s-%d", tp->dev->name, irq_num);
11327                 name[IFNAMSIZ-1] = 0;
11328         }
11329
11330         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11331                 fn = tg3_msi;
11332                 if (tg3_flag(tp, 1SHOT_MSI))
11333                         fn = tg3_msi_1shot;
11334                 flags = 0;
11335         } else {
11336                 fn = tg3_interrupt;
11337                 if (tg3_flag(tp, TAGGED_STATUS))
11338                         fn = tg3_interrupt_tagged;
11339                 flags = IRQF_SHARED;
11340         }
11341
11342         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11343 }
11344
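/* Check that the chip can really deliver an interrupt: install a test
 * ISR, kick the host coalescing engine to force an interrupt, then
 * poll the interrupt mailbox for up to ~50 ms.  Returns 0 on success,
 * -EIO if nothing was observed.
 */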
11345 static int tg3_test_interrupt(struct tg3 *tp)
11346 {
11347         struct tg3_napi *tnapi = &tp->napi[0];
11348         struct net_device *dev = tp->dev;
11349         int err, i, intr_ok = 0;
11350         u32 val;
11351
11352         if (!netif_running(dev))
11353                 return -ENODEV;
11354
11355         tg3_disable_ints(tp);
11356
11357         free_irq(tnapi->irq_vec, tnapi);
11358
11359         /*
11360          * Turn off MSI one shot mode.  Otherwise this test has no way
11361          * to observe whether the interrupt was delivered.
11362          */
11363         if (tg3_flag(tp, 57765_PLUS)) {
11364                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11365                 tw32(MSGINT_MODE, val);
11366         }
11367
11368         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11369                           IRQF_SHARED, dev->name, tnapi);
11370         if (err)
11371                 return err;
11372
11373         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11374         tg3_enable_ints(tp);
11375
11376         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11377                tnapi->coal_now);
11378
11379         for (i = 0; i < 5; i++) {
11380                 u32 int_mbox, misc_host_ctrl;
11381
11382                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11383                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11384
11385                 if ((int_mbox != 0) ||
11386                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11387                         intr_ok = 1;
11388                         break;
11389                 }
11390
11391                 if (tg3_flag(tp, 57765_PLUS) &&
11392                     tnapi->hw_status->status_tag != tnapi->last_tag)
11393                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11394
11395                 msleep(10);
11396         }
11397
11398         tg3_disable_ints(tp);
11399
11400         free_irq(tnapi->irq_vec, tnapi);
11401
11402         err = tg3_request_irq(tp, 0);
11403
11404         if (err)
11405                 return err;
11406
11407         if (intr_ok) {
11408                 /* Reenable MSI one shot mode. */
11409                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11410                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11411                         tw32(MSGINT_MODE, val);
11412                 }
11413                 return 0;
11414         }
11415
11416         return -EIO;
11417 }
11418
11419 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11420  * successfully restored.
11421  */
11422 static int tg3_test_msi(struct tg3 *tp)
11423 {
11424         int err;
11425         u16 pci_cmd;
11426
11427         if (!tg3_flag(tp, USING_MSI))
11428                 return 0;
11429
11430         /* Turn off SERR reporting in case MSI terminates with Master
11431          * Abort.
11432          */
11433         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11434         pci_write_config_word(tp->pdev, PCI_COMMAND,
11435                               pci_cmd & ~PCI_COMMAND_SERR);
11436
11437         err = tg3_test_interrupt(tp);
11438
11439         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11440
11441         if (!err)
11442                 return 0;
11443
11444         /* other failures */
11445         if (err != -EIO)
11446                 return err;
11447
11448         /* MSI test failed, go back to INTx mode */
11449         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11450                     "to INTx mode. Please report this failure to the PCI "
11451                     "maintainer and include system chipset information\n");
11452
11453         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11454
11455         pci_disable_msi(tp->pdev);
11456
11457         tg3_flag_clear(tp, USING_MSI);
11458         tp->napi[0].irq_vec = tp->pdev->irq;
11459
11460         err = tg3_request_irq(tp, 0);
11461         if (err)
11462                 return err;
11463
11464         /* Need to reset the chip because the MSI cycle may have terminated
11465          * with Master Abort.
11466          */
11467         tg3_full_lock(tp, 1);
11468
11469         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11470         err = tg3_init_hw(tp, true);
11471
11472         tg3_full_unlock(tp);
11473
11474         if (err)
11475                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11476
11477         return err;
11478 }
11479
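/* Load the firmware image named in tp->fw_needed and sanity-check the
 * length advertised in its header against the size of the blob.
 */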
11480 static int tg3_request_firmware(struct tg3 *tp)
11481 {
11482         const struct tg3_firmware_hdr *fw_hdr;
11483
11484         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11485                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11486                            tp->fw_needed);
11487                 return -ENOENT;
11488         }
11489
11490         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11491
11492         /* Firmware blob starts with version numbers, followed by
11493          * start address and _full_ length including BSS sections
11494          * (which must be longer than the actual data, of course).
11495          */
11496
11497         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11498         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11499                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11500                            tp->fw_len, tp->fw_needed);
11501                 release_firmware(tp->fw);
11502                 tp->fw = NULL;
11503                 return -EINVAL;
11504         }
11505
11506         /* We no longer need firmware; we have it. */
11507         tp->fw_needed = NULL;
11508         return 0;
11509 }
11510
11511 static u32 tg3_irq_count(struct tg3 *tp)
11512 {
11513         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11514
11515         if (irq_cnt > 1) {
11516                 /* We want as many rx rings enabled as there are cpus.
11517                  * In multiqueue MSI-X mode, the first MSI-X vector
11518          * only deals with link interrupts, etc., so we add
11519                  * one to the number of vectors we are requesting.
11520                  */
11521                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11522         }
11523
11524         return irq_cnt;
11525 }
11526
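/* Size the RX/TX queue counts, then try to claim one MSI-X vector per
 * queue plus one extra for link and other events.  If the PCI core
 * grants fewer vectors than requested, the queue counts are trimmed to
 * fit rather than failing outright.
 */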
11527 static bool tg3_enable_msix(struct tg3 *tp)
11528 {
11529         int i, rc;
11530         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11531
11532         tp->txq_cnt = tp->txq_req;
11533         tp->rxq_cnt = tp->rxq_req;
11534         if (!tp->rxq_cnt)
11535                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11536         if (tp->rxq_cnt > tp->rxq_max)
11537                 tp->rxq_cnt = tp->rxq_max;
11538
11539         /* Disable multiple TX rings by default.  Simple round-robin hardware
11540          * scheduling of the TX rings can cause starvation of rings with
11541          * small packets when other rings have TSO or jumbo packets.
11542          */
11543         if (!tp->txq_req)
11544                 tp->txq_cnt = 1;
11545
11546         tp->irq_cnt = tg3_irq_count(tp);
11547
11548         for (i = 0; i < tp->irq_max; i++) {
11549                 msix_ent[i].entry  = i;
11550                 msix_ent[i].vector = 0;
11551         }
11552
11553         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11554         if (rc < 0) {
11555                 return false;
11556         } else if (rc < tp->irq_cnt) {
11557                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11558                               tp->irq_cnt, rc);
11559                 tp->irq_cnt = rc;
11560                 tp->rxq_cnt = max(rc - 1, 1);
11561                 if (tp->txq_cnt)
11562                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11563         }
11564
11565         for (i = 0; i < tp->irq_max; i++)
11566                 tp->napi[i].irq_vec = msix_ent[i].vector;
11567
11568         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11569                 pci_disable_msix(tp->pdev);
11570                 return false;
11571         }
11572
11573         if (tp->irq_cnt == 1)
11574                 return true;
11575
11576         tg3_flag_set(tp, ENABLE_RSS);
11577
11578         if (tp->txq_cnt > 1)
11579                 tg3_flag_set(tp, ENABLE_TSS);
11580
11581         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11582
11583         return true;
11584 }
11585
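/* Choose the interrupt scheme in order of preference (MSI-X, MSI,
 * legacy INTx) and program MSGINT_MODE accordingly.
 */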
11586 static void tg3_ints_init(struct tg3 *tp)
11587 {
11588         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11589             !tg3_flag(tp, TAGGED_STATUS)) {
11590                 /* All MSI-supporting chips should support tagged
11591                  * status.  Warn and fall back to INTx if not.
11592                  */
11593                 netdev_warn(tp->dev,
11594                             "MSI without TAGGED_STATUS? Not using MSI\n");
11595                 goto defcfg;
11596         }
11597
11598         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11599                 tg3_flag_set(tp, USING_MSIX);
11600         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11601                 tg3_flag_set(tp, USING_MSI);
11602
11603         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11604                 u32 msi_mode = tr32(MSGINT_MODE);
11605                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11606                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11607                 if (!tg3_flag(tp, 1SHOT_MSI))
11608                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11609                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11610         }
11611 defcfg:
11612         if (!tg3_flag(tp, USING_MSIX)) {
11613                 tp->irq_cnt = 1;
11614                 tp->napi[0].irq_vec = tp->pdev->irq;
11615         }
11616
11617         if (tp->irq_cnt == 1) {
11618                 tp->txq_cnt = 1;
11619                 tp->rxq_cnt = 1;
11620                 netif_set_real_num_tx_queues(tp->dev, 1);
11621                 netif_set_real_num_rx_queues(tp->dev, 1);
11622         }
11623 }
11624
11625 static void tg3_ints_fini(struct tg3 *tp)
11626 {
11627         if (tg3_flag(tp, USING_MSIX))
11628                 pci_disable_msix(tp->pdev);
11629         else if (tg3_flag(tp, USING_MSI))
11630                 pci_disable_msi(tp->pdev);
11631         tg3_flag_clear(tp, USING_MSI);
11632         tg3_flag_clear(tp, USING_MSIX);
11633         tg3_flag_clear(tp, ENABLE_RSS);
11634         tg3_flag_clear(tp, ENABLE_TSS);
11635 }
11636
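/* Bring the device fully up: set up interrupts and NAPI, allocate
 * descriptor memory, request the vectors, initialize the hardware,
 * optionally verify MSI delivery, then start the timer and the TX
 * queues.  Unwinds in reverse order on failure.
 */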
11637 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11638                      bool init)
11639 {
11640         struct net_device *dev = tp->dev;
11641         int i, err;
11642
11643         /*
11644          * Setup interrupts first so we know how
11645          * many NAPI resources to allocate
11646          */
11647         tg3_ints_init(tp);
11648
11649         tg3_rss_check_indir_tbl(tp);
11650
11651         /* The placement of this call is tied
11652          * to the setup and use of Host TX descriptors.
11653          */
11654         err = tg3_alloc_consistent(tp);
11655         if (err)
11656                 goto out_ints_fini;
11657
11658         tg3_napi_init(tp);
11659
11660         tg3_napi_enable(tp);
11661
11662         for (i = 0; i < tp->irq_cnt; i++) {
11663                 err = tg3_request_irq(tp, i);
11664                 if (err) {
11665                         for (i--; i >= 0; i--) {
11666                                 struct tg3_napi *tnapi = &tp->napi[i];
11667
11668                                 free_irq(tnapi->irq_vec, tnapi);
11669                         }
11670                         goto out_napi_fini;
11671                 }
11672         }
11673
11674         tg3_full_lock(tp, 0);
11675
11676         if (init)
11677                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11678
11679         err = tg3_init_hw(tp, reset_phy);
11680         if (err) {
11681                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11682                 tg3_free_rings(tp);
11683         }
11684
11685         tg3_full_unlock(tp);
11686
11687         if (err)
11688                 goto out_free_irq;
11689
11690         if (test_irq && tg3_flag(tp, USING_MSI)) {
11691                 err = tg3_test_msi(tp);
11692
11693                 if (err) {
11694                         tg3_full_lock(tp, 0);
11695                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11696                         tg3_free_rings(tp);
11697                         tg3_full_unlock(tp);
11698
11699                         goto out_napi_fini;
11700                 }
11701
11702                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11703                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11704
11705                         tw32(PCIE_TRANSACTION_CFG,
11706                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11707                 }
11708         }
11709
11710         tg3_phy_start(tp);
11711
11712         tg3_hwmon_open(tp);
11713
11714         tg3_full_lock(tp, 0);
11715
11716         tg3_timer_start(tp);
11717         tg3_flag_set(tp, INIT_COMPLETE);
11718         tg3_enable_ints(tp);
11719
11720         tg3_ptp_resume(tp);
11721
11722         tg3_full_unlock(tp);
11723
11724         netif_tx_start_all_queues(dev);
11725
11726         /*
11727          * Reset the loopback feature if it was turned on while the
11728          * device was down, to make sure that it's installed properly now.
11729          */
11730         if (dev->features & NETIF_F_LOOPBACK)
11731                 tg3_set_loopback(dev, dev->features);
11732
11733         return 0;
11734
11735 out_free_irq:
11736         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11737                 struct tg3_napi *tnapi = &tp->napi[i];
11738                 free_irq(tnapi->irq_vec, tnapi);
11739         }
11740
11741 out_napi_fini:
11742         tg3_napi_disable(tp);
11743         tg3_napi_fini(tp);
11744         tg3_free_consistent(tp);
11745
11746 out_ints_fini:
11747         tg3_ints_fini(tp);
11748
11749         return err;
11750 }
11751
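/* Tear down everything tg3_start() set up, in roughly reverse order. */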
11752 static void tg3_stop(struct tg3 *tp)
11753 {
11754         int i;
11755
11756         tg3_reset_task_cancel(tp);
11757         tg3_netif_stop(tp);
11758
11759         tg3_timer_stop(tp);
11760
11761         tg3_hwmon_close(tp);
11762
11763         tg3_phy_stop(tp);
11764
11765         tg3_full_lock(tp, 1);
11766
11767         tg3_disable_ints(tp);
11768
11769         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11770         tg3_free_rings(tp);
11771         tg3_flag_clear(tp, INIT_COMPLETE);
11772
11773         tg3_full_unlock(tp);
11774
11775         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11776                 struct tg3_napi *tnapi = &tp->napi[i];
11777                 free_irq(tnapi->irq_vec, tnapi);
11778         }
11779
11780         tg3_ints_fini(tp);
11781
11782         tg3_napi_fini(tp);
11783
11784         tg3_free_consistent(tp);
11785 }
11786
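/* ndo_open handler: load any firmware the chip needs (on most chips a
 * failed load just downgrades the EEE or TSO capability), then power
 * the device up and run the full tg3_start() bring-up.
 */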
11787 static int tg3_open(struct net_device *dev)
11788 {
11789         struct tg3 *tp = netdev_priv(dev);
11790         int err;
11791
11792         if (tp->pcierr_recovery) {
11793                 netdev_err(dev, "Failed to open device. PCI error recovery "
11794                            "in progress\n");
11795                 return -EAGAIN;
11796         }
11797
11798         if (tp->fw_needed) {
11799                 err = tg3_request_firmware(tp);
11800                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11801                         if (err) {
11802                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11803                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11804                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11805                                 netdev_warn(tp->dev, "EEE capability restored\n");
11806                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11807                         }
11808                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11809                         if (err)
11810                                 return err;
11811                 } else if (err) {
11812                         netdev_warn(tp->dev, "TSO capability disabled\n");
11813                         tg3_flag_clear(tp, TSO_CAPABLE);
11814                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11815                         netdev_notice(tp->dev, "TSO capability restored\n");
11816                         tg3_flag_set(tp, TSO_CAPABLE);
11817                 }
11818         }
11819
11820         tg3_carrier_off(tp);
11821
11822         err = tg3_power_up(tp);
11823         if (err)
11824                 return err;
11825
11826         tg3_full_lock(tp, 0);
11827
11828         tg3_disable_ints(tp);
11829         tg3_flag_clear(tp, INIT_COMPLETE);
11830
11831         tg3_full_unlock(tp);
11832
11833         err = tg3_start(tp,
11834                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11835                         true, true);
11836         if (err) {
11837                 tg3_frob_aux_power(tp, false);
11838                 pci_set_power_state(tp->pdev, PCI_D3hot);
11839         }
11840
11841         return err;
11842 }
11843
11844 static int tg3_close(struct net_device *dev)
11845 {
11846         struct tg3 *tp = netdev_priv(dev);
11847
11848         if (tp->pcierr_recovery) {
11849                 netdev_err(dev, "Failed to close device. PCI error recovery "
11850                            "in progress\n");
11851                 return -EAGAIN;
11852         }
11853
11854         tg3_stop(tp);
11855
11856         if (pci_device_is_present(tp->pdev)) {
11857                 tg3_power_down_prepare(tp);
11858
11859                 tg3_carrier_off(tp);
11860         }
11861         return 0;
11862 }
11863
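/* Hardware statistics are maintained as two 32-bit halves; fold them
 * into a single 64-bit value.
 */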
11864 static inline u64 get_stat64(tg3_stat64_t *val)
11865 {
11866         return ((u64)val->high << 32) | ((u64)val->low);
11867 }
11868
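/* On 5700/5701 copper devices the CRC error count comes from a PHY
 * test register (which also needs re-arming after each read) rather
 * than the MAC statistics block; all other chips use the hardware
 * rx_fcs_errors counter.
 */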
11869 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11870 {
11871         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11872
11873         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11874             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11875              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11876                 u32 val;
11877
11878                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11879                         tg3_writephy(tp, MII_TG3_TEST1,
11880                                      val | MII_TG3_TEST1_CRC_EN);
11881                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11882                 } else
11883                         val = 0;
11884
11885                 tp->phy_crc_errors += val;
11886
11887                 return tp->phy_crc_errors;
11888         }
11889
11890         return get_stat64(&hw_stats->rx_fcs_errors);
11891 }
11892
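/* Each ethtool stat is the total captured before the last chip reset
 * (estats_prev) plus the live hardware counter.
 */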
11893 #define ESTAT_ADD(member) \
11894         estats->member =        old_estats->member + \
11895                                 get_stat64(&hw_stats->member)
11896
11897 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11898 {
11899         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11900         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11901
11902         ESTAT_ADD(rx_octets);
11903         ESTAT_ADD(rx_fragments);
11904         ESTAT_ADD(rx_ucast_packets);
11905         ESTAT_ADD(rx_mcast_packets);
11906         ESTAT_ADD(rx_bcast_packets);
11907         ESTAT_ADD(rx_fcs_errors);
11908         ESTAT_ADD(rx_align_errors);
11909         ESTAT_ADD(rx_xon_pause_rcvd);
11910         ESTAT_ADD(rx_xoff_pause_rcvd);
11911         ESTAT_ADD(rx_mac_ctrl_rcvd);
11912         ESTAT_ADD(rx_xoff_entered);
11913         ESTAT_ADD(rx_frame_too_long_errors);
11914         ESTAT_ADD(rx_jabbers);
11915         ESTAT_ADD(rx_undersize_packets);
11916         ESTAT_ADD(rx_in_length_errors);
11917         ESTAT_ADD(rx_out_length_errors);
11918         ESTAT_ADD(rx_64_or_less_octet_packets);
11919         ESTAT_ADD(rx_65_to_127_octet_packets);
11920         ESTAT_ADD(rx_128_to_255_octet_packets);
11921         ESTAT_ADD(rx_256_to_511_octet_packets);
11922         ESTAT_ADD(rx_512_to_1023_octet_packets);
11923         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11924         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11925         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11926         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11927         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11928
11929         ESTAT_ADD(tx_octets);
11930         ESTAT_ADD(tx_collisions);
11931         ESTAT_ADD(tx_xon_sent);
11932         ESTAT_ADD(tx_xoff_sent);
11933         ESTAT_ADD(tx_flow_control);
11934         ESTAT_ADD(tx_mac_errors);
11935         ESTAT_ADD(tx_single_collisions);
11936         ESTAT_ADD(tx_mult_collisions);
11937         ESTAT_ADD(tx_deferred);
11938         ESTAT_ADD(tx_excessive_collisions);
11939         ESTAT_ADD(tx_late_collisions);
11940         ESTAT_ADD(tx_collide_2times);
11941         ESTAT_ADD(tx_collide_3times);
11942         ESTAT_ADD(tx_collide_4times);
11943         ESTAT_ADD(tx_collide_5times);
11944         ESTAT_ADD(tx_collide_6times);
11945         ESTAT_ADD(tx_collide_7times);
11946         ESTAT_ADD(tx_collide_8times);
11947         ESTAT_ADD(tx_collide_9times);
11948         ESTAT_ADD(tx_collide_10times);
11949         ESTAT_ADD(tx_collide_11times);
11950         ESTAT_ADD(tx_collide_12times);
11951         ESTAT_ADD(tx_collide_13times);
11952         ESTAT_ADD(tx_collide_14times);
11953         ESTAT_ADD(tx_collide_15times);
11954         ESTAT_ADD(tx_ucast_packets);
11955         ESTAT_ADD(tx_mcast_packets);
11956         ESTAT_ADD(tx_bcast_packets);
11957         ESTAT_ADD(tx_carrier_sense_errors);
11958         ESTAT_ADD(tx_discards);
11959         ESTAT_ADD(tx_errors);
11960
11961         ESTAT_ADD(dma_writeq_full);
11962         ESTAT_ADD(dma_write_prioq_full);
11963         ESTAT_ADD(rxbds_empty);
11964         ESTAT_ADD(rx_discards);
11965         ESTAT_ADD(rx_errors);
11966         ESTAT_ADD(rx_threshold_hit);
11967
11968         ESTAT_ADD(dma_readq_full);
11969         ESTAT_ADD(dma_read_prioq_full);
11970         ESTAT_ADD(tx_comp_queue_full);
11971
11972         ESTAT_ADD(ring_set_send_prod_index);
11973         ESTAT_ADD(ring_status_update);
11974         ESTAT_ADD(nic_irqs);
11975         ESTAT_ADD(nic_avoided_irqs);
11976         ESTAT_ADD(nic_tx_threshold_hit);
11977
11978         ESTAT_ADD(mbuf_lwm_thresh_hit);
11979 }
11980
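/* Fill the rtnl_link_stats64 snapshot from the hardware statistics
 * block, adding in the totals saved across the last chip reset.
 */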
11981 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11982 {
11983         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11984         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11985         unsigned long rx_dropped;
11986         unsigned long tx_dropped;
11987         int i;
11988
11989         stats->rx_packets = old_stats->rx_packets +
11990                 get_stat64(&hw_stats->rx_ucast_packets) +
11991                 get_stat64(&hw_stats->rx_mcast_packets) +
11992                 get_stat64(&hw_stats->rx_bcast_packets);
11993
11994         stats->tx_packets = old_stats->tx_packets +
11995                 get_stat64(&hw_stats->tx_ucast_packets) +
11996                 get_stat64(&hw_stats->tx_mcast_packets) +
11997                 get_stat64(&hw_stats->tx_bcast_packets);
11998
11999         stats->rx_bytes = old_stats->rx_bytes +
12000                 get_stat64(&hw_stats->rx_octets);
12001         stats->tx_bytes = old_stats->tx_bytes +
12002                 get_stat64(&hw_stats->tx_octets);
12003
12004         stats->rx_errors = old_stats->rx_errors +
12005                 get_stat64(&hw_stats->rx_errors);
12006         stats->tx_errors = old_stats->tx_errors +
12007                 get_stat64(&hw_stats->tx_errors) +
12008                 get_stat64(&hw_stats->tx_mac_errors) +
12009                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12010                 get_stat64(&hw_stats->tx_discards);
12011
12012         stats->multicast = old_stats->multicast +
12013                 get_stat64(&hw_stats->rx_mcast_packets);
12014         stats->collisions = old_stats->collisions +
12015                 get_stat64(&hw_stats->tx_collisions);
12016
12017         stats->rx_length_errors = old_stats->rx_length_errors +
12018                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12019                 get_stat64(&hw_stats->rx_undersize_packets);
12020
12021         stats->rx_frame_errors = old_stats->rx_frame_errors +
12022                 get_stat64(&hw_stats->rx_align_errors);
12023         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12024                 get_stat64(&hw_stats->tx_discards);
12025         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12026                 get_stat64(&hw_stats->tx_carrier_sense_errors);
12027
12028         stats->rx_crc_errors = old_stats->rx_crc_errors +
12029                 tg3_calc_crc_errors(tp);
12030
12031         stats->rx_missed_errors = old_stats->rx_missed_errors +
12032                 get_stat64(&hw_stats->rx_discards);
12033
12034         /* Aggregate per-queue counters. The per-queue counters are updated
12035          * by a single writer, race-free. The result computed by this loop
12036          * might not be 100% accurate (counters can be updated in the middle of
12037          * the loop) but the next tg3_get_nstats() will recompute the current
12038          * value so it is acceptable.
12039          *
12040          * Note that these counters wrap around at 4G on 32-bit machines.
12041          */
12042         rx_dropped = (unsigned long)(old_stats->rx_dropped);
12043         tx_dropped = (unsigned long)(old_stats->tx_dropped);
12044
12045         for (i = 0; i < tp->irq_cnt; i++) {
12046                 struct tg3_napi *tnapi = &tp->napi[i];
12047
12048                 rx_dropped += tnapi->rx_dropped;
12049                 tx_dropped += tnapi->tx_dropped;
12050         }
12051
12052         stats->rx_dropped = rx_dropped;
12053         stats->tx_dropped = tx_dropped;
12054 }
12055
12056 static int tg3_get_regs_len(struct net_device *dev)
12057 {
12058         return TG3_REG_BLK_SIZE;
12059 }
12060
12061 static void tg3_get_regs(struct net_device *dev,
12062                 struct ethtool_regs *regs, void *_p)
12063 {
12064         struct tg3 *tp = netdev_priv(dev);
12065
12066         regs->version = 0;
12067
12068         memset(_p, 0, TG3_REG_BLK_SIZE);
12069
12070         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12071                 return;
12072
12073         tg3_full_lock(tp, 0);
12074
12075         tg3_dump_legacy_regs(tp, (u32 *)_p);
12076
12077         tg3_full_unlock(tp);
12078 }
12079
12080 static int tg3_get_eeprom_len(struct net_device *dev)
12081 {
12082         struct tg3 *tp = netdev_priv(dev);
12083
12084         return tp->nvram_size;
12085 }
12086
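/* ethtool EEPROM read.  NVRAM is only addressable in 4-byte words, so
 * unaligned head and tail bytes are handled by reading the enclosing
 * word and copying out just the requested part.
 */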
12087 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12088 {
12089         struct tg3 *tp = netdev_priv(dev);
12090         int ret, cpmu_restore = 0;
12091         u8  *pd;
12092         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12093         __be32 val;
12094
12095         if (tg3_flag(tp, NO_NVRAM))
12096                 return -EINVAL;
12097
12098         offset = eeprom->offset;
12099         len = eeprom->len;
12100         eeprom->len = 0;
12101
12102         eeprom->magic = TG3_EEPROM_MAGIC;
12103
12104         /* Override clock, link aware and link idle modes */
12105         if (tg3_flag(tp, CPMU_PRESENT)) {
12106                 cpmu_val = tr32(TG3_CPMU_CTRL);
12107                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12108                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12109                         tw32(TG3_CPMU_CTRL, cpmu_val &
12110                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12111                                              CPMU_CTRL_LINK_IDLE_MODE));
12112                         cpmu_restore = 1;
12113                 }
12114         }
12115         tg3_override_clk(tp);
12116
12117         if (offset & 3) {
12118                 /* adjustments to start on required 4 byte boundary */
12119                 b_offset = offset & 3;
12120                 b_count = 4 - b_offset;
12121                 if (b_count > len) {
12122                         /* i.e. offset=1 len=2 */
12123                         b_count = len;
12124                 }
12125                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12126                 if (ret)
12127                         goto eeprom_done;
12128                 memcpy(data, ((char *)&val) + b_offset, b_count);
12129                 len -= b_count;
12130                 offset += b_count;
12131                 eeprom->len += b_count;
12132         }
12133
12134         /* read bytes up to the last 4 byte boundary */
12135         pd = &data[eeprom->len];
12136         for (i = 0; i < (len - (len & 3)); i += 4) {
12137                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12138                 if (ret) {
12139                         if (i)
12140                                 i -= 4;
12141                         eeprom->len += i;
12142                         goto eeprom_done;
12143                 }
12144                 memcpy(pd + i, &val, 4);
12145                 if (need_resched()) {
12146                         if (signal_pending(current)) {
12147                                 eeprom->len += i;
12148                                 ret = -EINTR;
12149                                 goto eeprom_done;
12150                         }
12151                         cond_resched();
12152                 }
12153         }
12154         eeprom->len += i;
12155
12156         if (len & 3) {
12157                 /* read last bytes not ending on 4 byte boundary */
12158                 pd = &data[eeprom->len];
12159                 b_count = len & 3;
12160                 b_offset = offset + len - b_count;
12161                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12162                 if (ret)
12163                         goto eeprom_done;
12164                 memcpy(pd, &val, b_count);
12165                 eeprom->len += b_count;
12166         }
12167         ret = 0;
12168
12169 eeprom_done:
12170         /* Restore clock, link aware and link idle modes */
12171         tg3_restore_clk(tp);
12172         if (cpmu_restore)
12173                 tw32(TG3_CPMU_CTRL, cpmu_val);
12174
12175         return ret;
12176 }
12177
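/* ethtool EEPROM write.  The NVRAM block interface is 4-byte aligned,
 * so unaligned head and tail bytes are merged with the existing
 * contents in a bounce buffer before writing.
 */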
12178 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12179 {
12180         struct tg3 *tp = netdev_priv(dev);
12181         int ret;
12182         u32 offset, len, b_offset, odd_len;
12183         u8 *buf;
12184         __be32 start = 0, end;
12185
12186         if (tg3_flag(tp, NO_NVRAM) ||
12187             eeprom->magic != TG3_EEPROM_MAGIC)
12188                 return -EINVAL;
12189
12190         offset = eeprom->offset;
12191         len = eeprom->len;
12192
12193         if ((b_offset = (offset & 3))) {
12194                 /* adjustments to start on required 4 byte boundary */
12195                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12196                 if (ret)
12197                         return ret;
12198                 len += b_offset;
12199                 offset &= ~3;
12200                 if (len < 4)
12201                         len = 4;
12202         }
12203
12204         odd_len = 0;
12205         if (len & 3) {
12206                 /* adjustments to end on required 4 byte boundary */
12207                 odd_len = 1;
12208                 len = (len + 3) & ~3;
12209                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12210                 if (ret)
12211                         return ret;
12212         }
12213
12214         buf = data;
12215         if (b_offset || odd_len) {
12216                 buf = kmalloc(len, GFP_KERNEL);
12217                 if (!buf)
12218                         return -ENOMEM;
12219                 if (b_offset)
12220                         memcpy(buf, &start, 4);
12221                 if (odd_len)
12222                         memcpy(buf+len-4, &end, 4);
12223                 memcpy(buf + b_offset, data, eeprom->len);
12224         }
12225
12226         ret = tg3_nvram_write_block(tp, offset, len, buf);
12227
12228         if (buf != data)
12229                 kfree(buf);
12230
12231         return ret;
12232 }
12233
12234 static int tg3_get_link_ksettings(struct net_device *dev,
12235                                   struct ethtool_link_ksettings *cmd)
12236 {
12237         struct tg3 *tp = netdev_priv(dev);
12238         u32 supported, advertising;
12239
12240         if (tg3_flag(tp, USE_PHYLIB)) {
12241                 struct phy_device *phydev;
12242                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12243                         return -EAGAIN;
12244                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12245                 phy_ethtool_ksettings_get(phydev, cmd);
12246
12247                 return 0;
12248         }
12249
12250         supported = (SUPPORTED_Autoneg);
12251
12252         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12253                 supported |= (SUPPORTED_1000baseT_Half |
12254                               SUPPORTED_1000baseT_Full);
12255
12256         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12257                 supported |= (SUPPORTED_100baseT_Half |
12258                               SUPPORTED_100baseT_Full |
12259                               SUPPORTED_10baseT_Half |
12260                               SUPPORTED_10baseT_Full |
12261                               SUPPORTED_TP);
12262                 cmd->base.port = PORT_TP;
12263         } else {
12264                 supported |= SUPPORTED_FIBRE;
12265                 cmd->base.port = PORT_FIBRE;
12266         }
12267         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12268                                                 supported);
12269
12270         advertising = tp->link_config.advertising;
12271         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12272                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12273                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12274                                 advertising |= ADVERTISED_Pause;
12275                         } else {
12276                                 advertising |= ADVERTISED_Pause |
12277                                         ADVERTISED_Asym_Pause;
12278                         }
12279                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12280                         advertising |= ADVERTISED_Asym_Pause;
12281                 }
12282         }
12283         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12284                                                 advertising);
12285
12286         if (netif_running(dev) && tp->link_up) {
12287                 cmd->base.speed = tp->link_config.active_speed;
12288                 cmd->base.duplex = tp->link_config.active_duplex;
12289                 ethtool_convert_legacy_u32_to_link_mode(
12290                         cmd->link_modes.lp_advertising,
12291                         tp->link_config.rmt_adv);
12292
12293                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12294                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12295                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12296                         else
12297                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12298                 }
12299         } else {
12300                 cmd->base.speed = SPEED_UNKNOWN;
12301                 cmd->base.duplex = DUPLEX_UNKNOWN;
12302                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12303         }
12304         cmd->base.phy_address = tp->phy_addr;
12305         cmd->base.autoneg = tp->link_config.autoneg;
12306         return 0;
12307 }
12308
12309 static int tg3_set_link_ksettings(struct net_device *dev,
12310                                   const struct ethtool_link_ksettings *cmd)
12311 {
12312         struct tg3 *tp = netdev_priv(dev);
12313         u32 speed = cmd->base.speed;
12314         u32 advertising;
12315
12316         if (tg3_flag(tp, USE_PHYLIB)) {
12317                 struct phy_device *phydev;
12318                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12319                         return -EAGAIN;
12320                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12321                 return phy_ethtool_ksettings_set(phydev, cmd);
12322         }
12323
12324         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12325             cmd->base.autoneg != AUTONEG_DISABLE)
12326                 return -EINVAL;
12327
12328         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12329             cmd->base.duplex != DUPLEX_FULL &&
12330             cmd->base.duplex != DUPLEX_HALF)
12331                 return -EINVAL;
12332
12333         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12334                                                 cmd->link_modes.advertising);
12335
12336         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12337                 u32 mask = ADVERTISED_Autoneg |
12338                            ADVERTISED_Pause |
12339                            ADVERTISED_Asym_Pause;
12340
12341                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12342                         mask |= ADVERTISED_1000baseT_Half |
12343                                 ADVERTISED_1000baseT_Full;
12344
12345                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12346                         mask |= ADVERTISED_100baseT_Half |
12347                                 ADVERTISED_100baseT_Full |
12348                                 ADVERTISED_10baseT_Half |
12349                                 ADVERTISED_10baseT_Full |
12350                                 ADVERTISED_TP;
12351                 else
12352                         mask |= ADVERTISED_FIBRE;
12353
12354                 if (advertising & ~mask)
12355                         return -EINVAL;
12356
12357                 mask &= (ADVERTISED_1000baseT_Half |
12358                          ADVERTISED_1000baseT_Full |
12359                          ADVERTISED_100baseT_Half |
12360                          ADVERTISED_100baseT_Full |
12361                          ADVERTISED_10baseT_Half |
12362                          ADVERTISED_10baseT_Full);
12363
12364                 advertising &= mask;
12365         } else {
12366                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12367                         if (speed != SPEED_1000)
12368                                 return -EINVAL;
12369
12370                         if (cmd->base.duplex != DUPLEX_FULL)
12371                                 return -EINVAL;
12372                 } else {
12373                         if (speed != SPEED_100 &&
12374                             speed != SPEED_10)
12375                                 return -EINVAL;
12376                 }
12377         }
12378
12379         tg3_full_lock(tp, 0);
12380
12381         tp->link_config.autoneg = cmd->base.autoneg;
12382         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12383                 tp->link_config.advertising = (advertising |
12384                                               ADVERTISED_Autoneg);
12385                 tp->link_config.speed = SPEED_UNKNOWN;
12386                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12387         } else {
12388                 tp->link_config.advertising = 0;
12389                 tp->link_config.speed = speed;
12390                 tp->link_config.duplex = cmd->base.duplex;
12391         }
12392
12393         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12394
12395         tg3_warn_mgmt_link_flap(tp);
12396
12397         if (netif_running(dev))
12398                 tg3_setup_phy(tp, true);
12399
12400         tg3_full_unlock(tp);
12401
12402         return 0;
12403 }
12404
12405 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12406 {
12407         struct tg3 *tp = netdev_priv(dev);
12408
12409         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12410         strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12411         strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12412 }
12413
12414 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12415 {
12416         struct tg3 *tp = netdev_priv(dev);
12417
12418         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12419                 wol->supported = WAKE_MAGIC;
12420         else
12421                 wol->supported = 0;
12422         wol->wolopts = 0;
12423         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12424                 wol->wolopts = WAKE_MAGIC;
12425         memset(&wol->sopass, 0, sizeof(wol->sopass));
12426 }
12427
12428 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12429 {
12430         struct tg3 *tp = netdev_priv(dev);
12431         struct device *dp = &tp->pdev->dev;
12432
12433         if (wol->wolopts & ~WAKE_MAGIC)
12434                 return -EINVAL;
12435         if ((wol->wolopts & WAKE_MAGIC) &&
12436             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12437                 return -EINVAL;
12438
12439         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12440
12441         if (device_may_wakeup(dp))
12442                 tg3_flag_set(tp, WOL_ENABLE);
12443         else
12444                 tg3_flag_clear(tp, WOL_ENABLE);
12445
12446         return 0;
12447 }
12448
12449 static u32 tg3_get_msglevel(struct net_device *dev)
12450 {
12451         struct tg3 *tp = netdev_priv(dev);
12452         return tp->msg_enable;
12453 }
12454
12455 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12456 {
12457         struct tg3 *tp = netdev_priv(dev);
12458         tp->msg_enable = value;
12459 }
12460
12461 static int tg3_nway_reset(struct net_device *dev)
12462 {
12463         struct tg3 *tp = netdev_priv(dev);
12464         int r;
12465
12466         if (!netif_running(dev))
12467                 return -EAGAIN;
12468
12469         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12470                 return -EINVAL;
12471
12472         tg3_warn_mgmt_link_flap(tp);
12473
12474         if (tg3_flag(tp, USE_PHYLIB)) {
12475                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12476                         return -EAGAIN;
12477                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12478         } else {
12479                 u32 bmcr;
12480
12481                 spin_lock_bh(&tp->lock);
12482                 r = -EINVAL;
12483                 tg3_readphy(tp, MII_BMCR, &bmcr);
12484                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12485                     ((bmcr & BMCR_ANENABLE) ||
12486                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12487                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12488                                                    BMCR_ANENABLE);
12489                         r = 0;
12490                 }
12491                 spin_unlock_bh(&tp->lock);
12492         }
12493
12494         return r;
12495 }
12496
12497 static void tg3_get_ringparam(struct net_device *dev,
12498                               struct ethtool_ringparam *ering,
12499                               struct kernel_ethtool_ringparam *kernel_ering,
12500                               struct netlink_ext_ack *extack)
12501 {
12502         struct tg3 *tp = netdev_priv(dev);
12503
12504         ering->rx_max_pending = tp->rx_std_ring_mask;
12505         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12506                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12507         else
12508                 ering->rx_jumbo_max_pending = 0;
12509
12510         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12511
12512         ering->rx_pending = tp->rx_pending;
12513         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12514                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12515         else
12516                 ering->rx_jumbo_pending = 0;
12517
12518         ering->tx_pending = tp->napi[0].tx_pending;
12519 }
12520
12521 static int tg3_set_ringparam(struct net_device *dev,
12522                              struct ethtool_ringparam *ering,
12523                              struct kernel_ethtool_ringparam *kernel_ering,
12524                              struct netlink_ext_ack *extack)
12525 {
12526         struct tg3 *tp = netdev_priv(dev);
12527         int i, irq_sync = 0, err = 0;
12528         bool reset_phy = false;
12529
12530         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12531             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12532             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12533             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12534             (tg3_flag(tp, TSO_BUG) &&
12535              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12536                 return -EINVAL;
12537
12538         if (netif_running(dev)) {
12539                 tg3_phy_stop(tp);
12540                 tg3_netif_stop(tp);
12541                 irq_sync = 1;
12542         }
12543
12544         tg3_full_lock(tp, irq_sync);
12545
12546         tp->rx_pending = ering->rx_pending;
12547
12548         if (tg3_flag(tp, MAX_RXPEND_64) &&
12549             tp->rx_pending > 63)
12550                 tp->rx_pending = 63;
12551
12552         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12553                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12554
12555         for (i = 0; i < tp->irq_max; i++)
12556                 tp->napi[i].tx_pending = ering->tx_pending;
12557
12558         if (netif_running(dev)) {
12559                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12560                 /* Reset PHY to avoid PHY lock up */
12561                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12562                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12563                     tg3_asic_rev(tp) == ASIC_REV_5720)
12564                         reset_phy = true;
12565
12566                 err = tg3_restart_hw(tp, reset_phy);
12567                 if (!err)
12568                         tg3_netif_start(tp);
12569         }
12570
12571         tg3_full_unlock(tp);
12572
12573         if (irq_sync && !err)
12574                 tg3_phy_start(tp);
12575
12576         return err;
12577 }
12578
12579 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12580 {
12581         struct tg3 *tp = netdev_priv(dev);
12582
12583         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12584
12585         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12586                 epause->rx_pause = 1;
12587         else
12588                 epause->rx_pause = 0;
12589
12590         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12591                 epause->tx_pause = 1;
12592         else
12593                 epause->tx_pause = 0;
12594 }
12595
12596 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12597 {
12598         struct tg3 *tp = netdev_priv(dev);
12599         int err = 0;
12600         bool reset_phy = false;
12601
12602         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12603                 tg3_warn_mgmt_link_flap(tp);
12604
12605         if (tg3_flag(tp, USE_PHYLIB)) {
12606                 struct phy_device *phydev;
12607
12608                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12609
12610                 if (!phy_validate_pause(phydev, epause))
12611                         return -EINVAL;
12612
12613                 tp->link_config.flowctrl = 0;
12614                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12615                 if (epause->rx_pause) {
12616                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12617
12618                         if (epause->tx_pause) {
12619                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12620                         }
12621                 } else if (epause->tx_pause) {
12622                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12623                 }
12624
12625                 if (epause->autoneg)
12626                         tg3_flag_set(tp, PAUSE_AUTONEG);
12627                 else
12628                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12629
12630                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12631                         if (phydev->autoneg) {
12632                                 /* phy_set_asym_pause() will
12633                                  * renegotiate the link to inform our
12634                                  * link partner of our flow control
12635                                  * settings, even if the flow control
12636                                  * is forced.  Let tg3_adjust_link()
12637                                  * do the final flow control setup.
12638                                  */
12639                                 return 0;
12640                         }
12641
12642                         if (!epause->autoneg)
12643                                 tg3_setup_flow_control(tp, 0, 0);
12644                 }
12645         } else {
12646                 int irq_sync = 0;
12647
12648                 if (netif_running(dev)) {
12649                         tg3_netif_stop(tp);
12650                         irq_sync = 1;
12651                 }
12652
12653                 tg3_full_lock(tp, irq_sync);
12654
12655                 if (epause->autoneg)
12656                         tg3_flag_set(tp, PAUSE_AUTONEG);
12657                 else
12658                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12659                 if (epause->rx_pause)
12660                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12661                 else
12662                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12663                 if (epause->tx_pause)
12664                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12665                 else
12666                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12667
12668                 if (netif_running(dev)) {
12669                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12670                         /* Reset PHY to avoid PHY lock up */
12671                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12672                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12673                             tg3_asic_rev(tp) == ASIC_REV_5720)
12674                                 reset_phy = true;
12675
12676                         err = tg3_restart_hw(tp, reset_phy);
12677                         if (!err)
12678                                 tg3_netif_start(tp);
12679                 }
12680
12681                 tg3_full_unlock(tp);
12682         }
12683
12684         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12685
12686         return err;
12687 }
12688
12689 static int tg3_get_sset_count(struct net_device *dev, int sset)
12690 {
12691         switch (sset) {
12692         case ETH_SS_TEST:
12693                 return TG3_NUM_TEST;
12694         case ETH_SS_STATS:
12695                 return TG3_NUM_STATS;
12696         default:
12697                 return -EOPNOTSUPP;
12698         }
12699 }
12700
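/* ethtool RX flow classification hook.  Only ETHTOOL_GRXRINGS is
 * implemented: it reports the RX queue count in use while the device is
 * running, or the count a subsequent open would use (one queue per
 * online CPU, capped at TG3_RSS_MAX_NUM_QS) while it is down.
 */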
12701 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12702                          u32 *rules __always_unused)
12703 {
12704         struct tg3 *tp = netdev_priv(dev);
12705
12706         if (!tg3_flag(tp, SUPPORT_MSIX))
12707                 return -EOPNOTSUPP;
12708
12709         switch (info->cmd) {
12710         case ETHTOOL_GRXRINGS:
12711                 if (netif_running(tp->dev))
12712                         info->data = tp->rxq_cnt;
12713                 else {
12714                         info->data = num_online_cpus();
12715                         if (info->data > TG3_RSS_MAX_NUM_QS)
12716                                 info->data = TG3_RSS_MAX_NUM_QS;
12717                 }
12718
12719                 return 0;
12720
12721         default:
12722                 return -EOPNOTSUPP;
12723         }
12724 }
12725
12726 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12727 {
12728         u32 size = 0;
12729         struct tg3 *tp = netdev_priv(dev);
12730
12731         if (tg3_flag(tp, SUPPORT_MSIX))
12732                 size = TG3_RSS_INDIR_TBL_SIZE;
12733
12734         return size;
12735 }
12736
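/* Report the RSS hash function (the hardware implements Toeplitz, i.e.
 * ETH_RSS_HASH_TOP) and, when a buffer is supplied, a copy of the
 * driver's cached RSS indirection table.
 */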
12737 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12738 {
12739         struct tg3 *tp = netdev_priv(dev);
12740         int i;
12741
12742         rxfh->hfunc = ETH_RSS_HASH_TOP;
12743         if (!rxfh->indir)
12744                 return 0;
12745
12746         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12747                 rxfh->indir[i] = tp->rss_ind_tbl[i];
12748
12749         return 0;
12750 }
12751
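/* Accept a new RSS indirection table.  Changing the hash key or hash
 * function is not supported and is rejected up front.  The new table is
 * cached in tp->rss_ind_tbl and, if RSS is currently active, written to
 * the hardware under the full lock; it is legal to do so while the
 * device is running.
 */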
12752 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12753                         struct netlink_ext_ack *extack)
12754 {
12755         struct tg3 *tp = netdev_priv(dev);
12756         size_t i;
12757
12758         /* We require at least one supported parameter to be changed and no
12759          * change in any of the unsupported parameters
12760          */
12761         if (rxfh->key ||
12762             (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12763              rxfh->hfunc != ETH_RSS_HASH_TOP))
12764                 return -EOPNOTSUPP;
12765
12766         if (!rxfh->indir)
12767                 return 0;
12768
12769         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12770                 tp->rss_ind_tbl[i] = rxfh->indir[i];
12771
12772         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12773                 return 0;
12774
12775         /* It is legal to write the indirection
12776          * table while the device is running.
12777          */
12778         tg3_full_lock(tp, 0);
12779         tg3_rss_write_indir_tbl(tp);
12780         tg3_full_unlock(tp);
12781
12782         return 0;
12783 }
12784
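/* Report the RX/TX queue limits and either the counts currently in use
 * or, if the device is down, the counts the next open would choose
 * (user-requested values when set, otherwise the default RSS queue
 * count clamped to the hardware maximum).
 */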
12785 static void tg3_get_channels(struct net_device *dev,
12786                              struct ethtool_channels *channel)
12787 {
12788         struct tg3 *tp = netdev_priv(dev);
12789         u32 deflt_qs = netif_get_num_default_rss_queues();
12790
12791         channel->max_rx = tp->rxq_max;
12792         channel->max_tx = tp->txq_max;
12793
12794         if (netif_running(dev)) {
12795                 channel->rx_count = tp->rxq_cnt;
12796                 channel->tx_count = tp->txq_cnt;
12797         } else {
12798                 if (tp->rxq_req)
12799                         channel->rx_count = tp->rxq_req;
12800                 else
12801                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12802
12803                 if (tp->txq_req)
12804                         channel->tx_count = tp->txq_req;
12805                 else
12806                         channel->tx_count = min(deflt_qs, tp->txq_max);
12807         }
12808 }
12809
12810 static int tg3_set_channels(struct net_device *dev,
12811                             struct ethtool_channels *channel)
12812 {
12813         struct tg3 *tp = netdev_priv(dev);
12814
12815         if (!tg3_flag(tp, SUPPORT_MSIX))
12816                 return -EOPNOTSUPP;
12817
12818         if (channel->rx_count > tp->rxq_max ||
12819             channel->tx_count > tp->txq_max)
12820                 return -EINVAL;
12821
12822         tp->rxq_req = channel->rx_count;
12823         tp->txq_req = channel->tx_count;
12824
12825         if (!netif_running(dev))
12826                 return 0;
12827
12828         tg3_stop(tp);
12829
12830         tg3_carrier_off(tp);
12831
12832         tg3_start(tp, true, false, false);
12833
12834         return 0;
12835 }
12836
12837 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12838 {
12839         switch (stringset) {
12840         case ETH_SS_STATS:
12841                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12842                 break;
12843         case ETH_SS_TEST:
12844                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12845                 break;
12846         default:
12847                 WARN_ON(1);     /* we need a WARN() */
12848                 break;
12849         }
12850 }
12851
12852 static int tg3_set_phys_id(struct net_device *dev,
12853                             enum ethtool_phys_id_state state)
12854 {
12855         struct tg3 *tp = netdev_priv(dev);
12856
12857         switch (state) {
12858         case ETHTOOL_ID_ACTIVE:
12859                 return 1;       /* cycle on/off once per second */
12860
12861         case ETHTOOL_ID_ON:
12862                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12863                      LED_CTRL_1000MBPS_ON |
12864                      LED_CTRL_100MBPS_ON |
12865                      LED_CTRL_10MBPS_ON |
12866                      LED_CTRL_TRAFFIC_OVERRIDE |
12867                      LED_CTRL_TRAFFIC_BLINK |
12868                      LED_CTRL_TRAFFIC_LED);
12869                 break;
12870
12871         case ETHTOOL_ID_OFF:
12872                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12873                      LED_CTRL_TRAFFIC_OVERRIDE);
12874                 break;
12875
12876         case ETHTOOL_ID_INACTIVE:
12877                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12878                 break;
12879         }
12880
12881         return 0;
12882 }
12883
12884 static void tg3_get_ethtool_stats(struct net_device *dev,
12885                                    struct ethtool_stats *estats, u64 *tmp_stats)
12886 {
12887         struct tg3 *tp = netdev_priv(dev);
12888
12889         if (tp->hw_stats)
12890                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12891         else
12892                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12893 }
12894
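/* Return a kmalloc()ed copy of the VPD block, with its length stored in
 * *vpdlen, or NULL on failure.  On parts whose NVRAM carries a
 * directory, the extended VPD directory entry is located first, falling
 * back to the fixed TG3_NVM_VPD_OFF window; other parts read VPD
 * through the PCI VPD capability.  The caller must kfree() the buffer.
 */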
12895 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12896 {
12897         int i;
12898         __be32 *buf;
12899         u32 offset = 0, len = 0;
12900         u32 magic, val;
12901
12902         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12903                 return NULL;
12904
12905         if (magic == TG3_EEPROM_MAGIC) {
12906                 for (offset = TG3_NVM_DIR_START;
12907                      offset < TG3_NVM_DIR_END;
12908                      offset += TG3_NVM_DIRENT_SIZE) {
12909                         if (tg3_nvram_read(tp, offset, &val))
12910                                 return NULL;
12911
12912                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12913                             TG3_NVM_DIRTYPE_EXTVPD)
12914                                 break;
12915                 }
12916
12917                 if (offset != TG3_NVM_DIR_END) {
12918                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12919                         if (tg3_nvram_read(tp, offset + 4, &offset))
12920                                 return NULL;
12921
12922                         offset = tg3_nvram_logical_addr(tp, offset);
12923                 }
12924
12925                 if (!offset || !len) {
12926                         offset = TG3_NVM_VPD_OFF;
12927                         len = TG3_NVM_VPD_LEN;
12928                 }
12929
12930                 buf = kmalloc(len, GFP_KERNEL);
12931                 if (!buf)
12932                         return NULL;
12933
12934                 for (i = 0; i < len; i += 4) {
12935                         /* The data is in little-endian format in NVRAM.
12936                          * Use the big-endian read routines to preserve
12937                          * the byte order as it exists in NVRAM.
12938                          */
12939                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12940                                 goto error;
12941                 }
12942                 *vpdlen = len;
12943         } else {
12944                 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12945                 if (IS_ERR(buf))
12946                         return NULL;
12947         }
12948
12949         return buf;
12950
12951 error:
12952         kfree(buf);
12953         return NULL;
12954 }
12955
12956 #define NVRAM_TEST_SIZE 0x100
12957 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12958 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12959 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12960 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12961 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12962 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12963 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12964 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12965
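/* NVRAM self-test.  The magic word selects the check performed: a
 * selfboot format 1 image must have an 8-bit byte sum of zero (skipping
 * the MBA word on rev 2), a selfboot HW image is verified with per-byte
 * odd parity, and a legacy image is checked against the CRCs at offsets
 * 0x10 and 0xfc plus the VPD checksum when one is present.
 */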
12966 static int tg3_test_nvram(struct tg3 *tp)
12967 {
12968         u32 csum, magic;
12969         __be32 *buf;
12970         int i, j, k, err = 0, size;
12971         unsigned int len;
12972
12973         if (tg3_flag(tp, NO_NVRAM))
12974                 return 0;
12975
12976         if (tg3_nvram_read(tp, 0, &magic) != 0)
12977                 return -EIO;
12978
12979         if (magic == TG3_EEPROM_MAGIC)
12980                 size = NVRAM_TEST_SIZE;
12981         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12982                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12983                     TG3_EEPROM_SB_FORMAT_1) {
12984                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12985                         case TG3_EEPROM_SB_REVISION_0:
12986                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12987                                 break;
12988                         case TG3_EEPROM_SB_REVISION_2:
12989                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12990                                 break;
12991                         case TG3_EEPROM_SB_REVISION_3:
12992                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12993                                 break;
12994                         case TG3_EEPROM_SB_REVISION_4:
12995                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12996                                 break;
12997                         case TG3_EEPROM_SB_REVISION_5:
12998                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12999                                 break;
13000                         case TG3_EEPROM_SB_REVISION_6:
13001                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13002                                 break;
13003                         default:
13004                                 return -EIO;
13005                         }
13006                 } else
13007                         return 0;
13008         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13009                 size = NVRAM_SELFBOOT_HW_SIZE;
13010         else
13011                 return -EIO;
13012
13013         buf = kmalloc(size, GFP_KERNEL);
13014         if (buf == NULL)
13015                 return -ENOMEM;
13016
13017         err = -EIO;
13018         for (i = 0, j = 0; i < size; i += 4, j++) {
13019                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13020                 if (err)
13021                         break;
13022         }
13023         if (i < size)
13024                 goto out;
13025
13026         /* Selfboot format */
13027         magic = be32_to_cpu(buf[0]);
13028         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13029             TG3_EEPROM_MAGIC_FW) {
13030                 u8 *buf8 = (u8 *) buf, csum8 = 0;
13031
13032                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13033                     TG3_EEPROM_SB_REVISION_2) {
13034                         /* For rev 2, the csum doesn't include the MBA. */
13035                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13036                                 csum8 += buf8[i];
13037                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13038                                 csum8 += buf8[i];
13039                 } else {
13040                         for (i = 0; i < size; i++)
13041                                 csum8 += buf8[i];
13042                 }
13043
13044                 if (csum8 == 0) {
13045                         err = 0;
13046                         goto out;
13047                 }
13048
13049                 err = -EIO;
13050                 goto out;
13051         }
13052
13053         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13054             TG3_EEPROM_MAGIC_HW) {
13055                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13056                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13057                 u8 *buf8 = (u8 *) buf;
13058
13059                 /* Separate the parity bits and the data bytes.  */
13060                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13061                         if ((i == 0) || (i == 8)) {
13062                                 int l;
13063                                 u8 msk;
13064
13065                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13066                                         parity[k++] = buf8[i] & msk;
13067                                 i++;
13068                         } else if (i == 16) {
13069                                 int l;
13070                                 u8 msk;
13071
13072                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13073                                         parity[k++] = buf8[i] & msk;
13074                                 i++;
13075
13076                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13077                                         parity[k++] = buf8[i] & msk;
13078                                 i++;
13079                         }
13080                         data[j++] = buf8[i];
13081                 }
13082
13083                 err = -EIO;
13084                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13085                         u8 hw8 = hweight8(data[i]);
13086
13087                         if ((hw8 & 0x1) && parity[i])
13088                                 goto out;
13089                         else if (!(hw8 & 0x1) && !parity[i])
13090                                 goto out;
13091                 }
13092                 err = 0;
13093                 goto out;
13094         }
13095
13096         err = -EIO;
13097
13098         /* Bootstrap checksum at offset 0x10 */
13099         csum = calc_crc((unsigned char *) buf, 0x10);
13100         if (csum != le32_to_cpu(buf[0x10/4]))
13101                 goto out;
13102
13103         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13104         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13105         if (csum != le32_to_cpu(buf[0xfc/4]))
13106                 goto out;
13107
13108         kfree(buf);
13109
13110         buf = tg3_vpd_readblock(tp, &len);
13111         if (!buf)
13112                 return -ENOMEM;
13113
13114         err = pci_vpd_check_csum(buf, len);
13115         /* go on if no checksum found */
13116         if (err == 1)
13117                 err = 0;
13118 out:
13119         kfree(buf);
13120         return err;
13121 }
13122
13123 #define TG3_SERDES_TIMEOUT_SEC  2
13124 #define TG3_COPPER_TIMEOUT_SEC  6
13125
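/* Link test: poll tp->link_up once a second, giving SerDes links
 * TG3_SERDES_TIMEOUT_SEC and copper links TG3_COPPER_TIMEOUT_SEC to
 * come up before reporting failure.
 */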
13126 static int tg3_test_link(struct tg3 *tp)
13127 {
13128         int i, max;
13129
13130         if (!netif_running(tp->dev))
13131                 return -ENODEV;
13132
13133         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13134                 max = TG3_SERDES_TIMEOUT_SEC;
13135         else
13136                 max = TG3_COPPER_TIMEOUT_SEC;
13137
13138         for (i = 0; i < max; i++) {
13139                 if (tp->link_up)
13140                         return 0;
13141
13142                 if (msleep_interruptible(1000))
13143                         break;
13144         }
13145
13146         return -EIO;
13147 }
13148
13149 /* Only test the commonly used registers */
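/* Each table entry pairs a read_mask of bits that must hold their value
 * with a write_mask of bits that must be writable.  The test writes
 * all-zeros and then all-ones through the masks, verifying that
 * read-only bits are preserved and read/write bits take the written
 * value, and restores the original register contents afterwards.
 */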
13150 static int tg3_test_registers(struct tg3 *tp)
13151 {
13152         int i, is_5705, is_5750;
13153         u32 offset, read_mask, write_mask, val, save_val, read_val;
13154         static struct {
13155                 u16 offset;
13156                 u16 flags;
13157 #define TG3_FL_5705     0x1
13158 #define TG3_FL_NOT_5705 0x2
13159 #define TG3_FL_NOT_5788 0x4
13160 #define TG3_FL_NOT_5750 0x8
13161                 u32 read_mask;
13162                 u32 write_mask;
13163         } reg_tbl[] = {
13164                 /* MAC Control Registers */
13165                 { MAC_MODE, TG3_FL_NOT_5705,
13166                         0x00000000, 0x00ef6f8c },
13167                 { MAC_MODE, TG3_FL_5705,
13168                         0x00000000, 0x01ef6b8c },
13169                 { MAC_STATUS, TG3_FL_NOT_5705,
13170                         0x03800107, 0x00000000 },
13171                 { MAC_STATUS, TG3_FL_5705,
13172                         0x03800100, 0x00000000 },
13173                 { MAC_ADDR_0_HIGH, 0x0000,
13174                         0x00000000, 0x0000ffff },
13175                 { MAC_ADDR_0_LOW, 0x0000,
13176                         0x00000000, 0xffffffff },
13177                 { MAC_RX_MTU_SIZE, 0x0000,
13178                         0x00000000, 0x0000ffff },
13179                 { MAC_TX_MODE, 0x0000,
13180                         0x00000000, 0x00000070 },
13181                 { MAC_TX_LENGTHS, 0x0000,
13182                         0x00000000, 0x00003fff },
13183                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13184                         0x00000000, 0x000007fc },
13185                 { MAC_RX_MODE, TG3_FL_5705,
13186                         0x00000000, 0x000007dc },
13187                 { MAC_HASH_REG_0, 0x0000,
13188                         0x00000000, 0xffffffff },
13189                 { MAC_HASH_REG_1, 0x0000,
13190                         0x00000000, 0xffffffff },
13191                 { MAC_HASH_REG_2, 0x0000,
13192                         0x00000000, 0xffffffff },
13193                 { MAC_HASH_REG_3, 0x0000,
13194                         0x00000000, 0xffffffff },
13195
13196                 /* Receive Data and Receive BD Initiator Control Registers. */
13197                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13198                         0x00000000, 0xffffffff },
13199                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13200                         0x00000000, 0xffffffff },
13201                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13202                         0x00000000, 0x00000003 },
13203                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13204                         0x00000000, 0xffffffff },
13205                 { RCVDBDI_STD_BD+0, 0x0000,
13206                         0x00000000, 0xffffffff },
13207                 { RCVDBDI_STD_BD+4, 0x0000,
13208                         0x00000000, 0xffffffff },
13209                 { RCVDBDI_STD_BD+8, 0x0000,
13210                         0x00000000, 0xffff0002 },
13211                 { RCVDBDI_STD_BD+0xc, 0x0000,
13212                         0x00000000, 0xffffffff },
13213
13214                 /* Receive BD Initiator Control Registers. */
13215                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13216                         0x00000000, 0xffffffff },
13217                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13218                         0x00000000, 0x000003ff },
13219                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13220                         0x00000000, 0xffffffff },
13221
13222                 /* Host Coalescing Control Registers. */
13223                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13224                         0x00000000, 0x00000004 },
13225                 { HOSTCC_MODE, TG3_FL_5705,
13226                         0x00000000, 0x000000f6 },
13227                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13228                         0x00000000, 0xffffffff },
13229                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13230                         0x00000000, 0x000003ff },
13231                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13232                         0x00000000, 0xffffffff },
13233                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13234                         0x00000000, 0x000003ff },
13235                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13236                         0x00000000, 0xffffffff },
13237                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13238                         0x00000000, 0x000000ff },
13239                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13240                         0x00000000, 0xffffffff },
13241                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13242                         0x00000000, 0x000000ff },
13243                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13244                         0x00000000, 0xffffffff },
13245                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13246                         0x00000000, 0xffffffff },
13247                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13248                         0x00000000, 0xffffffff },
13249                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13250                         0x00000000, 0x000000ff },
13251                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13252                         0x00000000, 0xffffffff },
13253                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13254                         0x00000000, 0x000000ff },
13255                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13256                         0x00000000, 0xffffffff },
13257                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13258                         0x00000000, 0xffffffff },
13259                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13260                         0x00000000, 0xffffffff },
13261                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13262                         0x00000000, 0xffffffff },
13263                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13264                         0x00000000, 0xffffffff },
13265                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13266                         0xffffffff, 0x00000000 },
13267                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13268                         0xffffffff, 0x00000000 },
13269
13270                 /* Buffer Manager Control Registers. */
13271                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13272                         0x00000000, 0x007fff80 },
13273                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13274                         0x00000000, 0x007fffff },
13275                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13276                         0x00000000, 0x0000003f },
13277                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13278                         0x00000000, 0x000001ff },
13279                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13280                         0x00000000, 0x000001ff },
13281                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13282                         0xffffffff, 0x00000000 },
13283                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13284                         0xffffffff, 0x00000000 },
13285
13286                 /* Mailbox Registers */
13287                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13288                         0x00000000, 0x000001ff },
13289                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13290                         0x00000000, 0x000001ff },
13291                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13292                         0x00000000, 0x000007ff },
13293                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13294                         0x00000000, 0x000001ff },
13295
13296                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13297         };
13298
13299         is_5705 = is_5750 = 0;
13300         if (tg3_flag(tp, 5705_PLUS)) {
13301                 is_5705 = 1;
13302                 if (tg3_flag(tp, 5750_PLUS))
13303                         is_5750 = 1;
13304         }
13305
13306         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13307                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13308                         continue;
13309
13310                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13311                         continue;
13312
13313                 if (tg3_flag(tp, IS_5788) &&
13314                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13315                         continue;
13316
13317                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13318                         continue;
13319
13320                 offset = (u32) reg_tbl[i].offset;
13321                 read_mask = reg_tbl[i].read_mask;
13322                 write_mask = reg_tbl[i].write_mask;
13323
13324                 /* Save the original register content */
13325                 save_val = tr32(offset);
13326
13327                 /* Determine the read-only value. */
13328                 read_val = save_val & read_mask;
13329
13330                 /* Write zero to the register, then make sure the read-only bits
13331                  * are not changed and the read/write bits are all zeros.
13332                  */
13333                 tw32(offset, 0);
13334
13335                 val = tr32(offset);
13336
13337                 /* Test the read-only and read/write bits. */
13338                 if (((val & read_mask) != read_val) || (val & write_mask))
13339                         goto out;
13340
13341                 /* Write ones to all the bits defined by RdMask and WrMask, then
13342                  * make sure the read-only bits are not changed and the
13343                  * read/write bits are all ones.
13344                  */
13345                 tw32(offset, read_mask | write_mask);
13346
13347                 val = tr32(offset);
13348
13349                 /* Test the read-only bits. */
13350                 if ((val & read_mask) != read_val)
13351                         goto out;
13352
13353                 /* Test the read/write bits. */
13354                 if ((val & write_mask) != write_mask)
13355                         goto out;
13356
13357                 tw32(offset, save_val);
13358         }
13359
13360         return 0;
13361
13362 out:
13363         if (netif_msg_hw(tp))
13364                 netdev_err(tp->dev,
13365                            "Register test failed at offset %x\n", offset);
13366         tw32(offset, save_val);
13367         return -EIO;
13368 }
13369
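/* Write each test pattern to every word of the on-chip memory range
 * [offset, offset + len) via tg3_write_mem() and read it back,
 * failing on the first mismatch.
 */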
13370 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13371 {
13372         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13373         int i;
13374         u32 j;
13375
13376         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13377                 for (j = 0; j < len; j += 4) {
13378                         u32 val;
13379
13380                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13381                         tg3_read_mem(tp, offset + j, &val);
13382                         if (val != test_pattern[i])
13383                                 return -EIO;
13384                 }
13385         }
13386         return 0;
13387 }
13388
13389 static int tg3_test_memory(struct tg3 *tp)
13390 {
13391         static struct mem_entry {
13392                 u32 offset;
13393                 u32 len;
13394         } mem_tbl_570x[] = {
13395                 { 0x00000000, 0x00b50},
13396                 { 0x00002000, 0x1c000},
13397                 { 0xffffffff, 0x00000}
13398         }, mem_tbl_5705[] = {
13399                 { 0x00000100, 0x0000c},
13400                 { 0x00000200, 0x00008},
13401                 { 0x00004000, 0x00800},
13402                 { 0x00006000, 0x01000},
13403                 { 0x00008000, 0x02000},
13404                 { 0x00010000, 0x0e000},
13405                 { 0xffffffff, 0x00000}
13406         }, mem_tbl_5755[] = {
13407                 { 0x00000200, 0x00008},
13408                 { 0x00004000, 0x00800},
13409                 { 0x00006000, 0x00800},
13410                 { 0x00008000, 0x02000},
13411                 { 0x00010000, 0x0c000},
13412                 { 0xffffffff, 0x00000}
13413         }, mem_tbl_5906[] = {
13414                 { 0x00000200, 0x00008},
13415                 { 0x00004000, 0x00400},
13416                 { 0x00006000, 0x00400},
13417                 { 0x00008000, 0x01000},
13418                 { 0x00010000, 0x01000},
13419                 { 0xffffffff, 0x00000}
13420         }, mem_tbl_5717[] = {
13421                 { 0x00000200, 0x00008},
13422                 { 0x00010000, 0x0a000},
13423                 { 0x00020000, 0x13c00},
13424                 { 0xffffffff, 0x00000}
13425         }, mem_tbl_57765[] = {
13426                 { 0x00000200, 0x00008},
13427                 { 0x00004000, 0x00800},
13428                 { 0x00006000, 0x09800},
13429                 { 0x00010000, 0x0a000},
13430                 { 0xffffffff, 0x00000}
13431         };
13432         struct mem_entry *mem_tbl;
13433         int err = 0;
13434         int i;
13435
13436         if (tg3_flag(tp, 5717_PLUS))
13437                 mem_tbl = mem_tbl_5717;
13438         else if (tg3_flag(tp, 57765_CLASS) ||
13439                  tg3_asic_rev(tp) == ASIC_REV_5762)
13440                 mem_tbl = mem_tbl_57765;
13441         else if (tg3_flag(tp, 5755_PLUS))
13442                 mem_tbl = mem_tbl_5755;
13443         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13444                 mem_tbl = mem_tbl_5906;
13445         else if (tg3_flag(tp, 5705_PLUS))
13446                 mem_tbl = mem_tbl_5705;
13447         else
13448                 mem_tbl = mem_tbl_570x;
13449
13450         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13451                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13452                 if (err)
13453                         break;
13454         }
13455
13456         return err;
13457 }
13458
13459 #define TG3_TSO_MSS             500
13460
13461 #define TG3_TSO_IP_HDR_LEN      20
13462 #define TG3_TSO_TCP_HDR_LEN     20
13463 #define TG3_TSO_TCP_OPT_LEN     12
13464
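/* Canned frame body for the TSO loopback test: an IPv4 EtherType
 * (0x0800) followed by template IPv4 and TCP headers of 20 bytes each
 * and 12 bytes of TCP options, matching the TG3_TSO_*_LEN constants
 * above.
 */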
13465 static const u8 tg3_tso_header[] = {
13466 0x08, 0x00,
13467 0x45, 0x00, 0x00, 0x00,
13468 0x00, 0x00, 0x40, 0x00,
13469 0x40, 0x06, 0x00, 0x00,
13470 0x0a, 0x00, 0x00, 0x01,
13471 0x0a, 0x00, 0x00, 0x02,
13472 0x0d, 0x00, 0xe0, 0x00,
13473 0x00, 0x00, 0x01, 0x00,
13474 0x00, 0x00, 0x02, 0x00,
13475 0x80, 0x10, 0x10, 0x00,
13476 0x14, 0x09, 0x00, 0x00,
13477 0x01, 0x01, 0x08, 0x0a,
13478 0x11, 0x11, 0x11, 0x11,
13479 0x11, 0x11, 0x11, 0x11,
13480 };
13481
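/* Run one internal loopback pass: build a test frame (optionally a TSO
 * super-frame using tg3_tso_header), post it as a single TX BD, force a
 * coalescing interrupt, then poll until the TX consumer and RX producer
 * indices show the packet(s) completed the round trip.  The received
 * payload is verified byte-for-byte against the transmitted pattern.
 */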
13482 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13483 {
13484         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13485         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13486         u32 budget;
13487         struct sk_buff *skb;
13488         u8 *tx_data, *rx_data;
13489         dma_addr_t map;
13490         int num_pkts, tx_len, rx_len, i, err;
13491         struct tg3_rx_buffer_desc *desc;
13492         struct tg3_napi *tnapi, *rnapi;
13493         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13494
13495         tnapi = &tp->napi[0];
13496         rnapi = &tp->napi[0];
13497         if (tp->irq_cnt > 1) {
13498                 if (tg3_flag(tp, ENABLE_RSS))
13499                         rnapi = &tp->napi[1];
13500                 if (tg3_flag(tp, ENABLE_TSS))
13501                         tnapi = &tp->napi[1];
13502         }
13503         coal_now = tnapi->coal_now | rnapi->coal_now;
13504
13505         err = -EIO;
13506
13507         tx_len = pktsz;
13508         skb = netdev_alloc_skb(tp->dev, tx_len);
13509         if (!skb)
13510                 return -ENOMEM;
13511
13512         tx_data = skb_put(skb, tx_len);
13513         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13514         memset(tx_data + ETH_ALEN, 0x0, 8);
13515
13516         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13517
13518         if (tso_loopback) {
13519                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13520
13521                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13522                               TG3_TSO_TCP_OPT_LEN;
13523
13524                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13525                        sizeof(tg3_tso_header));
13526                 mss = TG3_TSO_MSS;
13527
13528                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13529                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13530
13531                 /* Set the total length field in the IP header */
13532                 iph->tot_len = htons((u16)(mss + hdr_len));
13533
13534                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13535                               TXD_FLAG_CPU_POST_DMA);
13536
13537                 if (tg3_flag(tp, HW_TSO_1) ||
13538                     tg3_flag(tp, HW_TSO_2) ||
13539                     tg3_flag(tp, HW_TSO_3)) {
13540                         struct tcphdr *th;
13541                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13542                         th = (struct tcphdr *)&tx_data[val];
13543                         th->check = 0;
13544                 } else
13545                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13546
13547                 if (tg3_flag(tp, HW_TSO_3)) {
13548                         mss |= (hdr_len & 0xc) << 12;
13549                         if (hdr_len & 0x10)
13550                                 base_flags |= 0x00000010;
13551                         base_flags |= (hdr_len & 0x3e0) << 5;
13552                 } else if (tg3_flag(tp, HW_TSO_2))
13553                         mss |= hdr_len << 9;
13554                 else if (tg3_flag(tp, HW_TSO_1) ||
13555                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13556                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13557                 } else {
13558                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13559                 }
13560
13561                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13562         } else {
13563                 num_pkts = 1;
13564                 data_off = ETH_HLEN;
13565
13566                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13567                     tx_len > VLAN_ETH_FRAME_LEN)
13568                         base_flags |= TXD_FLAG_JMB_PKT;
13569         }
13570
13571         for (i = data_off; i < tx_len; i++)
13572                 tx_data[i] = (u8) (i & 0xff);
13573
13574         map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13575         if (dma_mapping_error(&tp->pdev->dev, map)) {
13576                 dev_kfree_skb(skb);
13577                 return -EIO;
13578         }
13579
13580         val = tnapi->tx_prod;
13581         tnapi->tx_buffers[val].skb = skb;
13582         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13583
13584         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13585                rnapi->coal_now);
13586
13587         udelay(10);
13588
13589         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13590
13591         budget = tg3_tx_avail(tnapi);
13592         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13593                             base_flags | TXD_FLAG_END, mss, 0)) {
13594                 tnapi->tx_buffers[val].skb = NULL;
13595                 dev_kfree_skb(skb);
13596                 return -EIO;
13597         }
13598
13599         tnapi->tx_prod++;
13600
13601         /* Sync BD data before updating mailbox */
13602         wmb();
13603
13604         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13605         tr32_mailbox(tnapi->prodmbox);
13606
13607         udelay(10);
13608
13609         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13610         for (i = 0; i < 35; i++) {
13611                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13612                        coal_now);
13613
13614                 udelay(10);
13615
13616                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13617                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13618                 if ((tx_idx == tnapi->tx_prod) &&
13619                     (rx_idx == (rx_start_idx + num_pkts)))
13620                         break;
13621         }
13622
13623         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13624         dev_kfree_skb(skb);
13625
13626         if (tx_idx != tnapi->tx_prod)
13627                 goto out;
13628
13629         if (rx_idx != rx_start_idx + num_pkts)
13630                 goto out;
13631
13632         val = data_off;
13633         while (rx_idx != rx_start_idx) {
13634                 desc = &rnapi->rx_rcb[rx_start_idx++];
13635                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13636                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13637
13638                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13639                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13640                         goto out;
13641
13642                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13643                          - ETH_FCS_LEN;
13644
13645                 if (!tso_loopback) {
13646                         if (rx_len != tx_len)
13647                                 goto out;
13648
13649                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13650                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13651                                         goto out;
13652                         } else {
13653                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13654                                         goto out;
13655                         }
13656                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13657                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13658                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13659                         goto out;
13660                 }
13661
13662                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13663                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13664                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13665                                              mapping);
13666                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13667                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13668                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13669                                              mapping);
13670                 } else
13671                         goto out;
13672
13673                 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13674                                         DMA_FROM_DEVICE);
13675
13676                 rx_data += TG3_RX_OFFSET(tp);
13677                 for (i = data_off; i < rx_len; i++, val++) {
13678                         if (*(rx_data + i) != (u8) (val & 0xff))
13679                                 goto out;
13680                 }
13681         }
13682
13683         err = 0;
13684
13685         /* tg3_free_rings will unmap and free the rx_data */
13686 out:
13687         return err;
13688 }
13689
13690 #define TG3_STD_LOOPBACK_FAILED         1
13691 #define TG3_JMB_LOOPBACK_FAILED         2
13692 #define TG3_TSO_LOOPBACK_FAILED         4
13693 #define TG3_LOOPBACK_FAILED \
13694         (TG3_STD_LOOPBACK_FAILED | \
13695          TG3_JMB_LOOPBACK_FAILED | \
13696          TG3_TSO_LOOPBACK_FAILED)
13697
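/* Drive the loopback test matrix.  MAC loopback (skipped on 5780 and on
 * CPMU-equipped parts, per the errata comment below) runs standard and
 * jumbo frames; internal PHY loopback, and external PHY loopback when
 * requested, also run a TSO frame on TSO-capable parts.  Failures
 * accumulate as TG3_*_LOOPBACK_FAILED bits in the per-test slots of
 * data[].
 */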
13698 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13699 {
13700         int err = -EIO;
13701         u32 eee_cap;
13702         u32 jmb_pkt_sz = 9000;
13703
13704         if (tp->dma_limit)
13705                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13706
13707         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13708         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13709
13710         if (!netif_running(tp->dev)) {
13711                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13712                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13713                 if (do_extlpbk)
13714                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13715                 goto done;
13716         }
13717
13718         err = tg3_reset_hw(tp, true);
13719         if (err) {
13720                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13721                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13722                 if (do_extlpbk)
13723                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13724                 goto done;
13725         }
13726
13727         if (tg3_flag(tp, ENABLE_RSS)) {
13728                 int i;
13729
13730                 /* Reroute all rx packets to the 1st queue */
13731                 for (i = MAC_RSS_INDIR_TBL_0;
13732                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13733                         tw32(i, 0x0);
13734         }
13735
13736         /* HW errata - mac loopback fails in some cases on 5780.
13737          * Normal traffic and PHY loopback are not affected by
13738          * errata.  Also, the MAC loopback test is deprecated for
13739          * all newer ASIC revisions.
13740          */
13741         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13742             !tg3_flag(tp, CPMU_PRESENT)) {
13743                 tg3_mac_loopback(tp, true);
13744
13745                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13746                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13747
13748                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13749                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13750                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13751
13752                 tg3_mac_loopback(tp, false);
13753         }
13754
13755         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13756             !tg3_flag(tp, USE_PHYLIB)) {
13757                 int i;
13758
13759                 tg3_phy_lpbk_set(tp, 0, false);
13760
13761                 /* Wait for link */
13762                 for (i = 0; i < 100; i++) {
13763                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13764                                 break;
13765                         mdelay(1);
13766                 }
13767
13768                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13769                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13770                 if (tg3_flag(tp, TSO_CAPABLE) &&
13771                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13772                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13773                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13774                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13775                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13776
13777                 if (do_extlpbk) {
13778                         tg3_phy_lpbk_set(tp, 0, true);
13779
13780                         /* All link indications report up, but the hardware
13781                          * isn't really ready for about 20 msec.  Double it
13782                          * to be sure.
13783                          */
13784                         mdelay(40);
13785
13786                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13787                                 data[TG3_EXT_LOOPB_TEST] |=
13788                                                         TG3_STD_LOOPBACK_FAILED;
13789                         if (tg3_flag(tp, TSO_CAPABLE) &&
13790                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13791                                 data[TG3_EXT_LOOPB_TEST] |=
13792                                                         TG3_TSO_LOOPBACK_FAILED;
13793                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13794                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13795                                 data[TG3_EXT_LOOPB_TEST] |=
13796                                                         TG3_JMB_LOOPBACK_FAILED;
13797                 }
13798
13799                 /* Re-enable gphy autopowerdown. */
13800                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13801                         tg3_phy_toggle_apd(tp, true);
13802         }
13803
13804         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13805                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13806
13807 done:
13808         tp->phy_flags |= eee_cap;
13809
13810         return err;
13811 }
13812
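/* ethtool self-test entry point.  The NVRAM test always runs, as does
 * the link test unless external loopback was requested.  With
 * ETH_TEST_FL_OFFLINE set the device is halted so the register, memory,
 * loopback and interrupt tests can run, and the hardware is restarted
 * afterwards.  Each failing test marks its slot in data[] nonzero and
 * sets ETH_TEST_FL_FAILED.
 */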
13813 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13814                           u64 *data)
13815 {
13816         struct tg3 *tp = netdev_priv(dev);
13817         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13818
13819         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13820                 if (tg3_power_up(tp)) {
13821                         etest->flags |= ETH_TEST_FL_FAILED;
13822                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13823                         return;
13824                 }
13825                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13826         }
13827
13828         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13829
13830         if (tg3_test_nvram(tp) != 0) {
13831                 etest->flags |= ETH_TEST_FL_FAILED;
13832                 data[TG3_NVRAM_TEST] = 1;
13833         }
13834         if (!doextlpbk && tg3_test_link(tp)) {
13835                 etest->flags |= ETH_TEST_FL_FAILED;
13836                 data[TG3_LINK_TEST] = 1;
13837         }
13838         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13839                 int err, err2 = 0, irq_sync = 0;
13840
13841                 if (netif_running(dev)) {
13842                         tg3_phy_stop(tp);
13843                         tg3_netif_stop(tp);
13844                         irq_sync = 1;
13845                 }
13846
13847                 tg3_full_lock(tp, irq_sync);
13848                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13849                 err = tg3_nvram_lock(tp);
13850                 tg3_halt_cpu(tp, RX_CPU_BASE);
13851                 if (!tg3_flag(tp, 5705_PLUS))
13852                         tg3_halt_cpu(tp, TX_CPU_BASE);
13853                 if (!err)
13854                         tg3_nvram_unlock(tp);
13855
13856                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13857                         tg3_phy_reset(tp);
13858
13859                 if (tg3_test_registers(tp) != 0) {
13860                         etest->flags |= ETH_TEST_FL_FAILED;
13861                         data[TG3_REGISTER_TEST] = 1;
13862                 }
13863
13864                 if (tg3_test_memory(tp) != 0) {
13865                         etest->flags |= ETH_TEST_FL_FAILED;
13866                         data[TG3_MEMORY_TEST] = 1;
13867                 }
13868
13869                 if (doextlpbk)
13870                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13871
13872                 if (tg3_test_loopback(tp, data, doextlpbk))
13873                         etest->flags |= ETH_TEST_FL_FAILED;
13874
13875                 tg3_full_unlock(tp);
13876
13877                 if (tg3_test_interrupt(tp) != 0) {
13878                         etest->flags |= ETH_TEST_FL_FAILED;
13879                         data[TG3_INTERRUPT_TEST] = 1;
13880                 }
13881
13882                 tg3_full_lock(tp, 0);
13883
13884                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13885                 if (netif_running(dev)) {
13886                         tg3_flag_set(tp, INIT_COMPLETE);
13887                         err2 = tg3_restart_hw(tp, true);
13888                         if (!err2)
13889                                 tg3_netif_start(tp);
13890                 }
13891
13892                 tg3_full_unlock(tp);
13893
13894                 if (irq_sync && !err2)
13895                         tg3_phy_start(tp);
13896         }
13897         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13898                 tg3_power_down_prepare(tp);
13899
13900 }
13901
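/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config onto the
 * chip's RX PTP filter control word (tp->rxptpctl) and the TX timestamp
 * enable flag, program the filter if the device is up, and echo the
 * accepted configuration back to user space.
 */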
13902 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13903 {
13904         struct tg3 *tp = netdev_priv(dev);
13905         struct hwtstamp_config stmpconf;
13906
13907         if (!tg3_flag(tp, PTP_CAPABLE))
13908                 return -EOPNOTSUPP;
13909
13910         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13911                 return -EFAULT;
13912
13913         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13914             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13915                 return -ERANGE;
13916
13917         switch (stmpconf.rx_filter) {
13918         case HWTSTAMP_FILTER_NONE:
13919                 tp->rxptpctl = 0;
13920                 break;
13921         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13922                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13923                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13924                 break;
13925         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13926                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13927                                TG3_RX_PTP_CTL_SYNC_EVNT;
13928                 break;
13929         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13930                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13931                                TG3_RX_PTP_CTL_DELAY_REQ;
13932                 break;
13933         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13934                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13935                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13936                 break;
13937         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13938                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13939                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13940                 break;
13941         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13942                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13943                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13944                 break;
13945         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13946                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13947                                TG3_RX_PTP_CTL_SYNC_EVNT;
13948                 break;
13949         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13950                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13951                                TG3_RX_PTP_CTL_SYNC_EVNT;
13952                 break;
13953         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13954                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13955                                TG3_RX_PTP_CTL_SYNC_EVNT;
13956                 break;
13957         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13958                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13959                                TG3_RX_PTP_CTL_DELAY_REQ;
13960                 break;
13961         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13962                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13963                                TG3_RX_PTP_CTL_DELAY_REQ;
13964                 break;
13965         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13966                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13967                                TG3_RX_PTP_CTL_DELAY_REQ;
13968                 break;
13969         default:
13970                 return -ERANGE;
13971         }
13972
13973         if (netif_running(dev) && tp->rxptpctl)
13974                 tw32(TG3_RX_PTP_CTL,
13975                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13976
13977         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13978                 tg3_flag_set(tp, TX_TSTAMP_EN);
13979         else
13980                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13981
13982         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13983                 -EFAULT : 0;
13984 }
13985
13986 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13987 {
13988         struct tg3 *tp = netdev_priv(dev);
13989         struct hwtstamp_config stmpconf;
13990
13991         if (!tg3_flag(tp, PTP_CAPABLE))
13992                 return -EOPNOTSUPP;
13993
13994         stmpconf.flags = 0;
13995         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13996                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13997
13998         switch (tp->rxptpctl) {
13999         case 0:
14000                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14001                 break;
14002         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14003                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14004                 break;
14005         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14006                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14007                 break;
14008         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14009                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14010                 break;
14011         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14012                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14013                 break;
14014         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14015                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14016                 break;
14017         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14018                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14019                 break;
14020         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14021                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14022                 break;
14023         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14024                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14025                 break;
14026         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14027                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14028                 break;
14029         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14030                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14031                 break;
14032         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14033                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14034                 break;
14035         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14036                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14037                 break;
14038         default:
14039                 WARN_ON_ONCE(1);
14040                 return -ERANGE;
14041         }
14042
14043         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14044                 -EFAULT : 0;
14045 }
14046
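/* Main ioctl dispatcher.  With phylib in use every request is forwarded
 * to phy_mii_ioctl(); otherwise MII register reads and writes go
 * through __tg3_readphy()/__tg3_writephy() under tp->lock, and the
 * hardware timestamping ioctls are handled by the helpers above.
 */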
14047 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14048 {
14049         struct mii_ioctl_data *data = if_mii(ifr);
14050         struct tg3 *tp = netdev_priv(dev);
14051         int err;
14052
14053         if (tg3_flag(tp, USE_PHYLIB)) {
14054                 struct phy_device *phydev;
14055                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14056                         return -EAGAIN;
14057                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14058                 return phy_mii_ioctl(phydev, ifr, cmd);
14059         }
14060
14061         switch (cmd) {
14062         case SIOCGMIIPHY:
14063                 data->phy_id = tp->phy_addr;
14064
14065                 fallthrough;
14066         case SIOCGMIIREG: {
14067                 u32 mii_regval;
14068
14069                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14070                         break;                  /* We have no PHY */
14071
14072                 if (!netif_running(dev))
14073                         return -EAGAIN;
14074
14075                 spin_lock_bh(&tp->lock);
14076                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14077                                     data->reg_num & 0x1f, &mii_regval);
14078                 spin_unlock_bh(&tp->lock);
14079
14080                 data->val_out = mii_regval;
14081
14082                 return err;
14083         }
14084
14085         case SIOCSMIIREG:
14086                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14087                         break;                  /* We have no PHY */
14088
14089                 if (!netif_running(dev))
14090                         return -EAGAIN;
14091
14092                 spin_lock_bh(&tp->lock);
14093                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14094                                      data->reg_num & 0x1f, data->val_in);
14095                 spin_unlock_bh(&tp->lock);
14096
14097                 return err;
14098
14099         case SIOCSHWTSTAMP:
14100                 return tg3_hwtstamp_set(dev, ifr);
14101
14102         case SIOCGHWTSTAMP:
14103                 return tg3_hwtstamp_get(dev, ifr);
14104
14105         default:
14106                 /* do nothing */
14107                 break;
14108         }
14109         return -EOPNOTSUPP;
14110 }
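
/* Companion userspace sketch for the SIOCGMIIPHY/SIOCGMIIREG path in
 * tg3_ioctl() above.  Userspace conventionally overlays struct
 * mii_ioctl_data on ifr.ifr_data (as mii-tool does); the device name
 * and the choice of MII_BMSR are illustrative assumptions:
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	// fills mii->phy_id
 *		mii->reg_num = MII_BMSR;		// basic status register
 *		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
 *			printf("BMSR=0x%04x\n", mii->val_out);
 *	}
 */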
14111
14112 static int tg3_get_coalesce(struct net_device *dev,
14113                             struct ethtool_coalesce *ec,
14114                             struct kernel_ethtool_coalesce *kernel_coal,
14115                             struct netlink_ext_ack *extack)
14116 {
14117         struct tg3 *tp = netdev_priv(dev);
14118
14119         memcpy(ec, &tp->coal, sizeof(*ec));
14120         return 0;
14121 }
14122
14123 static int tg3_set_coalesce(struct net_device *dev,
14124                             struct ethtool_coalesce *ec,
14125                             struct kernel_ethtool_coalesce *kernel_coal,
14126                             struct netlink_ext_ack *extack)
14127 {
14128         struct tg3 *tp = netdev_priv(dev);
14129         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14130         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14131
14132         if (!tg3_flag(tp, 5705_PLUS)) {
14133                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14134                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14135                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14136                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14137         }
14138
14139         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14140             (!ec->rx_coalesce_usecs) ||
14141             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14142             (!ec->tx_coalesce_usecs) ||
14143             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14144             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14145             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14146             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14147             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14148             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14149             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14150             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14151                 return -EINVAL;
14152
14153         /* Only copy relevant parameters, ignore all others. */
14154         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14155         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14156         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14157         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14158         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14159         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14160         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14161         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14162         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14163
14164         if (netif_running(dev)) {
14165                 tg3_full_lock(tp, 0);
14166                 __tg3_set_coalesce(tp, &tp->coal);
14167                 tg3_full_unlock(tp);
14168         }
14169         return 0;
14170 }
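
/* The limits enforced in tg3_set_coalesce() correspond to the usual
 * ethtool coalescing knobs, e.g. (device name and values illustrative):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * ethtool maps rx-usecs/tx-usecs onto rx/tx_coalesce_usecs and
 * rx-frames/tx-frames onto rx/tx_max_coalesced_frames above.
 */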
14171
14172 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14173 {
14174         struct tg3 *tp = netdev_priv(dev);
14175
14176         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14177                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14178                 return -EOPNOTSUPP;
14179         }
14180
14181         if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14182                 netdev_warn(tp->dev,
14183                             "Direct manipulation of EEE advertisement is not supported\n");
14184                 return -EINVAL;
14185         }
14186
14187         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14188                 netdev_warn(tp->dev,
14189                             "Maximal Tx LPI timer supported is %#x usec\n",
14190                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14191                 return -EINVAL;
14192         }
14193
14194         tp->eee.eee_enabled = edata->eee_enabled;
14195         tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14196         tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14197
14198         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14199         tg3_warn_mgmt_link_flap(tp);
14200
14201         if (netif_running(tp->dev)) {
14202                 tg3_full_lock(tp, 0);
14203                 tg3_setup_eee(tp);
14204                 tg3_phy_reset(tp);
14205                 tg3_full_unlock(tp);
14206         }
14207
14208         return 0;
14209 }
14210
14211 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14212 {
14213         struct tg3 *tp = netdev_priv(dev);
14214
14215         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14216                 netdev_warn(tp->dev,
14217                             "Board does not support EEE!\n");
14218                 return -EOPNOTSUPP;
14219         }
14220
14221         *edata = tp->eee;
14222         return 0;
14223 }
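
/* Both EEE handlers are reachable via ethtool, e.g. (device name and
 * timer value illustrative; per tg3_set_eee() above, the tx-timer must
 * not exceed TG3_CPMU_DBTMR1_LNKIDLE_MAX and the advertised link modes
 * cannot be changed through this interface):
 *
 *	ethtool --show-eee eth0
 *	ethtool --set-eee eth0 eee on tx-lpi on tx-timer 1024
 */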
14224
14225 static const struct ethtool_ops tg3_ethtool_ops = {
14226         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14227                                      ETHTOOL_COALESCE_MAX_FRAMES |
14228                                      ETHTOOL_COALESCE_USECS_IRQ |
14229                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14230                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14231         .get_drvinfo            = tg3_get_drvinfo,
14232         .get_regs_len           = tg3_get_regs_len,
14233         .get_regs               = tg3_get_regs,
14234         .get_wol                = tg3_get_wol,
14235         .set_wol                = tg3_set_wol,
14236         .get_msglevel           = tg3_get_msglevel,
14237         .set_msglevel           = tg3_set_msglevel,
14238         .nway_reset             = tg3_nway_reset,
14239         .get_link               = ethtool_op_get_link,
14240         .get_eeprom_len         = tg3_get_eeprom_len,
14241         .get_eeprom             = tg3_get_eeprom,
14242         .set_eeprom             = tg3_set_eeprom,
14243         .get_ringparam          = tg3_get_ringparam,
14244         .set_ringparam          = tg3_set_ringparam,
14245         .get_pauseparam         = tg3_get_pauseparam,
14246         .set_pauseparam         = tg3_set_pauseparam,
14247         .self_test              = tg3_self_test,
14248         .get_strings            = tg3_get_strings,
14249         .set_phys_id            = tg3_set_phys_id,
14250         .get_ethtool_stats      = tg3_get_ethtool_stats,
14251         .get_coalesce           = tg3_get_coalesce,
14252         .set_coalesce           = tg3_set_coalesce,
14253         .get_sset_count         = tg3_get_sset_count,
14254         .get_rxnfc              = tg3_get_rxnfc,
14255         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14256         .get_rxfh               = tg3_get_rxfh,
14257         .set_rxfh               = tg3_set_rxfh,
14258         .get_channels           = tg3_get_channels,
14259         .set_channels           = tg3_set_channels,
14260         .get_ts_info            = tg3_get_ts_info,
14261         .get_eee                = tg3_get_eee,
14262         .set_eee                = tg3_set_eee,
14263         .get_link_ksettings     = tg3_get_link_ksettings,
14264         .set_link_ksettings     = tg3_set_link_ksettings,
14265 };
14266
14267 static void tg3_get_stats64(struct net_device *dev,
14268                             struct rtnl_link_stats64 *stats)
14269 {
14270         struct tg3 *tp = netdev_priv(dev);
14271
14272         spin_lock_bh(&tp->lock);
14273         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14274                 *stats = tp->net_stats_prev;
14275                 spin_unlock_bh(&tp->lock);
14276                 return;
14277         }
14278
14279         tg3_get_nstats(tp, stats);
14280         spin_unlock_bh(&tp->lock);
14281 }
14282
14283 static void tg3_set_rx_mode(struct net_device *dev)
14284 {
14285         struct tg3 *tp = netdev_priv(dev);
14286
14287         if (!netif_running(dev))
14288                 return;
14289
14290         tg3_full_lock(tp, 0);
14291         __tg3_set_rx_mode(dev);
14292         tg3_full_unlock(tp);
14293 }
14294
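/* On 5780-class chips the TSO firmware cannot handle jumbo frames, so
 * tg3_set_mtu() trades TSO_CAPABLE off against the jumbo setup rather
 * than enabling both; all other chips simply toggle JUMBO_RING_ENABLE
 * at the ETH_DATA_LEN boundary.
 */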
14295 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14296                                int new_mtu)
14297 {
14298         dev->mtu = new_mtu;
14299
14300         if (new_mtu > ETH_DATA_LEN) {
14301                 if (tg3_flag(tp, 5780_CLASS)) {
14302                         netdev_update_features(dev);
14303                         tg3_flag_clear(tp, TSO_CAPABLE);
14304                 } else {
14305                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14306                 }
14307         } else {
14308                 if (tg3_flag(tp, 5780_CLASS)) {
14309                         tg3_flag_set(tp, TSO_CAPABLE);
14310                         netdev_update_features(dev);
14311                 }
14312                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14313         }
14314 }
14315
14316 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14317 {
14318         struct tg3 *tp = netdev_priv(dev);
14319         int err;
14320         bool reset_phy = false;
14321
14322         if (!netif_running(dev)) {
14323                 /* The new MTU will simply be picked up later,
14324                  * when the device is brought up.
14325                  */
14326                 tg3_set_mtu(dev, tp, new_mtu);
14327                 return 0;
14328         }
14329
14330         tg3_phy_stop(tp);
14331
14332         tg3_netif_stop(tp);
14333
14334         tg3_set_mtu(dev, tp, new_mtu);
14335
14336         tg3_full_lock(tp, 1);
14337
14338         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14339
14340         /* Reset the PHY, otherwise the read DMA engine is left in a mode
14341          * that breaks all requests down to 256 bytes.
14342          */
14343         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14344             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14345             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14346             tg3_asic_rev(tp) == ASIC_REV_5720)
14347                 reset_phy = true;
14348
14349         err = tg3_restart_hw(tp, reset_phy);
14350
14351         if (!err)
14352                 tg3_netif_start(tp);
14353
14354         tg3_full_unlock(tp);
14355
14356         if (!err)
14357                 tg3_phy_start(tp);
14358
14359         return err;
14360 }
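
/* tg3_change_mtu() is normally reached from dev_set_mtu() via the
 * .ndo_change_mtu hook registered below, e.g. from userspace
 * (interface name illustrative):
 *
 *	ip link set dev eth0 mtu 9000
 */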
14361
14362 static const struct net_device_ops tg3_netdev_ops = {
14363         .ndo_open               = tg3_open,
14364         .ndo_stop               = tg3_close,
14365         .ndo_start_xmit         = tg3_start_xmit,
14366         .ndo_get_stats64        = tg3_get_stats64,
14367         .ndo_validate_addr      = eth_validate_addr,
14368         .ndo_set_rx_mode        = tg3_set_rx_mode,
14369         .ndo_set_mac_address    = tg3_set_mac_addr,
14370         .ndo_eth_ioctl          = tg3_ioctl,
14371         .ndo_tx_timeout         = tg3_tx_timeout,
14372         .ndo_change_mtu         = tg3_change_mtu,
14373         .ndo_fix_features       = tg3_fix_features,
14374         .ndo_set_features       = tg3_set_features,
14375 #ifdef CONFIG_NET_POLL_CONTROLLER
14376         .ndo_poll_controller    = tg3_poll_controller,
14377 #endif
14378 };
14379
14380 static void tg3_get_eeprom_size(struct tg3 *tp)
14381 {
14382         u32 cursize, val, magic;
14383
14384         tp->nvram_size = EEPROM_CHIP_SIZE;
14385
14386         if (tg3_nvram_read(tp, 0, &magic) != 0)
14387                 return;
14388
14389         if ((magic != TG3_EEPROM_MAGIC) &&
14390             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14391             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14392                 return;
14393
14394         /*
14395          * Size the chip by reading offsets at increasing powers of two.
14396          * When we encounter our validation signature, we know the addressing
14397          * has wrapped around, and thus have our chip size.
14398          */
14399         cursize = 0x10;
14400
14401         while (cursize < tp->nvram_size) {
14402                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14403                         return;
14404
14405                 if (val == magic)
14406                         break;
14407
14408                 cursize <<= 1;
14409         }
14410
14411         tp->nvram_size = cursize;
14412 }
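
/* A sketch of the wraparound probe above, assuming a 32 KB part whose
 * contents do not repeat the magic word earlier:
 *
 *	offset    0x10  0x20  0x40  ...  0x4000  0x8000 (wraps to 0)
 *	reads     data  data  data  ...  data    magic
 *
 * The first power-of-two offset that reads back the magic is the point
 * where the address wrapped to offset 0, so cursize (0x8000 = 32 KB)
 * is the chip size.
 */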
14413
14414 static void tg3_get_nvram_size(struct tg3 *tp)
14415 {
14416         u32 val;
14417
14418         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14419                 return;
14420
14421         /* Selfboot format */
14422         if (val != TG3_EEPROM_MAGIC) {
14423                 tg3_get_eeprom_size(tp);
14424                 return;
14425         }
14426
14427         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14428                 if (val != 0) {
14429                         /* This is subtle.  We want to operate on the
14430                          * 16-bit value at offset 0xf2.  tg3_nvram_read()
14431                          * reads from NVRAM and byteswaps the data the
14432                          * same way as every other register access, which
14433                          * guarantees that the value we want always ends
14434                          * up in the lower 16 bits.  However, NVRAM data
14435                          * is stored little-endian, so the value read
14436                          * back is always opposite the endianness of the
14437                          * CPU.  The 16-bit byteswap below then brings
14438                          * the data back to CPU endianness.
14439                          */
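                        /* Worked example: a raw low half of 0x8000
                         * swab16()s to 0x0080 (128), giving a
                         * 128 * 1024 = 131072 byte part.
                         */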
14440                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14441                         return;
14442                 }
14443         }
14444         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14445 }
14446
14447 static void tg3_get_nvram_info(struct tg3 *tp)
14448 {
14449         u32 nvcfg1;
14450
14451         nvcfg1 = tr32(NVRAM_CFG1);
14452         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14453                 tg3_flag_set(tp, FLASH);
14454         } else {
14455                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14456                 tw32(NVRAM_CFG1, nvcfg1);
14457         }
14458
14459         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14460             tg3_flag(tp, 5780_CLASS)) {
14461                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14462                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14463                         tp->nvram_jedecnum = JEDEC_ATMEL;
14464                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14465                         tg3_flag_set(tp, NVRAM_BUFFERED);
14466                         break;
14467                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14468                         tp->nvram_jedecnum = JEDEC_ATMEL;
14469                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14470                         break;
14471                 case FLASH_VENDOR_ATMEL_EEPROM:
14472                         tp->nvram_jedecnum = JEDEC_ATMEL;
14473                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14474                         tg3_flag_set(tp, NVRAM_BUFFERED);
14475                         break;
14476                 case FLASH_VENDOR_ST:
14477                         tp->nvram_jedecnum = JEDEC_ST;
14478                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14479                         tg3_flag_set(tp, NVRAM_BUFFERED);
14480                         break;
14481                 case FLASH_VENDOR_SAIFUN:
14482                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14483                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14484                         break;
14485                 case FLASH_VENDOR_SST_SMALL:
14486                 case FLASH_VENDOR_SST_LARGE:
14487                         tp->nvram_jedecnum = JEDEC_SST;
14488                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14489                         break;
14490                 }
14491         } else {
14492                 tp->nvram_jedecnum = JEDEC_ATMEL;
14493                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14494                 tg3_flag_set(tp, NVRAM_BUFFERED);
14495         }
14496 }
14497
14498 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14499 {
14500         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14501         case FLASH_5752PAGE_SIZE_256:
14502                 tp->nvram_pagesize = 256;
14503                 break;
14504         case FLASH_5752PAGE_SIZE_512:
14505                 tp->nvram_pagesize = 512;
14506                 break;
14507         case FLASH_5752PAGE_SIZE_1K:
14508                 tp->nvram_pagesize = 1024;
14509                 break;
14510         case FLASH_5752PAGE_SIZE_2K:
14511                 tp->nvram_pagesize = 2048;
14512                 break;
14513         case FLASH_5752PAGE_SIZE_4K:
14514                 tp->nvram_pagesize = 4096;
14515                 break;
14516         case FLASH_5752PAGE_SIZE_264:
14517                 tp->nvram_pagesize = 264;
14518                 break;
14519         case FLASH_5752PAGE_SIZE_528:
14520                 tp->nvram_pagesize = 528;
14521                 break;
14522         }
14523 }
14524
14525 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14526 {
14527         u32 nvcfg1;
14528
14529         nvcfg1 = tr32(NVRAM_CFG1);
14530
14531         /* NVRAM protection for TPM */
14532         if (nvcfg1 & (1 << 27))
14533                 tg3_flag_set(tp, PROTECTED_NVRAM);
14534
14535         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14536         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14537         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14538                 tp->nvram_jedecnum = JEDEC_ATMEL;
14539                 tg3_flag_set(tp, NVRAM_BUFFERED);
14540                 break;
14541         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14542                 tp->nvram_jedecnum = JEDEC_ATMEL;
14543                 tg3_flag_set(tp, NVRAM_BUFFERED);
14544                 tg3_flag_set(tp, FLASH);
14545                 break;
14546         case FLASH_5752VENDOR_ST_M45PE10:
14547         case FLASH_5752VENDOR_ST_M45PE20:
14548         case FLASH_5752VENDOR_ST_M45PE40:
14549                 tp->nvram_jedecnum = JEDEC_ST;
14550                 tg3_flag_set(tp, NVRAM_BUFFERED);
14551                 tg3_flag_set(tp, FLASH);
14552                 break;
14553         }
14554
14555         if (tg3_flag(tp, FLASH)) {
14556                 tg3_nvram_get_pagesize(tp, nvcfg1);
14557         } else {
14558                 /* For EEPROM, set the pagesize to the maximum EEPROM size */
14559                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14560
14561                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14562                 tw32(NVRAM_CFG1, nvcfg1);
14563         }
14564 }
14565
14566 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14567 {
14568         u32 nvcfg1, protect = 0;
14569
14570         nvcfg1 = tr32(NVRAM_CFG1);
14571
14572         /* NVRAM protection for TPM */
14573         if (nvcfg1 & (1 << 27)) {
14574                 tg3_flag_set(tp, PROTECTED_NVRAM);
14575                 protect = 1;
14576         }
14577
14578         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14579         switch (nvcfg1) {
14580         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14581         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14582         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14583         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14584                 tp->nvram_jedecnum = JEDEC_ATMEL;
14585                 tg3_flag_set(tp, NVRAM_BUFFERED);
14586                 tg3_flag_set(tp, FLASH);
14587                 tp->nvram_pagesize = 264;
14588                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14589                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14590                         tp->nvram_size = (protect ? 0x3e200 :
14591                                           TG3_NVRAM_SIZE_512KB);
14592                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14593                         tp->nvram_size = (protect ? 0x1f200 :
14594                                           TG3_NVRAM_SIZE_256KB);
14595                 else
14596                         tp->nvram_size = (protect ? 0x1f200 :
14597                                           TG3_NVRAM_SIZE_128KB);
14598                 break;
14599         case FLASH_5752VENDOR_ST_M45PE10:
14600         case FLASH_5752VENDOR_ST_M45PE20:
14601         case FLASH_5752VENDOR_ST_M45PE40:
14602                 tp->nvram_jedecnum = JEDEC_ST;
14603                 tg3_flag_set(tp, NVRAM_BUFFERED);
14604                 tg3_flag_set(tp, FLASH);
14605                 tp->nvram_pagesize = 256;
14606                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14607                         tp->nvram_size = (protect ?
14608                                           TG3_NVRAM_SIZE_64KB :
14609                                           TG3_NVRAM_SIZE_128KB);
14610                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14611                         tp->nvram_size = (protect ?
14612                                           TG3_NVRAM_SIZE_64KB :
14613                                           TG3_NVRAM_SIZE_256KB);
14614                 else
14615                         tp->nvram_size = (protect ?
14616                                           TG3_NVRAM_SIZE_128KB :
14617                                           TG3_NVRAM_SIZE_512KB);
14618                 break;
14619         }
14620 }
14621
14622 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14623 {
14624         u32 nvcfg1;
14625
14626         nvcfg1 = tr32(NVRAM_CFG1);
14627
14628         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14629         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14630         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14631         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14632         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14633                 tp->nvram_jedecnum = JEDEC_ATMEL;
14634                 tg3_flag_set(tp, NVRAM_BUFFERED);
14635                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14636
14637                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14638                 tw32(NVRAM_CFG1, nvcfg1);
14639                 break;
14640         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14641         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14642         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14643         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14644                 tp->nvram_jedecnum = JEDEC_ATMEL;
14645                 tg3_flag_set(tp, NVRAM_BUFFERED);
14646                 tg3_flag_set(tp, FLASH);
14647                 tp->nvram_pagesize = 264;
14648                 break;
14649         case FLASH_5752VENDOR_ST_M45PE10:
14650         case FLASH_5752VENDOR_ST_M45PE20:
14651         case FLASH_5752VENDOR_ST_M45PE40:
14652                 tp->nvram_jedecnum = JEDEC_ST;
14653                 tg3_flag_set(tp, NVRAM_BUFFERED);
14654                 tg3_flag_set(tp, FLASH);
14655                 tp->nvram_pagesize = 256;
14656                 break;
14657         }
14658 }
14659
14660 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14661 {
14662         u32 nvcfg1, protect = 0;
14663
14664         nvcfg1 = tr32(NVRAM_CFG1);
14665
14666         /* NVRAM protection for TPM */
14667         if (nvcfg1 & (1 << 27)) {
14668                 tg3_flag_set(tp, PROTECTED_NVRAM);
14669                 protect = 1;
14670         }
14671
14672         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14673         switch (nvcfg1) {
14674         case FLASH_5761VENDOR_ATMEL_ADB021D:
14675         case FLASH_5761VENDOR_ATMEL_ADB041D:
14676         case FLASH_5761VENDOR_ATMEL_ADB081D:
14677         case FLASH_5761VENDOR_ATMEL_ADB161D:
14678         case FLASH_5761VENDOR_ATMEL_MDB021D:
14679         case FLASH_5761VENDOR_ATMEL_MDB041D:
14680         case FLASH_5761VENDOR_ATMEL_MDB081D:
14681         case FLASH_5761VENDOR_ATMEL_MDB161D:
14682                 tp->nvram_jedecnum = JEDEC_ATMEL;
14683                 tg3_flag_set(tp, NVRAM_BUFFERED);
14684                 tg3_flag_set(tp, FLASH);
14685                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14686                 tp->nvram_pagesize = 256;
14687                 break;
14688         case FLASH_5761VENDOR_ST_A_M45PE20:
14689         case FLASH_5761VENDOR_ST_A_M45PE40:
14690         case FLASH_5761VENDOR_ST_A_M45PE80:
14691         case FLASH_5761VENDOR_ST_A_M45PE16:
14692         case FLASH_5761VENDOR_ST_M_M45PE20:
14693         case FLASH_5761VENDOR_ST_M_M45PE40:
14694         case FLASH_5761VENDOR_ST_M_M45PE80:
14695         case FLASH_5761VENDOR_ST_M_M45PE16:
14696                 tp->nvram_jedecnum = JEDEC_ST;
14697                 tg3_flag_set(tp, NVRAM_BUFFERED);
14698                 tg3_flag_set(tp, FLASH);
14699                 tp->nvram_pagesize = 256;
14700                 break;
14701         }
14702
14703         if (protect) {
14704                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14705         } else {
14706                 switch (nvcfg1) {
14707                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14708                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14709                 case FLASH_5761VENDOR_ST_A_M45PE16:
14710                 case FLASH_5761VENDOR_ST_M_M45PE16:
14711                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14712                         break;
14713                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14714                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14715                 case FLASH_5761VENDOR_ST_A_M45PE80:
14716                 case FLASH_5761VENDOR_ST_M_M45PE80:
14717                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14718                         break;
14719                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14720                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14721                 case FLASH_5761VENDOR_ST_A_M45PE40:
14722                 case FLASH_5761VENDOR_ST_M_M45PE40:
14723                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14724                         break;
14725                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14726                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14727                 case FLASH_5761VENDOR_ST_A_M45PE20:
14728                 case FLASH_5761VENDOR_ST_M_M45PE20:
14729                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14730                         break;
14731                 }
14732         }
14733 }
14734
14735 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14736 {
14737         tp->nvram_jedecnum = JEDEC_ATMEL;
14738         tg3_flag_set(tp, NVRAM_BUFFERED);
14739         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14740 }
14741
14742 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14743 {
14744         u32 nvcfg1;
14745
14746         nvcfg1 = tr32(NVRAM_CFG1);
14747
14748         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14749         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14750         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14751                 tp->nvram_jedecnum = JEDEC_ATMEL;
14752                 tg3_flag_set(tp, NVRAM_BUFFERED);
14753                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14754
14755                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14756                 tw32(NVRAM_CFG1, nvcfg1);
14757                 return;
14758         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14759         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14760         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14761         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14762         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14763         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14764         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14765                 tp->nvram_jedecnum = JEDEC_ATMEL;
14766                 tg3_flag_set(tp, NVRAM_BUFFERED);
14767                 tg3_flag_set(tp, FLASH);
14768
14769                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14770                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14771                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14772                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14773                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14774                         break;
14775                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14776                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14777                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14778                         break;
14779                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14780                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14781                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14782                         break;
14783                 }
14784                 break;
14785         case FLASH_5752VENDOR_ST_M45PE10:
14786         case FLASH_5752VENDOR_ST_M45PE20:
14787         case FLASH_5752VENDOR_ST_M45PE40:
14788                 tp->nvram_jedecnum = JEDEC_ST;
14789                 tg3_flag_set(tp, NVRAM_BUFFERED);
14790                 tg3_flag_set(tp, FLASH);
14791
14792                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14793                 case FLASH_5752VENDOR_ST_M45PE10:
14794                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14795                         break;
14796                 case FLASH_5752VENDOR_ST_M45PE20:
14797                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14798                         break;
14799                 case FLASH_5752VENDOR_ST_M45PE40:
14800                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14801                         break;
14802                 }
14803                 break;
14804         default:
14805                 tg3_flag_set(tp, NO_NVRAM);
14806                 return;
14807         }
14808
14809         tg3_nvram_get_pagesize(tp, nvcfg1);
14810         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14811                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14812 }
14813
14814
14815 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14816 {
14817         u32 nvcfg1;
14818
14819         nvcfg1 = tr32(NVRAM_CFG1);
14820
14821         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14822         case FLASH_5717VENDOR_ATMEL_EEPROM:
14823         case FLASH_5717VENDOR_MICRO_EEPROM:
14824                 tp->nvram_jedecnum = JEDEC_ATMEL;
14825                 tg3_flag_set(tp, NVRAM_BUFFERED);
14826                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14827
14828                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14829                 tw32(NVRAM_CFG1, nvcfg1);
14830                 return;
14831         case FLASH_5717VENDOR_ATMEL_MDB011D:
14832         case FLASH_5717VENDOR_ATMEL_ADB011B:
14833         case FLASH_5717VENDOR_ATMEL_ADB011D:
14834         case FLASH_5717VENDOR_ATMEL_MDB021D:
14835         case FLASH_5717VENDOR_ATMEL_ADB021B:
14836         case FLASH_5717VENDOR_ATMEL_ADB021D:
14837         case FLASH_5717VENDOR_ATMEL_45USPT:
14838                 tp->nvram_jedecnum = JEDEC_ATMEL;
14839                 tg3_flag_set(tp, NVRAM_BUFFERED);
14840                 tg3_flag_set(tp, FLASH);
14841
14842                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14843                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14844                         /* Detect size with tg3_nvram_get_size() */
14845                         break;
14846                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14847                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14848                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14849                         break;
14850                 default:
14851                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14852                         break;
14853                 }
14854                 break;
14855         case FLASH_5717VENDOR_ST_M_M25PE10:
14856         case FLASH_5717VENDOR_ST_A_M25PE10:
14857         case FLASH_5717VENDOR_ST_M_M45PE10:
14858         case FLASH_5717VENDOR_ST_A_M45PE10:
14859         case FLASH_5717VENDOR_ST_M_M25PE20:
14860         case FLASH_5717VENDOR_ST_A_M25PE20:
14861         case FLASH_5717VENDOR_ST_M_M45PE20:
14862         case FLASH_5717VENDOR_ST_A_M45PE20:
14863         case FLASH_5717VENDOR_ST_25USPT:
14864         case FLASH_5717VENDOR_ST_45USPT:
14865                 tp->nvram_jedecnum = JEDEC_ST;
14866                 tg3_flag_set(tp, NVRAM_BUFFERED);
14867                 tg3_flag_set(tp, FLASH);
14868
14869                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14870                 case FLASH_5717VENDOR_ST_M_M25PE20:
14871                 case FLASH_5717VENDOR_ST_M_M45PE20:
14872                         /* Detect size with tg3_nvram_get_size() */
14873                         break;
14874                 case FLASH_5717VENDOR_ST_A_M25PE20:
14875                 case FLASH_5717VENDOR_ST_A_M45PE20:
14876                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14877                         break;
14878                 default:
14879                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14880                         break;
14881                 }
14882                 break;
14883         default:
14884                 tg3_flag_set(tp, NO_NVRAM);
14885                 return;
14886         }
14887
14888         tg3_nvram_get_pagesize(tp, nvcfg1);
14889         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14890                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14891 }
14892
14893 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14894 {
14895         u32 nvcfg1, nvmpinstrp, nv_status;
14896
14897         nvcfg1 = tr32(NVRAM_CFG1);
14898         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14899
14900         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14901                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14902                         tg3_flag_set(tp, NO_NVRAM);
14903                         return;
14904                 }
14905
14906                 switch (nvmpinstrp) {
14907                 case FLASH_5762_MX25L_100:
14908                 case FLASH_5762_MX25L_200:
14909                 case FLASH_5762_MX25L_400:
14910                 case FLASH_5762_MX25L_800:
14911                 case FLASH_5762_MX25L_160_320:
14912                         tp->nvram_pagesize = 4096;
14913                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14914                         tg3_flag_set(tp, NVRAM_BUFFERED);
14915                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14916                         tg3_flag_set(tp, FLASH);
14917                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
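                        /* Per the field names, the autosensed device
                         * ID encodes the flash size as a power of two
                         * in megabytes: size = (1 << devid), scaled
                         * from MB to bytes by the AUTOSENSE_SIZE_IN_MB
                         * shift.
                         */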
14918                         tp->nvram_size =
14919                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14920                                                 AUTOSENSE_DEVID_MASK)
14921                                         << AUTOSENSE_SIZE_IN_MB);
14922                         return;
14923
14924                 case FLASH_5762_EEPROM_HD:
14925                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14926                         break;
14927                 case FLASH_5762_EEPROM_LD:
14928                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14929                         break;
14930                 case FLASH_5720VENDOR_M_ST_M45PE20:
14931                         /* This pinstrap supports multiple sizes, so force it
14932                          * to read the actual size from location 0xf0.
14933                          */
14934                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14935                         break;
14936                 }
14937         }
14938
14939         switch (nvmpinstrp) {
14940         case FLASH_5720_EEPROM_HD:
14941         case FLASH_5720_EEPROM_LD:
14942                 tp->nvram_jedecnum = JEDEC_ATMEL;
14943                 tg3_flag_set(tp, NVRAM_BUFFERED);
14944
14945                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14946                 tw32(NVRAM_CFG1, nvcfg1);
14947                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14948                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14949                 else
14950                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14951                 return;
14952         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14953         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14954         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14955         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14956         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14957         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14958         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14959         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14960         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14961         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14962         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14963         case FLASH_5720VENDOR_ATMEL_45USPT:
14964                 tp->nvram_jedecnum = JEDEC_ATMEL;
14965                 tg3_flag_set(tp, NVRAM_BUFFERED);
14966                 tg3_flag_set(tp, FLASH);
14967
14968                 switch (nvmpinstrp) {
14969                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14970                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14971                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14972                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14973                         break;
14974                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14975                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14976                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14977                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14978                         break;
14979                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14980                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14981                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14982                         break;
14983                 default:
14984                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14985                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14986                         break;
14987                 }
14988                 break;
14989         case FLASH_5720VENDOR_M_ST_M25PE10:
14990         case FLASH_5720VENDOR_M_ST_M45PE10:
14991         case FLASH_5720VENDOR_A_ST_M25PE10:
14992         case FLASH_5720VENDOR_A_ST_M45PE10:
14993         case FLASH_5720VENDOR_M_ST_M25PE20:
14994         case FLASH_5720VENDOR_M_ST_M45PE20:
14995         case FLASH_5720VENDOR_A_ST_M25PE20:
14996         case FLASH_5720VENDOR_A_ST_M45PE20:
14997         case FLASH_5720VENDOR_M_ST_M25PE40:
14998         case FLASH_5720VENDOR_M_ST_M45PE40:
14999         case FLASH_5720VENDOR_A_ST_M25PE40:
15000         case FLASH_5720VENDOR_A_ST_M45PE40:
15001         case FLASH_5720VENDOR_M_ST_M25PE80:
15002         case FLASH_5720VENDOR_M_ST_M45PE80:
15003         case FLASH_5720VENDOR_A_ST_M25PE80:
15004         case FLASH_5720VENDOR_A_ST_M45PE80:
15005         case FLASH_5720VENDOR_ST_25USPT:
15006         case FLASH_5720VENDOR_ST_45USPT:
15007                 tp->nvram_jedecnum = JEDEC_ST;
15008                 tg3_flag_set(tp, NVRAM_BUFFERED);
15009                 tg3_flag_set(tp, FLASH);
15010
15011                 switch (nvmpinstrp) {
15012                 case FLASH_5720VENDOR_M_ST_M25PE20:
15013                 case FLASH_5720VENDOR_M_ST_M45PE20:
15014                 case FLASH_5720VENDOR_A_ST_M25PE20:
15015                 case FLASH_5720VENDOR_A_ST_M45PE20:
15016                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15017                         break;
15018                 case FLASH_5720VENDOR_M_ST_M25PE40:
15019                 case FLASH_5720VENDOR_M_ST_M45PE40:
15020                 case FLASH_5720VENDOR_A_ST_M25PE40:
15021                 case FLASH_5720VENDOR_A_ST_M45PE40:
15022                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15023                         break;
15024                 case FLASH_5720VENDOR_M_ST_M25PE80:
15025                 case FLASH_5720VENDOR_M_ST_M45PE80:
15026                 case FLASH_5720VENDOR_A_ST_M25PE80:
15027                 case FLASH_5720VENDOR_A_ST_M45PE80:
15028                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15029                         break;
15030                 default:
15031                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15032                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15033                         break;
15034                 }
15035                 break;
15036         default:
15037                 tg3_flag_set(tp, NO_NVRAM);
15038                 return;
15039         }
15040
15041         tg3_nvram_get_pagesize(tp, nvcfg1);
15042         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15043                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15044
15045         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15046                 u32 val;
15047
15048                 if (tg3_nvram_read(tp, 0, &val))
15049                         return;
15050
15051                 if (val != TG3_EEPROM_MAGIC &&
15052                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15053                         tg3_flag_set(tp, NO_NVRAM);
15054         }
15055 }
15056
15057 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15058 static void tg3_nvram_init(struct tg3 *tp)
15059 {
15060         if (tg3_flag(tp, IS_SSB_CORE)) {
15061                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
15062                 tg3_flag_clear(tp, NVRAM);
15063                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15064                 tg3_flag_set(tp, NO_NVRAM);
15065                 return;
15066         }
15067
15068         tw32_f(GRC_EEPROM_ADDR,
15069              (EEPROM_ADDR_FSM_RESET |
15070               (EEPROM_DEFAULT_CLOCK_PERIOD <<
15071                EEPROM_ADDR_CLKPERD_SHIFT)));
15072
15073         msleep(1);
15074
15075         /* Enable seeprom accesses. */
15076         tw32_f(GRC_LOCAL_CTRL,
15077              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15078         udelay(100);
15079
15080         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15081             tg3_asic_rev(tp) != ASIC_REV_5701) {
15082                 tg3_flag_set(tp, NVRAM);
15083
15084                 if (tg3_nvram_lock(tp)) {
15085                         netdev_warn(tp->dev,
15086                                     "Cannot get nvram lock, %s failed\n",
15087                                     __func__);
15088                         return;
15089                 }
15090                 tg3_enable_nvram_access(tp);
15091
15092                 tp->nvram_size = 0;
15093
15094                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15095                         tg3_get_5752_nvram_info(tp);
15096                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15097                         tg3_get_5755_nvram_info(tp);
15098                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15099                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15100                          tg3_asic_rev(tp) == ASIC_REV_5785)
15101                         tg3_get_5787_nvram_info(tp);
15102                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15103                         tg3_get_5761_nvram_info(tp);
15104                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15105                         tg3_get_5906_nvram_info(tp);
15106                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15107                          tg3_flag(tp, 57765_CLASS))
15108                         tg3_get_57780_nvram_info(tp);
15109                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15110                          tg3_asic_rev(tp) == ASIC_REV_5719)
15111                         tg3_get_5717_nvram_info(tp);
15112                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15113                          tg3_asic_rev(tp) == ASIC_REV_5762)
15114                         tg3_get_5720_nvram_info(tp);
15115                 else
15116                         tg3_get_nvram_info(tp);
15117
15118                 if (tp->nvram_size == 0)
15119                         tg3_get_nvram_size(tp);
15120
15121                 tg3_disable_nvram_access(tp);
15122                 tg3_nvram_unlock(tp);
15123
15124         } else {
15125                 tg3_flag_clear(tp, NVRAM);
15126                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15127
15128                 tg3_get_eeprom_size(tp);
15129         }
15130 }
15131
15132 struct subsys_tbl_ent {
15133         u16 subsys_vendor, subsys_devid;
15134         u32 phy_id;
15135 };
15136
15137 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15138         /* Broadcom boards. */
15139         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15140           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15141         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15142           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15143         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15144           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15145         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15146           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15147         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15148           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15149         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15150           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15151         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15152           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15153         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15154           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15155         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15156           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15157         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15158           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15159         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15160           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15161
15162         /* 3com boards. */
15163         { TG3PCI_SUBVENDOR_ID_3COM,
15164           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15165         { TG3PCI_SUBVENDOR_ID_3COM,
15166           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15167         { TG3PCI_SUBVENDOR_ID_3COM,
15168           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15169         { TG3PCI_SUBVENDOR_ID_3COM,
15170           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15171         { TG3PCI_SUBVENDOR_ID_3COM,
15172           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15173
15174         /* DELL boards. */
15175         { TG3PCI_SUBVENDOR_ID_DELL,
15176           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15177         { TG3PCI_SUBVENDOR_ID_DELL,
15178           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15179         { TG3PCI_SUBVENDOR_ID_DELL,
15180           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15181         { TG3PCI_SUBVENDOR_ID_DELL,
15182           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15183
15184         /* Compaq boards. */
15185         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15186           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15187         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15188           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15189         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15190           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15191         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15192           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15193         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15194           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15195
15196         /* IBM boards. */
15197         { TG3PCI_SUBVENDOR_ID_IBM,
15198           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15199 };
15200
15201 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15202 {
15203         int i;
15204
15205         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15206                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15207                      tp->pdev->subsystem_vendor) &&
15208                     (subsys_id_to_phy_id[i].subsys_devid ==
15209                      tp->pdev->subsystem_device))
15210                         return &subsys_id_to_phy_id[i];
15211         }
15212         return NULL;
15213 }
15214
15215 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15216 {
15217         u32 val;
15218
15219         tp->phy_id = TG3_PHY_ID_INVALID;
15220         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15221
15222         /* Assume an onboard, WOL-capable device by default. */
15223         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15224         tg3_flag_set(tp, WOL_CAP);
15225
15226         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15227                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15228                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15229                         tg3_flag_set(tp, IS_NIC);
15230                 }
15231                 val = tr32(VCPU_CFGSHDW);
15232                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15233                         tg3_flag_set(tp, ASPM_WORKAROUND);
15234                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15235                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15236                         tg3_flag_set(tp, WOL_ENABLE);
15237                         device_set_wakeup_enable(&tp->pdev->dev, true);
15238                 }
15239                 goto done;
15240         }
15241
15242         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15243         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15244                 u32 nic_cfg, led_cfg;
15245                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15246                 u32 nic_phy_id, ver, eeprom_phy_id;
15247                 int eeprom_phy_serdes = 0;
15248
15249                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15250                 tp->nic_sram_data_cfg = nic_cfg;
15251
15252                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15253                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15254                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15255                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15256                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15257                     (ver > 0) && (ver < 0x100))
15258                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15259
15260                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15261                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15262
15263                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15264                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15265                     tg3_asic_rev(tp) == ASIC_REV_5720)
15266                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15267
15268                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15269                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15270                         eeprom_phy_serdes = 1;
15271
15272                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15273                 if (nic_phy_id != 0) {
15274                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15275                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15276
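                        /* Reassemble the PHY ID from the two SRAM
                         * words using the same packing the driver
                         * uses for IDs read over MDIO: the upper half
                         * of id1 shifted up by 10, plus the high six
                         * and low ten bits of id2.
                         */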
15277                         eeprom_phy_id  = (id1 >> 16) << 10;
15278                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15279                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15280                 } else
15281                         eeprom_phy_id = 0;
15282
15283                 tp->phy_id = eeprom_phy_id;
15284                 if (eeprom_phy_serdes) {
15285                         if (!tg3_flag(tp, 5705_PLUS))
15286                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15287                         else
15288                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15289                 }
15290
15291                 if (tg3_flag(tp, 5750_PLUS))
15292                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15293                                     SHASTA_EXT_LED_MODE_MASK);
15294                 else
15295                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15296
15297                 switch (led_cfg) {
15298                 default:
15299                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15300                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15301                         break;
15302
15303                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15304                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15305                         break;
15306
15307                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15308                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15309
15310                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15311                          * read from some older 5700/5701 bootcode.
15312                          */
15313                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15314                             tg3_asic_rev(tp) == ASIC_REV_5701)
15315                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15316
15317                         break;
15318
15319                 case SHASTA_EXT_LED_SHARED:
15320                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15321                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15322                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15323                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15324                                                  LED_CTRL_MODE_PHY_2);
15325
15326                         if (tg3_flag(tp, 5717_PLUS) ||
15327                             tg3_asic_rev(tp) == ASIC_REV_5762)
15328                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15329                                                 LED_CTRL_BLINK_RATE_MASK;
15330
15331                         break;
15332
15333                 case SHASTA_EXT_LED_MAC:
15334                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15335                         break;
15336
15337                 case SHASTA_EXT_LED_COMBO:
15338                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15339                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15340                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15341                                                  LED_CTRL_MODE_PHY_2);
15342                         break;
15343
15344                 }
15345
15346                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15347                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15348                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15349                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15350
15351                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15352                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15353
15354                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15355                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15356                         if ((tp->pdev->subsystem_vendor ==
15357                              PCI_VENDOR_ID_ARIMA) &&
15358                             (tp->pdev->subsystem_device == 0x205a ||
15359                              tp->pdev->subsystem_device == 0x2063))
15360                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15361                 } else {
15362                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15363                         tg3_flag_set(tp, IS_NIC);
15364                 }
15365
15366                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15367                         tg3_flag_set(tp, ENABLE_ASF);
15368                         if (tg3_flag(tp, 5750_PLUS))
15369                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15370                 }
15371
15372                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15373                     tg3_flag(tp, 5750_PLUS))
15374                         tg3_flag_set(tp, ENABLE_APE);
15375
15376                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15377                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15378                         tg3_flag_clear(tp, WOL_CAP);
15379
15380                 if (tg3_flag(tp, WOL_CAP) &&
15381                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15382                         tg3_flag_set(tp, WOL_ENABLE);
15383                         device_set_wakeup_enable(&tp->pdev->dev, true);
15384                 }
15385
15386                 if (cfg2 & (1 << 17))
15387                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15388
15389                 /* SerDes signal pre-emphasis in register 0x590 is set
15390                  * by the bootcode if bit 18 is set. */
15391                 if (cfg2 & (1 << 18))
15392                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15393
15394                 if ((tg3_flag(tp, 57765_PLUS) ||
15395                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15396                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15397                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15398                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15399
15400                 if (tg3_flag(tp, PCI_EXPRESS)) {
15401                         u32 cfg3;
15402
15403                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15404                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15405                             !tg3_flag(tp, 57765_PLUS) &&
15406                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15407                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15408                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15409                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15410                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15411                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15412                 }
15413
15414                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15415                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15416                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15417                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15418                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15419                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15420
15421                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15422                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15423         }
15424 done:
15425         if (tg3_flag(tp, WOL_CAP))
15426                 device_set_wakeup_enable(&tp->pdev->dev,
15427                                          tg3_flag(tp, WOL_ENABLE));
15428         else
15429                 device_set_wakeup_capable(&tp->pdev->dev, false);
15430 }
15431
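/* Read one 32-bit word from the APE OTP region.  The flow below takes
 * the NVRAM lock, programs the OTP address (the offset is apparently
 * scaled to a bit address via "offset * 8"), issues a read command,
 * and polls OTP_STATUS for up to ~1 ms (100 x 10 us) before giving up
 * with -EBUSY.
 */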
15432 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15433 {
15434         int i, err;
15435         u32 val2, off = offset * 8;
15436
15437         err = tg3_nvram_lock(tp);
15438         if (err)
15439                 return err;
15440
15441         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15442         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15443                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15444         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15445         udelay(10);
15446
15447         for (i = 0; i < 100; i++) {
15448                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15449                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15450                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15451                         break;
15452                 }
15453                 udelay(10);
15454         }
15455
15456         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15457
15458         tg3_nvram_unlock(tp);
15459         if (val2 & APE_OTP_STATUS_CMD_DONE)
15460                 return 0;
15461
15462         return -EBUSY;
15463 }
15464
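/* Issue a single OTP controller command through the GRC-mapped
 * OTP_CTRL register and poll OTP_STATUS for completion.
 */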
15465 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15466 {
15467         int i;
15468         u32 val;
15469
15470         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15471         tw32(OTP_CTRL, cmd);
15472
15473         /* Wait for up to 1 ms for command to execute. */
15474         for (i = 0; i < 100; i++) {
15475                 val = tr32(OTP_STATUS);
15476                 if (val & OTP_STATUS_CMD_DONE)
15477                         break;
15478                 udelay(10);
15479         }
15480
15481         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15482 }
15483
15484 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15485  * configuration is a 32-bit value that straddles the alignment boundary.
15486  * We do two 32-bit reads and then shift and merge the results.
15487  */
15488 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15489 {
15490         u32 bhalf_otp, thalf_otp;
15491
15492         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15493
15494         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15495                 return 0;
15496
15497         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15498
15499         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15500                 return 0;
15501
15502         thalf_otp = tr32(OTP_READ_DATA);
15503
15504         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15505
15506         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15507                 return 0;
15508
15509         bhalf_otp = tr32(OTP_READ_DATA);
15510
15511         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15512 }
15513
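/* Set up the default, autoneg-everything link configuration: gigabit
 * modes unless the PHY is 10/100-only (with 1000Half additionally
 * gated by TG3_PHYFLG_DISABLE_1G_HD_ADV), 10/100 plus TP for copper
 * PHYs, and FIBRE for any serdes flavor.
 */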
15514 static void tg3_phy_init_link_config(struct tg3 *tp)
15515 {
15516         u32 adv = ADVERTISED_Autoneg;
15517
15518         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15519                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15520                         adv |= ADVERTISED_1000baseT_Half;
15521                 adv |= ADVERTISED_1000baseT_Full;
15522         }
15523
15524         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15525                 adv |= ADVERTISED_100baseT_Half |
15526                        ADVERTISED_100baseT_Full |
15527                        ADVERTISED_10baseT_Half |
15528                        ADVERTISED_10baseT_Full |
15529                        ADVERTISED_TP;
15530         else
15531                 adv |= ADVERTISED_FIBRE;
15532
15533         tp->link_config.advertising = adv;
15534         tp->link_config.speed = SPEED_UNKNOWN;
15535         tp->link_config.duplex = DUPLEX_UNKNOWN;
15536         tp->link_config.autoneg = AUTONEG_ENABLE;
15537         tp->link_config.active_speed = SPEED_UNKNOWN;
15538         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15539
15540         tp->old_link = -1;
15541 }
15542
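/* Probe for the PHY.  Roughly: pick the per-function APE PHY lock,
 * defer to phylib when in use, read the MII ID registers unless
 * ASF/APE firmware owns the PHY, and otherwise fall back to the
 * eeprom-provided ID or the hard-coded subsystem table.  EEE
 * capability is decided here as well, and the PHY may be reset and
 * autonegotiation restarted.
 */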
15543 static int tg3_phy_probe(struct tg3 *tp)
15544 {
15545         u32 hw_phy_id_1, hw_phy_id_2;
15546         u32 hw_phy_id, hw_phy_id_masked;
15547         int err;
15548
15549         /* flow control autonegotiation is default behavior */
15550         tg3_flag_set(tp, PAUSE_AUTONEG);
15551         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15552
15553         if (tg3_flag(tp, ENABLE_APE)) {
15554                 switch (tp->pci_fn) {
15555                 case 0:
15556                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15557                         break;
15558                 case 1:
15559                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15560                         break;
15561                 case 2:
15562                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15563                         break;
15564                 case 3:
15565                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15566                         break;
15567                 }
15568         }
15569
15570         if (!tg3_flag(tp, ENABLE_ASF) &&
15571             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15572             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15573                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15574                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15575
15576         if (tg3_flag(tp, USE_PHYLIB))
15577                 return tg3_phy_init(tp);
15578
15579         /* Reading the PHY ID register can conflict with ASF
15580          * firmware access to the PHY hardware.
15581          */
15582         err = 0;
15583         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15584                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15585         } else {
15586                 /* Now read the physical PHY_ID from the chip and verify
15587                  * that it is sane.  If it doesn't look good, we fall back
15588                  * to the hard-coded, table-based PHY_ID or, failing
15589                  * that, to the value found in the eeprom area.
15590                  */
15591                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15592                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15593
15594                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15595                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15596                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15597
15598                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15599         }
15600
15601         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15602                 tp->phy_id = hw_phy_id;
15603                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15604                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15605                 else
15606                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15607         } else {
15608                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15609                         /* Do nothing, phy ID already set up in
15610                          * tg3_get_eeprom_hw_cfg().
15611                          */
15612                 } else {
15613                         struct subsys_tbl_ent *p;
15614
15615                         /* No eeprom signature?  Try the hardcoded
15616                          * subsys device table.
15617                          */
15618                         p = tg3_lookup_by_subsys(tp);
15619                         if (p) {
15620                                 tp->phy_id = p->phy_id;
15621                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15622                                 /* So far we have seen the IDs 0xbc050cd0,
15623                                  * 0xbc050f80 and 0xbc050c30 on devices
15624                                  * connected to a BCM4785, and there are
15625                                  * probably more.  For now, just assume that
15626                                  * the phy is supported when it is connected
15627                                  * to an SSB core.
15628                                  */
15629                                 return -ENODEV;
15630                         }
15631
15632                         if (!tp->phy_id ||
15633                             tp->phy_id == TG3_PHY_ID_BCM8002)
15634                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15635                 }
15636         }
15637
15638         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15639             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15640              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15641              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15642              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15643              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15644               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15645              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15646               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15647                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15648
15649                 linkmode_zero(tp->eee.supported);
15650                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15651                                  tp->eee.supported);
15652                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15653                                  tp->eee.supported);
15654                 linkmode_copy(tp->eee.advertised, tp->eee.supported);
15655
15656                 tp->eee.eee_enabled = 1;
15657                 tp->eee.tx_lpi_enabled = 1;
15658                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15659         }
15660
15661         tg3_phy_init_link_config(tp);
15662
15663         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15664             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15665             !tg3_flag(tp, ENABLE_APE) &&
15666             !tg3_flag(tp, ENABLE_ASF)) {
15667                 u32 bmsr, dummy;
15668
15669                 tg3_readphy(tp, MII_BMSR, &bmsr);
15670                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15671                     (bmsr & BMSR_LSTATUS))
15672                         goto skip_phy_reset;
15673
15674                 err = tg3_phy_reset(tp);
15675                 if (err)
15676                         return err;
15677
15678                 tg3_phy_set_wirespeed(tp);
15679
15680                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15681                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15682                                             tp->link_config.flowctrl);
15683
15684                         tg3_writephy(tp, MII_BMCR,
15685                                      BMCR_ANENABLE | BMCR_ANRESTART);
15686                 }
15687         }
15688
15689 skip_phy_reset:
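	/* If a 5401 PHY is found, the DSP init below runs twice: when the
	 * first attempt succeeds it is simply repeated, and the second
	 * attempt's result is what gets returned.
	 */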
15690         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15691                 err = tg3_init_5401phy_dsp(tp);
15692                 if (err)
15693                         return err;
15694
15695                 err = tg3_init_5401phy_dsp(tp);
15696         }
15697
15698         return err;
15699 }
15700
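/* Extract identifying strings from the PCI VPD read-only section: on
 * boards whose manufacturer ID is "1028" (Dell), the vendor-specific
 * V0 keyword seeds the firmware version string; the PN keyword
 * supplies the board part number, with the device-ID based fallback
 * table below covering boards without usable VPD.
 */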
15701 static void tg3_read_vpd(struct tg3 *tp)
15702 {
15703         u8 *vpd_data;
15704         unsigned int len, vpdlen;
15705         int i;
15706
15707         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15708         if (!vpd_data)
15709                 goto out_no_vpd;
15710
15711         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15712                                          PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15713         if (i < 0)
15714                 goto partno;
15715
15716         if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15717                 goto partno;
15718
15719         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15720                                          PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15721         if (i < 0)
15722                 goto partno;
15723
15724         memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15725         snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15726
15727 partno:
15728         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15729                                          PCI_VPD_RO_KEYWORD_PARTNO, &len);
15730         if (i < 0)
15731                 goto out_not_found;
15732
15733         if (len > TG3_BPN_SIZE)
15734                 goto out_not_found;
15735
15736         memcpy(tp->board_part_number, &vpd_data[i], len);
15737
15738 out_not_found:
15739         kfree(vpd_data);
15740         if (tp->board_part_number[0])
15741                 return;
15742
15743 out_no_vpd:
15744         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15745                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15746                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15747                         strcpy(tp->board_part_number, "BCM5717");
15748                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15749                         strcpy(tp->board_part_number, "BCM5718");
15750                 else
15751                         goto nomatch;
15752         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15753                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15754                         strcpy(tp->board_part_number, "BCM57780");
15755                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15756                         strcpy(tp->board_part_number, "BCM57760");
15757                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15758                         strcpy(tp->board_part_number, "BCM57790");
15759                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15760                         strcpy(tp->board_part_number, "BCM57788");
15761                 else
15762                         goto nomatch;
15763         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15764                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15765                         strcpy(tp->board_part_number, "BCM57761");
15766                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15767                         strcpy(tp->board_part_number, "BCM57765");
15768                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15769                         strcpy(tp->board_part_number, "BCM57781");
15770                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15771                         strcpy(tp->board_part_number, "BCM57785");
15772                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15773                         strcpy(tp->board_part_number, "BCM57791");
15774                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15775                         strcpy(tp->board_part_number, "BCM57795");
15776                 else
15777                         goto nomatch;
15778         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15779                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15780                         strcpy(tp->board_part_number, "BCM57762");
15781                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15782                         strcpy(tp->board_part_number, "BCM57766");
15783                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15784                         strcpy(tp->board_part_number, "BCM57782");
15785                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15786                         strcpy(tp->board_part_number, "BCM57786");
15787                 else
15788                         goto nomatch;
15789         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15790                 strcpy(tp->board_part_number, "BCM95906");
15791         } else {
15792 nomatch:
15793                 strcpy(tp->board_part_number, "none");
15794         }
15795 }
15796
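/* A firmware image header is taken as valid when its first word
 * masked with 0xfc000000 equals 0x0c000000 and the following word
 * is zero.
 */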
15797 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15798 {
15799         u32 val;
15800
15801         if (tg3_nvram_read(tp, offset, &val) ||
15802             (val & 0xfc000000) != 0x0c000000 ||
15803             tg3_nvram_read(tp, offset + 4, &val) ||
15804             val != 0)
15805                 return 0;
15806
15807         return 1;
15808 }
15809
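/* Append the bootcode version to fw_ver.  Newer images embed a
 * 16-byte version string whose NVRAM location is derived from the
 * word at offset + 8; older ones only provide a major/minor pair in
 * the NVRAM pointer table, reported as "vM.mm".
 */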
15810 static void tg3_read_bc_ver(struct tg3 *tp)
15811 {
15812         u32 val, offset, start, ver_offset;
15813         int i, dst_off;
15814         bool newver = false;
15815
15816         if (tg3_nvram_read(tp, 0xc, &offset) ||
15817             tg3_nvram_read(tp, 0x4, &start))
15818                 return;
15819
15820         offset = tg3_nvram_logical_addr(tp, offset);
15821
15822         if (tg3_nvram_read(tp, offset, &val))
15823                 return;
15824
15825         if ((val & 0xfc000000) == 0x0c000000) {
15826                 if (tg3_nvram_read(tp, offset + 4, &val))
15827                         return;
15828
15829                 if (val == 0)
15830                         newver = true;
15831         }
15832
15833         dst_off = strlen(tp->fw_ver);
15834
15835         if (newver) {
15836                 if (TG3_VER_SIZE - dst_off < 16 ||
15837                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15838                         return;
15839
15840                 offset = offset + ver_offset - start;
15841                 for (i = 0; i < 16; i += 4) {
15842                         __be32 v;
15843                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15844                                 return;
15845
15846                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15847                 }
15848         } else {
15849                 u32 major, minor;
15850
15851                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15852                         return;
15853
15854                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15855                         TG3_NVM_BCVER_MAJSFT;
15856                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15857                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15858                          "v%d.%02d", major, minor);
15859         }
15860 }
15861
15862 static void tg3_read_hwsb_ver(struct tg3 *tp)
15863 {
15864         u32 val, major, minor;
15865
15866         /* Use native endian representation */
15867         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15868                 return;
15869
15870         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15871                 TG3_NVM_HWSB_CFG1_MAJSFT;
15872         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15873                 TG3_NVM_HWSB_CFG1_MINSFT;
15874
15875         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15876 }
15877
15878 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15879 {
15880         u32 offset, major, minor, build;
15881
15882         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15883
15884         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15885                 return;
15886
15887         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15888         case TG3_EEPROM_SB_REVISION_0:
15889                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15890                 break;
15891         case TG3_EEPROM_SB_REVISION_2:
15892                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15893                 break;
15894         case TG3_EEPROM_SB_REVISION_3:
15895                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15896                 break;
15897         case TG3_EEPROM_SB_REVISION_4:
15898                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15899                 break;
15900         case TG3_EEPROM_SB_REVISION_5:
15901                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15902                 break;
15903         case TG3_EEPROM_SB_REVISION_6:
15904                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15905                 break;
15906         default:
15907                 return;
15908         }
15909
15910         if (tg3_nvram_read(tp, offset, &val))
15911                 return;
15912
15913         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15914                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15915         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15916                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15917         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15918
15919         if (minor > 99 || build > 26)
15920                 return;
15921
15922         offset = strlen(tp->fw_ver);
15923         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15924                  " v%d.%02d", major, minor);
15925
15926         if (build > 0) {
15927                 offset = strlen(tp->fw_ver);
15928                 if (offset < TG3_VER_SIZE - 1)
15929                         tp->fw_ver[offset] = 'a' + build - 1;
15930         }
15931 }
15932
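/* Locate the ASF INI management-firmware entry in the NVRAM directory
 * and append up to 16 bytes of its version string, separated by ", ",
 * to fw_ver.
 */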
15933 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15934 {
15935         u32 val, offset, start;
15936         int i, vlen;
15937
15938         for (offset = TG3_NVM_DIR_START;
15939              offset < TG3_NVM_DIR_END;
15940              offset += TG3_NVM_DIRENT_SIZE) {
15941                 if (tg3_nvram_read(tp, offset, &val))
15942                         return;
15943
15944                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15945                         break;
15946         }
15947
15948         if (offset == TG3_NVM_DIR_END)
15949                 return;
15950
15951         if (!tg3_flag(tp, 5705_PLUS))
15952                 start = 0x08000000;
15953         else if (tg3_nvram_read(tp, offset - 4, &start))
15954                 return;
15955
15956         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15957             !tg3_fw_img_is_valid(tp, offset) ||
15958             tg3_nvram_read(tp, offset + 8, &val))
15959                 return;
15960
15961         offset += val - start;
15962
15963         vlen = strlen(tp->fw_ver);
15964
15965         tp->fw_ver[vlen++] = ',';
15966         tp->fw_ver[vlen++] = ' ';
15967
15968         for (i = 0; i < 4; i++) {
15969                 __be32 v;
15970                 if (tg3_nvram_read_be32(tp, offset, &v))
15971                         return;
15972
15973                 offset += sizeof(v);
15974
15975                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15976                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15977                         break;
15978                 }
15979
15980                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15981                 vlen += sizeof(v);
15982         }
15983 }
15984
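/* Set APE_HAS_NCSI when APE firmware is present (the segment
 * signature matches), reports ready, and advertises the NCSI
 * feature bit.
 */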
15985 static void tg3_probe_ncsi(struct tg3 *tp)
15986 {
15987         u32 apedata;
15988
15989         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15990         if (apedata != APE_SEG_SIG_MAGIC)
15991                 return;
15992
15993         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15994         if (!(apedata & APE_FW_STATUS_READY))
15995                 return;
15996
15997         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15998                 tg3_flag_set(tp, APE_HAS_NCSI);
15999 }
16000
16001 static void tg3_read_dash_ver(struct tg3 *tp)
16002 {
16003         int vlen;
16004         u32 apedata;
16005         char *fwtype;
16006
16007         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16008
16009         if (tg3_flag(tp, APE_HAS_NCSI))
16010                 fwtype = "NCSI";
16011         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16012                 fwtype = "SMASH";
16013         else
16014                 fwtype = "DASH";
16015
16016         vlen = strlen(tp->fw_ver);
16017
16018         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16019                  fwtype,
16020                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16021                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16022                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16023                  (apedata & APE_FW_VERSION_BLDMSK));
16024 }
16025
16026 static void tg3_read_otp_ver(struct tg3 *tp)
16027 {
16028         u32 val, val2;
16029
16030         if (tg3_asic_rev(tp) != ASIC_REV_5762)
16031                 return;
16032
16033         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16034             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16035             TG3_OTP_MAGIC0_VALID(val)) {
16036                 u64 val64 = (u64) val << 32 | val2;
16037                 u32 ver = 0;
16038                 int i, vlen;
16039
16040                 for (i = 0; i < 7; i++) {
16041                         if ((val64 & 0xff) == 0)
16042                                 break;
16043                         ver = val64 & 0xff;
16044                         val64 >>= 8;
16045                 }
16046                 vlen = strlen(tp->fw_ver);
16047                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16048         }
16049 }
16050
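/* Build the composite firmware version string.  The NVRAM signature
 * word selects the parser: bootcode (TG3_EEPROM_MAGIC), selfboot
 * firmware, or selfboot hardware format; ASF/APE management firmware
 * versions are appended afterwards when applicable.
 */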
16051 static void tg3_read_fw_ver(struct tg3 *tp)
16052 {
16053         u32 val;
16054         bool vpd_vers = false;
16055
16056         if (tp->fw_ver[0] != 0)
16057                 vpd_vers = true;
16058
16059         if (tg3_flag(tp, NO_NVRAM)) {
16060                 strcat(tp->fw_ver, "sb");
16061                 tg3_read_otp_ver(tp);
16062                 return;
16063         }
16064
16065         if (tg3_nvram_read(tp, 0, &val))
16066                 return;
16067
16068         if (val == TG3_EEPROM_MAGIC)
16069                 tg3_read_bc_ver(tp);
16070         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16071                 tg3_read_sb_ver(tp, val);
16072         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16073                 tg3_read_hwsb_ver(tp);
16074
16075         if (tg3_flag(tp, ENABLE_ASF)) {
16076                 if (tg3_flag(tp, ENABLE_APE)) {
16077                         tg3_probe_ncsi(tp);
16078                         if (!vpd_vers)
16079                                 tg3_read_dash_ver(tp);
16080                 } else if (!vpd_vers) {
16081                         tg3_read_mgmtfw_ver(tp);
16082                 }
16083         }
16084
16085         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16086 }
16087
16088 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16089 {
16090         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16091                 return TG3_RX_RET_MAX_SIZE_5717;
16092         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16093                 return TG3_RX_RET_MAX_SIZE_5700;
16094         else
16095                 return TG3_RX_RET_MAX_SIZE_5705;
16096 }
16097
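/* Host bridge chipsets known to reorder posted writes; see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants().
 */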
16098 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16099         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16100         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16101         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16102         { },
16103 };
16104
16105 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16106 {
16107         struct pci_dev *peer;
16108         unsigned int func, devnr = tp->pdev->devfn & ~7;
16109
16110         for (func = 0; func < 8; func++) {
16111                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16112                 if (peer && peer != tp->pdev)
16113                         break;
16114                 pci_dev_put(peer);
16115         }
16116         /* 5704 can be configured in single-port mode; set peer to
16117          * tp->pdev in that case.
16118          */
16119         if (!peer) {
16120                 peer = tp->pdev;
16121                 return peer;
16122         }
16123
16124         /*
16125          * We don't need to keep the refcount elevated; there's no way
16126          * to remove one half of this device without removing the other.
16127          */
16128         pci_dev_put(peer);
16129
16130         return peer;
16131 }
16132
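/* Determine the chip revision ID.  It normally lives in the upper
 * bits of TG3PCI_MISC_HOST_CTRL; devices that report
 * ASIC_REV_USE_PROD_ID_REG there instead expose it through a
 * product-ID register whose location (GEN2 / GEN15 / legacy) depends
 * on the PCI device ID.  The derived family flags (5705_PLUS,
 * 5750_PLUS, 57765_PLUS, ...) are set here too.
 */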
16133 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16134 {
16135         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16136         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16137                 u32 reg;
16138
16139                 /* All devices that use the alternate
16140                  * ASIC REV location have a CPMU.
16141                  */
16142                 tg3_flag_set(tp, CPMU_PRESENT);
16143
16144                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16145                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16146                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16147                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16148                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16149                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16150                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16151                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16152                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16153                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16154                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16155                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16156                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16157                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16158                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16159                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16160                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16161                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16162                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16163                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16164                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16165                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16166                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16167                 else
16168                         reg = TG3PCI_PRODID_ASICREV;
16169
16170                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16171         }
16172
16173         /* Wrong chip ID in 5752 A0. This code can be removed later
16174          * as A0 is not in production.
16175          */
16176         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16177                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16178
16179         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16180                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16181
16182         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16183             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16184             tg3_asic_rev(tp) == ASIC_REV_5720)
16185                 tg3_flag_set(tp, 5717_PLUS);
16186
16187         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16188             tg3_asic_rev(tp) == ASIC_REV_57766)
16189                 tg3_flag_set(tp, 57765_CLASS);
16190
16191         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16192              tg3_asic_rev(tp) == ASIC_REV_5762)
16193                 tg3_flag_set(tp, 57765_PLUS);
16194
16195         /* Intentionally exclude ASIC_REV_5906 */
16196         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16197             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16198             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16199             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16200             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16201             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16202             tg3_flag(tp, 57765_PLUS))
16203                 tg3_flag_set(tp, 5755_PLUS);
16204
16205         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16206             tg3_asic_rev(tp) == ASIC_REV_5714)
16207                 tg3_flag_set(tp, 5780_CLASS);
16208
16209         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16210             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16211             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16212             tg3_flag(tp, 5755_PLUS) ||
16213             tg3_flag(tp, 5780_CLASS))
16214                 tg3_flag_set(tp, 5750_PLUS);
16215
16216         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16217             tg3_flag(tp, 5750_PLUS))
16218                 tg3_flag_set(tp, 5705_PLUS);
16219 }
16220
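/* A device is 10/100-only if it is a 5703 with one of two specific
 * board IDs, uses a FET PHY, or is flagged as such in the PCI ID
 * table (with an extra check for 5705 parts).
 */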
16221 static bool tg3_10_100_only_device(struct tg3 *tp,
16222                                    const struct pci_device_id *ent)
16223 {
16224         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16225
16226         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16227              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16228             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16229                 return true;
16230
16231         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16232                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16233                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16234                                 return true;
16235                 } else {
16236                         return true;
16237                 }
16238         }
16239
16240         return false;
16241 }
16242
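/* One-time probe of chip, bus and chipset characteristics: selects
 * register access methods, DMA and TSO capabilities, and the various
 * hardware workaround flags used throughout the driver.
 */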
16243 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16244 {
16245         u32 misc_ctrl_reg;
16246         u32 pci_state_reg, grc_misc_cfg;
16247         u32 val;
16248         u16 pci_cmd;
16249         int err;
16250
16251         /* Force memory write invalidate off.  If we leave it on,
16252          * then on 5700_BX chips we have to enable a workaround.
16253          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16254          * to match the cacheline size.  The Broadcom driver has this
16255          * workaround but turns MWI off all the time, so the workaround
16256          * is never used.  This seems to suggest that it is insufficient.
16257          */
16258         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16259         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16260         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16261
16262         /* Important! -- Make sure register accesses are byteswapped
16263          * correctly.  Also, for those chips that require it, make
16264          * sure that indirect register accesses are enabled before
16265          * the first operation.
16266          */
16267         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16268                               &misc_ctrl_reg);
16269         tp->misc_host_ctrl |= (misc_ctrl_reg &
16270                                MISC_HOST_CTRL_CHIPREV);
16271         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16272                                tp->misc_host_ctrl);
16273
16274         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16275
16276         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16277          * we need to disable memory and use config. cycles
16278          * only to access all registers. The 5702/03 chips
16279          * can mistakenly decode the special cycles from the
16280          * ICH chipsets as memory write cycles, causing corruption
16281          * of register and memory space. Only certain ICH bridges
16282          * will drive special cycles with non-zero data during the
16283          * address phase which can fall within the 5703's address
16284          * range. This is not an ICH bug as the PCI spec allows
16285          * non-zero addresses during special cycles.  However, only
16286          * these ICH bridges are known to drive non-zero addresses
16287          * during special cycles.
16288          *
16289          * Since special cycles do not cross PCI bridges, we only
16290          * enable this workaround if the 5703 is on the secondary
16291          * bus of these ICH bridges.
16292          */
16293         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16294             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16295                 static struct tg3_dev_id {
16296                         u32     vendor;
16297                         u32     device;
16298                         u32     rev;
16299                 } ich_chipsets[] = {
16300                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16301                           PCI_ANY_ID },
16302                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16303                           PCI_ANY_ID },
16304                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16305                           0xa },
16306                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16307                           PCI_ANY_ID },
16308                         { },
16309                 };
16310                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16311                 struct pci_dev *bridge = NULL;
16312
16313                 while (pci_id->vendor != 0) {
16314                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16315                                                 bridge);
16316                         if (!bridge) {
16317                                 pci_id++;
16318                                 continue;
16319                         }
16320                         if (pci_id->rev != PCI_ANY_ID) {
16321                                 if (bridge->revision > pci_id->rev)
16322                                         continue;
16323                         }
16324                         if (bridge->subordinate &&
16325                             (bridge->subordinate->number ==
16326                              tp->pdev->bus->number)) {
16327                                 tg3_flag_set(tp, ICH_WORKAROUND);
16328                                 pci_dev_put(bridge);
16329                                 break;
16330                         }
16331                 }
16332         }
16333
16334         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16335                 static struct tg3_dev_id {
16336                         u32     vendor;
16337                         u32     device;
16338                 } bridge_chipsets[] = {
16339                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16340                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16341                         { },
16342                 };
16343                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16344                 struct pci_dev *bridge = NULL;
16345
16346                 while (pci_id->vendor != 0) {
16347                         bridge = pci_get_device(pci_id->vendor,
16348                                                 pci_id->device,
16349                                                 bridge);
16350                         if (!bridge) {
16351                                 pci_id++;
16352                                 continue;
16353                         }
16354                         if (bridge->subordinate &&
16355                             (bridge->subordinate->number <=
16356                              tp->pdev->bus->number) &&
16357                             (bridge->subordinate->busn_res.end >=
16358                              tp->pdev->bus->number)) {
16359                                 tg3_flag_set(tp, 5701_DMA_BUG);
16360                                 pci_dev_put(bridge);
16361                                 break;
16362                         }
16363                 }
16364         }
16365
16366         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16367          * DMA addresses > 40 bits.  This bridge may have additional
16368          * 57xx devices behind it in some 4-port NIC designs, for example.
16369          * Any tg3 device found behind the bridge will also need the 40-bit
16370          * DMA workaround.
16371          */
16372         if (tg3_flag(tp, 5780_CLASS)) {
16373                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16374                 tp->msi_cap = tp->pdev->msi_cap;
16375         } else {
16376                 struct pci_dev *bridge = NULL;
16377
16378                 do {
16379                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16380                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16381                                                 bridge);
16382                         if (bridge && bridge->subordinate &&
16383                             (bridge->subordinate->number <=
16384                              tp->pdev->bus->number) &&
16385                             (bridge->subordinate->busn_res.end >=
16386                              tp->pdev->bus->number)) {
16387                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16388                                 pci_dev_put(bridge);
16389                                 break;
16390                         }
16391                 } while (bridge);
16392         }
16393
16394         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16395             tg3_asic_rev(tp) == ASIC_REV_5714)
16396                 tp->pdev_peer = tg3_find_peer(tp);
16397
16398         /* Determine TSO capabilities */
16399         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16400                 ; /* Do nothing. HW bug. */
16401         else if (tg3_flag(tp, 57765_PLUS))
16402                 tg3_flag_set(tp, HW_TSO_3);
16403         else if (tg3_flag(tp, 5755_PLUS) ||
16404                  tg3_asic_rev(tp) == ASIC_REV_5906)
16405                 tg3_flag_set(tp, HW_TSO_2);
16406         else if (tg3_flag(tp, 5750_PLUS)) {
16407                 tg3_flag_set(tp, HW_TSO_1);
16408                 tg3_flag_set(tp, TSO_BUG);
16409                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16410                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16411                         tg3_flag_clear(tp, TSO_BUG);
16412         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16413                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16414                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16415                 tg3_flag_set(tp, FW_TSO);
16416                 tg3_flag_set(tp, TSO_BUG);
16417                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16418                         tp->fw_needed = FIRMWARE_TG3TSO5;
16419                 else
16420                         tp->fw_needed = FIRMWARE_TG3TSO;
16421         }
16422
16423         /* Selectively allow TSO based on operating conditions */
16424         if (tg3_flag(tp, HW_TSO_1) ||
16425             tg3_flag(tp, HW_TSO_2) ||
16426             tg3_flag(tp, HW_TSO_3) ||
16427             tg3_flag(tp, FW_TSO)) {
16428                 /* For firmware TSO, assume ASF is disabled.
16429                  * We'll disable TSO later if we discover ASF
16430                  * is enabled in tg3_get_eeprom_hw_cfg().
16431                  */
16432                 tg3_flag_set(tp, TSO_CAPABLE);
16433         } else {
16434                 tg3_flag_clear(tp, TSO_CAPABLE);
16435                 tg3_flag_clear(tp, TSO_BUG);
16436                 tp->fw_needed = NULL;
16437         }
16438
16439         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16440                 tp->fw_needed = FIRMWARE_TG3;
16441
16442         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16443                 tp->fw_needed = FIRMWARE_TG357766;
16444
16445         tp->irq_max = 1;
16446
16447         if (tg3_flag(tp, 5750_PLUS)) {
16448                 tg3_flag_set(tp, SUPPORT_MSI);
16449                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16450                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16451                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16452                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16453                      tp->pdev_peer == tp->pdev))
16454                         tg3_flag_clear(tp, SUPPORT_MSI);
16455
16456                 if (tg3_flag(tp, 5755_PLUS) ||
16457                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16458                         tg3_flag_set(tp, 1SHOT_MSI);
16459                 }
16460
16461                 if (tg3_flag(tp, 57765_PLUS)) {
16462                         tg3_flag_set(tp, SUPPORT_MSIX);
16463                         tp->irq_max = TG3_IRQ_MAX_VECS;
16464                 }
16465         }
16466
16467         tp->txq_max = 1;
16468         tp->rxq_max = 1;
16469         if (tp->irq_max > 1) {
16470                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16471                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16472
16473                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16474                     tg3_asic_rev(tp) == ASIC_REV_5720)
16475                         tp->txq_max = tp->irq_max - 1;
16476         }
16477
16478         if (tg3_flag(tp, 5755_PLUS) ||
16479             tg3_asic_rev(tp) == ASIC_REV_5906)
16480                 tg3_flag_set(tp, SHORT_DMA_BUG);
16481
16482         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16483                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16484
16485         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16486             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16487             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16488             tg3_asic_rev(tp) == ASIC_REV_5762)
16489                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16490
16491         if (tg3_flag(tp, 57765_PLUS) &&
16492             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16493                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16494
16495         if (!tg3_flag(tp, 5705_PLUS) ||
16496             tg3_flag(tp, 5780_CLASS) ||
16497             tg3_flag(tp, USE_JUMBO_BDFLAG))
16498                 tg3_flag_set(tp, JUMBO_CAPABLE);
16499
16500         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16501                               &pci_state_reg);
16502
16503         if (pci_is_pcie(tp->pdev)) {
16504                 u16 lnkctl;
16505
16506                 tg3_flag_set(tp, PCI_EXPRESS);
16507
16508                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16509                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16510                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16511                                 tg3_flag_clear(tp, HW_TSO_2);
16512                                 tg3_flag_clear(tp, TSO_CAPABLE);
16513                         }
16514                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16515                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16516                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16517                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16518                                 tg3_flag_set(tp, CLKREQ_BUG);
16519                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16520                         tg3_flag_set(tp, L1PLLPD_EN);
16521                 }
16522         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16523                 /* BCM5785 devices are effectively PCIe devices, and should
16524                  * follow PCIe codepaths, but do not have a PCIe capabilities
16525                  * section.
16526                  */
16527                 tg3_flag_set(tp, PCI_EXPRESS);
16528         } else if (!tg3_flag(tp, 5705_PLUS) ||
16529                    tg3_flag(tp, 5780_CLASS)) {
16530                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16531                 if (!tp->pcix_cap) {
16532                         dev_err(&tp->pdev->dev,
16533                                 "Cannot find PCI-X capability, aborting\n");
16534                         return -EIO;
16535                 }
16536
16537                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16538                         tg3_flag_set(tp, PCIX_MODE);
16539         }
16540
16541         /* If we have an AMD 762 or VIA K8T800 chipset, write
16542          * reordering by the host controller on the mailbox registers
16543          * can cause major trouble.  We read back from
16544          * every mailbox register write to force the writes to be
16545          * posted to the chip in order.
16546          */
16547         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16548             !tg3_flag(tp, PCI_EXPRESS))
16549                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16550
16551         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16552                              &tp->pci_cacheline_sz);
16553         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16554                              &tp->pci_lat_timer);
16555         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16556             tp->pci_lat_timer < 64) {
16557                 tp->pci_lat_timer = 64;
16558                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16559                                       tp->pci_lat_timer);
16560         }
16561
16562         /* Important! -- It is critical that the PCI-X hw workaround
16563          * situation is decided before the first MMIO register access.
16564          */
16565         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16566                 /* 5700 BX chips need to have their TX producer index
16567                  * mailboxes written twice to work around a bug.
16568                  */
16569                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16570
16571                 /* If we are in PCI-X mode, enable the register write workaround.
16572                  *
16573                  * The workaround is to use indirect register accesses
16574                  * for all chip writes except those to mailbox registers.
16575                  */
16576                 if (tg3_flag(tp, PCIX_MODE)) {
16577                         u32 pm_reg;
16578
16579                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16580
16581                         /* The chip can have its power management PCI config
16582                          * space registers clobbered due to this bug.
16583                          * So explicitly force the chip into D0 here.
16584                          */
16585                         pci_read_config_dword(tp->pdev,
16586                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16587                                               &pm_reg);
16588                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16589                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16590                         pci_write_config_dword(tp->pdev,
16591                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16592                                                pm_reg);
16593
16594                         /* Also, force SERR#/PERR# in PCI command. */
16595                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16596                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16597                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16598                 }
16599         }
16600
16601         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16602                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16603         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16604                 tg3_flag_set(tp, PCI_32BIT);
16605
16606         /* Chip-specific fixup from Broadcom driver */
16607         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16608             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16609                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16610                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16611         }
16612
16613         /* Default fast path register access methods */
16614         tp->read32 = tg3_read32;
16615         tp->write32 = tg3_write32;
16616         tp->read32_mbox = tg3_read32;
16617         tp->write32_mbox = tg3_write32;
16618         tp->write32_tx_mbox = tg3_write32;
16619         tp->write32_rx_mbox = tg3_write32;
16620
16621         /* Various workaround register access methods */
16622         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16623                 tp->write32 = tg3_write_indirect_reg32;
16624         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16625                  (tg3_flag(tp, PCI_EXPRESS) &&
16626                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16627                 /*
16628                  * Back-to-back register writes can cause problems on these
16629                  * chips; the workaround is to read back all reg writes
16630                  * except those to mailbox regs.
16631                  *
16632                  * See tg3_write_indirect_reg32().
16633                  */
16634                 tp->write32 = tg3_write_flush_reg32;
16635         }
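              /* For reference, an indirect register write is a sketch along
               * these lines (assuming the TG3PCI_REG_DATA window register;
               * see tg3_write_indirect_reg32() earlier in this file),
               * serialized by tp->indirect_lock:
               *
               *      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
               *      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
               */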
16636
16637         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16638                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16639                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16640                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16641         }
16642
16643         if (tg3_flag(tp, ICH_WORKAROUND)) {
16644                 tp->read32 = tg3_read_indirect_reg32;
16645                 tp->write32 = tg3_write_indirect_reg32;
16646                 tp->read32_mbox = tg3_read_indirect_mbox;
16647                 tp->write32_mbox = tg3_write_indirect_mbox;
16648                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16649                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16650
16651                 iounmap(tp->regs);
16652                 tp->regs = NULL;
16653
16654                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16655                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16656                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16657         }
16658         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16659                 tp->read32_mbox = tg3_read32_mbox_5906;
16660                 tp->write32_mbox = tg3_write32_mbox_5906;
16661                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16662                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16663         }
16664
16665         if (tp->write32 == tg3_write_indirect_reg32 ||
16666             (tg3_flag(tp, PCIX_MODE) &&
16667              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16668               tg3_asic_rev(tp) == ASIC_REV_5701)))
16669                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16670
16671         /* The memory arbiter has to be enabled in order for SRAM accesses
16672          * to succeed.  Normally on powerup the tg3 chip firmware will make
16673          * sure it is enabled, but other entities such as system netboot
16674          * code might disable it.
16675          */
16676         val = tr32(MEMARB_MODE);
16677         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16678
16679         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16680         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16681             tg3_flag(tp, 5780_CLASS)) {
16682                 if (tg3_flag(tp, PCIX_MODE)) {
16683                         pci_read_config_dword(tp->pdev,
16684                                               tp->pcix_cap + PCI_X_STATUS,
16685                                               &val);
16686                         tp->pci_fn = val & 0x7;
16687                 }
16688         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16689                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16690                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16691                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16692                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16693                         val = tr32(TG3_CPMU_STATUS);
16694
16695                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16696                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16697                 else
16698                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16699                                      TG3_CPMU_STATUS_FSHFT_5719;
16700         }
16701
16702         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16703                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16704                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16705         }
16706
16707         /* Get eeprom hw config before calling tg3_set_power_state().
16708          * In particular, the TG3_FLAG_IS_NIC flag must be
16709          * determined before calling tg3_set_power_state() so that
16710          * we know whether or not to switch out of Vaux power.
16711          * When the flag is set, it means that GPIO1 is used for eeprom
16712          * write protect and also implies that it is a LOM where GPIOs
16713          * are not used to switch power.
16714          */
16715         tg3_get_eeprom_hw_cfg(tp);
16716
16717         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16718                 tg3_flag_clear(tp, TSO_CAPABLE);
16719                 tg3_flag_clear(tp, TSO_BUG);
16720                 tp->fw_needed = NULL;
16721         }
16722
16723         if (tg3_flag(tp, ENABLE_APE)) {
16724                 /* Allow reads and writes to the
16725                  * APE register and memory space.
16726                  */
16727                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16728                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16729                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16730                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16731                                        pci_state_reg);
16732
16733                 tg3_ape_lock_init(tp);
16734                 tp->ape_hb_interval =
16735                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16736         }
16737
16738         /* Set up tp->grc_local_ctrl before calling
16739          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16740          * will bring 5700's external PHY out of reset.
16741          * It is also used as eeprom write protect on LOMs.
16742          */
16743         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16744         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16745             tg3_flag(tp, EEPROM_WRITE_PROT))
16746                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16747                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16748         /* Unused GPIO3 must be driven as output on 5752 because there
16749          * are no pull-up resistors on unused GPIO pins.
16750          */
16751         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16752                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16753
16754         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16755             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16756             tg3_flag(tp, 57765_CLASS))
16757                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16758
16759         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16760             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16761                 /* Turn off the debug UART. */
16762                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16763                 if (tg3_flag(tp, IS_NIC))
16764                         /* Keep VMain power. */
16765                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16766                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16767         }
16768
16769         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16770                 tp->grc_local_ctrl |=
16771                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16772
16773         /* Switch out of Vaux if it is a NIC */
16774         tg3_pwrsrc_switch_to_vmain(tp);
16775
16776         /* Derive initial jumbo mode from MTU assigned in
16777          * ether_setup() via the alloc_etherdev() call
16778          */
16779         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16780                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16781
16782         /* Determine WakeOnLan speed to use. */
16783         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16784             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16785             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16786             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16787                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16788         } else {
16789                 tg3_flag_set(tp, WOL_SPEED_100MB);
16790         }
16791
16792         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16793                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16794
16795         /* A few boards don't want Ethernet@WireSpeed phy feature */
16796         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16797             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16798              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16799              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16800             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16801             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16802                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16803
16804         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16805             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16806                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16807         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16808                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16809
16810         if (tg3_flag(tp, 5705_PLUS) &&
16811             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16812             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16813             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16814             !tg3_flag(tp, 57765_PLUS)) {
16815                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16816                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16817                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16818                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16819                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16820                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16821                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16822                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16823                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16824                 } else
16825                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16826         }
16827
16828         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16829             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16830                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16831                 if (tp->phy_otp == 0)
16832                         tp->phy_otp = TG3_OTP_DEFAULT;
16833         }
16834
16835         if (tg3_flag(tp, CPMU_PRESENT))
16836                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16837         else
16838                 tp->mi_mode = MAC_MI_MODE_BASE;
16839
16840         tp->coalesce_mode = 0;
16841         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16842             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16843                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16844
16845         /* Set these bits to enable statistics workaround. */
16846         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16847             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16848             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16849             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16850                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16851                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16852         }
16853
16854         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16855             tg3_asic_rev(tp) == ASIC_REV_57780)
16856                 tg3_flag_set(tp, USE_PHYLIB);
16857
16858         err = tg3_mdio_init(tp);
16859         if (err)
16860                 return err;
16861
16862         /* Initialize data/descriptor byte/word swapping. */
16863         val = tr32(GRC_MODE);
16864         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16865             tg3_asic_rev(tp) == ASIC_REV_5762)
16866                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16867                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16868                         GRC_MODE_B2HRX_ENABLE |
16869                         GRC_MODE_HTX2B_ENABLE |
16870                         GRC_MODE_HOST_STACKUP);
16871         else
16872                 val &= GRC_MODE_HOST_STACKUP;
16873
16874         tw32(GRC_MODE, val | tp->grc_mode);
16875
16876         tg3_switch_clocks(tp);
16877
16878         /* Clear this out for sanity. */
16879         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16880
16881         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16882         tw32(TG3PCI_REG_BASE_ADDR, 0);
16883
16884         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16885                               &pci_state_reg);
16886         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16887             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16888                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16889                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16890                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16891                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16892                         void __iomem *sram_base;
16893
16894                         /* Write some dummy words into the SRAM status block
16895                          * area and see if they read back correctly.  If the returned
16896                          * value is bad, force-enable the PCIX workaround.
16897                          */
16898                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16899
16900                         writel(0x00000000, sram_base);
16901                         writel(0x00000000, sram_base + 4);
16902                         writel(0xffffffff, sram_base + 4);
16903                         if (readl(sram_base) != 0x00000000)
16904                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16905                 }
16906         }
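              /* Rationale for the probe above: if the device misdecodes PCI
               * burst writes, the 0xffffffff written to sram_base + 4 can land
               * on sram_base instead, so a non-zero readback of the first word
               * betrays the bug (an inference from the test's construction).
               */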
16907
16908         udelay(50);
16909         tg3_nvram_init(tp);
16910
16911         /* If the device has NVRAM, there is no need to load patch firmware */
16912         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16913             !tg3_flag(tp, NO_NVRAM))
16914                 tp->fw_needed = NULL;
16915
16916         grc_misc_cfg = tr32(GRC_MISC_CFG);
16917         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16918
16919         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16920             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16921              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16922                 tg3_flag_set(tp, IS_5788);
16923
16924         if (!tg3_flag(tp, IS_5788) &&
16925             tg3_asic_rev(tp) != ASIC_REV_5700)
16926                 tg3_flag_set(tp, TAGGED_STATUS);
16927         if (tg3_flag(tp, TAGGED_STATUS)) {
16928                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16929                                       HOSTCC_MODE_CLRTICK_TXBD);
16930
16931                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16932                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16933                                        tp->misc_host_ctrl);
16934         }
16935
16936         /* Preserve the APE MAC_MODE bits */
16937         if (tg3_flag(tp, ENABLE_APE))
16938                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16939         else
16940                 tp->mac_mode = 0;
16941
16942         if (tg3_10_100_only_device(tp, ent))
16943                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16944
16945         err = tg3_phy_probe(tp);
16946         if (err) {
16947                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16948                 /* ... but do not return immediately ... */
16949                 tg3_mdio_fini(tp);
16950         }
16951
16952         tg3_read_vpd(tp);
16953         tg3_read_fw_ver(tp);
16954
16955         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16956                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16957         } else {
16958                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16959                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16960                 else
16961                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16962         }
16963
16964         /* 5700 {AX,BX} chips have a broken status block link
16965          * change bit implementation, so we must use the
16966          * status register in those cases.
16967          */
16968         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16969                 tg3_flag_set(tp, USE_LINKCHG_REG);
16970         else
16971                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16972
16973         /* The led_ctrl is set during tg3_phy_probe; here we might
16974          * have to force the link status polling mechanism based
16975          * upon subsystem IDs.
16976          */
16977         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16978             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16979             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16980                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16981                 tg3_flag_set(tp, USE_LINKCHG_REG);
16982         }
16983
16984         /* For all SERDES we poll the MAC status register. */
16985         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16986                 tg3_flag_set(tp, POLL_SERDES);
16987         else
16988                 tg3_flag_clear(tp, POLL_SERDES);
16989
16990         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16991                 tg3_flag_set(tp, POLL_CPMU_LINK);
16992
16993         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16994         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16995         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16996             tg3_flag(tp, PCIX_MODE)) {
16997                 tp->rx_offset = NET_SKB_PAD;
16998 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16999                 tp->rx_copy_thresh = ~(u16)0;
17000 #endif
17001         }
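              /* Forcing the copy threshold to the maximum makes every received
               * packet take the copy path, which keeps IP headers aligned on
               * architectures without efficient unaligned access (an inference
               * from the #ifndef above).
               */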
17002
17003         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17004         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17005         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17006
17007         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17008
17009         /* Increment the rx prod index on the rx std ring by at most
17010          * 8 for these chips to workaround hw errata.
17011          * 8 for these chips to work around hw errata.
17012         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17013             tg3_asic_rev(tp) == ASIC_REV_5752 ||
17014             tg3_asic_rev(tp) == ASIC_REV_5755)
17015                 tp->rx_std_max_post = 8;
17016
17017         if (tg3_flag(tp, ASPM_WORKAROUND))
17018                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17019                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
17020
17021         return err;
17022 }
17023
17024 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17025 {
17026         u32 hi, lo, mac_offset;
17027         int addr_ok = 0;
17028         int err;
17029
17030         if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17031                 return 0;
17032
17033         if (tg3_flag(tp, IS_SSB_CORE)) {
17034                 err = ssb_gige_get_macaddr(tp->pdev, addr);
17035                 if (!err && is_valid_ether_addr(addr))
17036                         return 0;
17037         }
17038
17039         mac_offset = 0x7c;
17040         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17041             tg3_flag(tp, 5780_CLASS)) {
17042                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17043                         mac_offset = 0xcc;
17044                 if (tg3_nvram_lock(tp))
17045                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17046                 else
17047                         tg3_nvram_unlock(tp);
17048         } else if (tg3_flag(tp, 5717_PLUS)) {
17049                 if (tp->pci_fn & 1)
17050                         mac_offset = 0xcc;
17051                 if (tp->pci_fn > 1)
17052                         mac_offset += 0x18c;
17053         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17054                 mac_offset = 0x10;
17055
17056         /* First try to get it from the MAC address mailbox. */
17057         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
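              /* Bootcode appears to tag a valid mailbox entry with the ASCII
               * signature "HK" (0x484b) in the upper half of the high word;
               * that is what the check below keys on.
               */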
17058         if ((hi >> 16) == 0x484b) {
17059                 addr[0] = (hi >>  8) & 0xff;
17060                 addr[1] = (hi >>  0) & 0xff;
17061
17062                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17063                 addr[2] = (lo >> 24) & 0xff;
17064                 addr[3] = (lo >> 16) & 0xff;
17065                 addr[4] = (lo >>  8) & 0xff;
17066                 addr[5] = (lo >>  0) & 0xff;
17067
17068                 /* Some old bootcode may report a 0 MAC address in SRAM */
17069                 addr_ok = is_valid_ether_addr(addr);
17070         }
17071         if (!addr_ok) {
17072                 /* Next, try NVRAM. */
17073                 if (!tg3_flag(tp, NO_NVRAM) &&
17074                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17075                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17076                         memcpy(&addr[0], ((char *)&hi) + 2, 2);
17077                         memcpy(&addr[2], (char *)&lo, sizeof(lo));
17078                 }
17079                 /* Finally just fetch it out of the MAC control regs. */
17080                 else {
17081                         hi = tr32(MAC_ADDR_0_HIGH);
17082                         lo = tr32(MAC_ADDR_0_LOW);
17083
17084                         addr[5] = lo & 0xff;
17085                         addr[4] = (lo >> 8) & 0xff;
17086                         addr[3] = (lo >> 16) & 0xff;
17087                         addr[2] = (lo >> 24) & 0xff;
17088                         addr[1] = hi & 0xff;
17089                         addr[0] = (hi >> 8) & 0xff;
17090                 }
17091         }
17092
17093         if (!is_valid_ether_addr(addr))
17094                 return -EINVAL;
17095         return 0;
17096 }
17097
17098 #define BOUNDARY_SINGLE_CACHELINE       1
17099 #define BOUNDARY_MULTI_CACHELINE        2
17100
17101 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17102 {
17103         int cacheline_size;
17104         u8 byte;
17105         int goal;
17106
17107         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17108         if (byte == 0)
17109                 cacheline_size = 1024;
17110         else
17111                 cacheline_size = (int) byte * 4;
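              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
               * multiply by four; a value of zero (unconfigured) is taken
               * here as the 1024-byte worst case.
               */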
17112
17113         /* On 5703 and later chips, the boundary bits have no
17114          * effect.
17115          */
17116         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17117             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17118             !tg3_flag(tp, PCI_EXPRESS))
17119                 goto out;
17120
17121 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17122         goal = BOUNDARY_MULTI_CACHELINE;
17123 #else
17124 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17125         goal = BOUNDARY_SINGLE_CACHELINE;
17126 #else
17127         goal = 0;
17128 #endif
17129 #endif
17130
17131         if (tg3_flag(tp, 57765_PLUS)) {
17132                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17133                 goto out;
17134         }
17135
17136         if (!goal)
17137                 goto out;
17138
17139         /* PCI controllers on most RISC systems tend to disconnect
17140          * when a device tries to burst across a cache-line boundary.
17141          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17142          *
17143          * Unfortunately, for PCI-E there are only limited
17144          * write-side controls for this, and thus for reads
17145          * we will still get the disconnects.  We'll also waste
17146          * these PCI cycles for both read and write for chips
17147          * other than 5700 and 5701 which do not implement the
17148          * boundary bits.
17149          */
17150         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17151                 switch (cacheline_size) {
17152                 case 16:
17153                 case 32:
17154                 case 64:
17155                 case 128:
17156                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17157                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17158                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17159                         } else {
17160                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17161                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17162                         }
17163                         break;
17164
17165                 case 256:
17166                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17167                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17168                         break;
17169
17170                 default:
17171                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17172                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17173                         break;
17174                 }
17175         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17176                 switch (cacheline_size) {
17177                 case 16:
17178                 case 32:
17179                 case 64:
17180                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17181                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17182                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17183                                 break;
17184                         }
17185                         fallthrough;
17186                 case 128:
17187                 default:
17188                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17189                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17190                         break;
17191                 }
17192         } else {
17193                 switch (cacheline_size) {
17194                 case 16:
17195                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17196                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17197                                         DMA_RWCTRL_WRITE_BNDRY_16);
17198                                 break;
17199                         }
17200                         fallthrough;
17201                 case 32:
17202                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17203                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17204                                         DMA_RWCTRL_WRITE_BNDRY_32);
17205                                 break;
17206                         }
17207                         fallthrough;
17208                 case 64:
17209                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17210                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17211                                         DMA_RWCTRL_WRITE_BNDRY_64);
17212                                 break;
17213                         }
17214                         fallthrough;
17215                 case 128:
17216                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17217                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17218                                         DMA_RWCTRL_WRITE_BNDRY_128);
17219                                 break;
17220                         }
17221                         fallthrough;
17222                 case 256:
17223                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17224                                 DMA_RWCTRL_WRITE_BNDRY_256);
17225                         break;
17226                 case 512:
17227                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17228                                 DMA_RWCTRL_WRITE_BNDRY_512);
17229                         break;
17230                 case 1024:
17231                 default:
17232                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17233                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17234                         break;
17235                 }
17236         }
17237
17238 out:
17239         return val;
17240 }
17241
17242 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17243                            int size, bool to_device)
17244 {
17245         struct tg3_internal_buffer_desc test_desc;
17246         u32 sram_dma_descs;
17247         int i, ret;
17248
17249         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17250
17251         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17252         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17253         tw32(RDMAC_STATUS, 0);
17254         tw32(WDMAC_STATUS, 0);
17255
17256         tw32(BUFMGR_MODE, 0);
17257         tw32(FTQ_RESET, 0);
17258
17259         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17260         test_desc.addr_lo = buf_dma & 0xffffffff;
17261         test_desc.nic_mbuf = 0x00002100;
17262         test_desc.len = size;
17263
17264         /*
17265          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17266          * the *second* time the tg3 driver was getting loaded after an
17267          * initial scan.
17268          *
17269          * Broadcom tells me:
17270          *   ...the DMA engine is connected to the GRC block and a DMA
17271          *   reset may affect the GRC block in some unpredictable way...
17272          *   The behavior of resets to individual blocks has not been tested.
17273          *
17274          * Broadcom noted the GRC reset will also reset all sub-components.
17275          */
17276         if (to_device) {
17277                 test_desc.cqid_sqid = (13 << 8) | 2;
17278
17279                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17280                 udelay(40);
17281         } else {
17282                 test_desc.cqid_sqid = (16 << 8) | 7;
17283
17284                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17285                 udelay(40);
17286         }
17287         test_desc.flags = 0x00000005;
17288
17289         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17290                 u32 val;
17291
17292                 val = *(((u32 *)&test_desc) + i);
17293                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17294                                        sram_dma_descs + (i * sizeof(u32)));
17295                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17296         }
17297         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
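              /* The loop above pushed the descriptor into NIC SRAM through the
               * config-space memory window: each pass set the window base to
               * the target SRAM address, then wrote one 32-bit word of data.
               * The write above resets the window base afterwards.
               */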
17298
17299         if (to_device)
17300                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17301         else
17302                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17303
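              /* Poll the completion FIFO; 40 iterations of udelay(100) bounds
               * the wait at roughly 4 ms before giving up with -ENODEV.
               */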
17304         ret = -ENODEV;
17305         for (i = 0; i < 40; i++) {
17306                 u32 val;
17307
17308                 if (to_device)
17309                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17310                 else
17311                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17312                 if ((val & 0xffff) == sram_dma_descs) {
17313                         ret = 0;
17314                         break;
17315                 }
17316
17317                 udelay(100);
17318         }
17319
17320         return ret;
17321 }
17322
17323 #define TEST_BUFFER_SIZE        0x2000
17324
17325 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17326         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17327         { },
17328 };
17329
17330 static int tg3_test_dma(struct tg3 *tp)
17331 {
17332         dma_addr_t buf_dma;
17333         u32 *buf, saved_dma_rwctrl;
17334         int ret = 0;
17335
17336         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17337                                  &buf_dma, GFP_KERNEL);
17338         if (!buf) {
17339                 ret = -ENOMEM;
17340                 goto out_nofree;
17341         }
17342
17343         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17344                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17345
17346         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17347
17348         if (tg3_flag(tp, 57765_PLUS))
17349                 goto out;
17350
17351         if (tg3_flag(tp, PCI_EXPRESS)) {
17352                 /* DMA read watermark not used on PCIE */
17353                 tp->dma_rwctrl |= 0x00180000;
17354         } else if (!tg3_flag(tp, PCIX_MODE)) {
17355                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17356                     tg3_asic_rev(tp) == ASIC_REV_5750)
17357                         tp->dma_rwctrl |= 0x003f0000;
17358                 else
17359                         tp->dma_rwctrl |= 0x003f000f;
17360         } else {
17361                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17362                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17363                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17364                         u32 read_water = 0x7;
17365
17366                         /* If the 5704 is behind the EPB bridge, we can
17367                          * do the less restrictive ONE_DMA workaround for
17368                          * better performance.
17369                          */
17370                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17371                             tg3_asic_rev(tp) == ASIC_REV_5704)
17372                                 tp->dma_rwctrl |= 0x8000;
17373                         else if (ccval == 0x6 || ccval == 0x7)
17374                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17375
17376                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17377                                 read_water = 4;
17378                         /* Set bit 23 to enable PCIX hw bug fix */
17379                         tp->dma_rwctrl |=
17380                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17381                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17382                                 (1 << 23);
17383                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17384                         /* 5780 always in PCIX mode */
17385                         tp->dma_rwctrl |= 0x00144000;
17386                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17387                         /* 5714 always in PCIX mode */
17388                         tp->dma_rwctrl |= 0x00148000;
17389                 } else {
17390                         tp->dma_rwctrl |= 0x001b000f;
17391                 }
17392         }
17393         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17394                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17395
17396         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17397             tg3_asic_rev(tp) == ASIC_REV_5704)
17398                 tp->dma_rwctrl &= 0xfffffff0;
17399
17400         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17401             tg3_asic_rev(tp) == ASIC_REV_5701) {
17402                 /* Remove this if it causes problems for some boards. */
17403                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17404
17405                 /* On 5700/5701 chips, we need to set this bit.
17406                  * Otherwise the chip will issue cacheline transactions
17407                  * to streamable DMA memory without all of the byte
17408                  * enables turned on.  This is an error on several
17409                  * RISC PCI controllers, in particular sparc64.
17410                  *
17411                  * On 5703/5704 chips, this bit has been reassigned
17412                  * a different meaning.  In particular, it is used
17413                  * on those chips to enable a PCI-X workaround.
17414                  */
17415                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17416         }
17417
17418         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17419
17421         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17422             tg3_asic_rev(tp) != ASIC_REV_5701)
17423                 goto out;
17424
17425         /* It is best to perform the DMA test with maximum write burst size
17426          * to expose the 5700/5701 write DMA bug.
17427          */
17428         saved_dma_rwctrl = tp->dma_rwctrl;
17429         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17430         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17431
17432         while (1) {
17433                 u32 *p = buf, i;
17434
17435                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17436                         p[i] = i;
17437
17438                 /* Send the buffer to the chip. */
17439                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17440                 if (ret) {
17441                         dev_err(&tp->pdev->dev,
17442                                 "%s: Buffer write failed. err = %d\n",
17443                                 __func__, ret);
17444                         break;
17445                 }
17446
17447                 /* Now read it back. */
17448                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17449                 if (ret) {
17450                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17451                                 "err = %d\n", __func__, ret);
17452                         break;
17453                 }
17454
17455                 /* Verify it. */
17456                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17457                         if (p[i] == i)
17458                                 continue;
17459
17460                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17461                             DMA_RWCTRL_WRITE_BNDRY_16) {
17462                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17463                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17464                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17465                                 break;
17466                         } else {
17467                                 dev_err(&tp->pdev->dev,
17468                                         "%s: Buffer corrupted on read back! "
17469                                         "(%d != %d)\n", __func__, p[i], i);
17470                                 ret = -ENODEV;
17471                                 goto out;
17472                         }
17473                 }
17474
17475                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17476                         /* Success. */
17477                         ret = 0;
17478                         break;
17479                 }
17480         }
17481         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17482             DMA_RWCTRL_WRITE_BNDRY_16) {
17483                 /* DMA test passed without adjusting the DMA boundary;
17484                  * now look for chipsets that are known to expose the
17485                  * DMA bug without failing the test.
17486                  */
17487                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17488                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17489                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17490                 } else {
17491                         /* Safe to use the calculated DMA boundary. */
17492                         tp->dma_rwctrl = saved_dma_rwctrl;
17493                 }
17494
17495                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17496         }
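              /* At this point tp->dma_rwctrl holds either the boundary
               * computed by tg3_calc_dma_bndry() or the conservative 16-byte
               * write-boundary fallback, and the DMA_RW_CTRL register has been
               * updated to match.
               */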
17497
17498 out:
17499         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17500 out_nofree:
17501         return ret;
17502 }
17503
17504 static void tg3_init_bufmgr_config(struct tg3 *tp)
17505 {
17506         if (tg3_flag(tp, 57765_PLUS)) {
17507                 tp->bufmgr_config.mbuf_read_dma_low_water =
17508                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17509                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17510                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17511                 tp->bufmgr_config.mbuf_high_water =
17512                         DEFAULT_MB_HIGH_WATER_57765;
17513
17514                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17515                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17516                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17517                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17518                 tp->bufmgr_config.mbuf_high_water_jumbo =
17519                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17520         } else if (tg3_flag(tp, 5705_PLUS)) {
17521                 tp->bufmgr_config.mbuf_read_dma_low_water =
17522                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17523                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17524                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17525                 tp->bufmgr_config.mbuf_high_water =
17526                         DEFAULT_MB_HIGH_WATER_5705;
17527                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17528                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17529                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17530                         tp->bufmgr_config.mbuf_high_water =
17531                                 DEFAULT_MB_HIGH_WATER_5906;
17532                 }
17533
17534                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17535                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17536                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17537                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17538                 tp->bufmgr_config.mbuf_high_water_jumbo =
17539                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17540         } else {
17541                 tp->bufmgr_config.mbuf_read_dma_low_water =
17542                         DEFAULT_MB_RDMA_LOW_WATER;
17543                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17544                         DEFAULT_MB_MACRX_LOW_WATER;
17545                 tp->bufmgr_config.mbuf_high_water =
17546                         DEFAULT_MB_HIGH_WATER;
17547
17548                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17549                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17550                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17551                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17552                 tp->bufmgr_config.mbuf_high_water_jumbo =
17553                         DEFAULT_MB_HIGH_WATER_JUMBO;
17554         }
17555
17556         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17557         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17558 }
17559
17560 static char *tg3_phy_string(struct tg3 *tp)
17561 {
17562         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17563         case TG3_PHY_ID_BCM5400:        return "5400";
17564         case TG3_PHY_ID_BCM5401:        return "5401";
17565         case TG3_PHY_ID_BCM5411:        return "5411";
17566         case TG3_PHY_ID_BCM5701:        return "5701";
17567         case TG3_PHY_ID_BCM5703:        return "5703";
17568         case TG3_PHY_ID_BCM5704:        return "5704";
17569         case TG3_PHY_ID_BCM5705:        return "5705";
17570         case TG3_PHY_ID_BCM5750:        return "5750";
17571         case TG3_PHY_ID_BCM5752:        return "5752";
17572         case TG3_PHY_ID_BCM5714:        return "5714";
17573         case TG3_PHY_ID_BCM5780:        return "5780";
17574         case TG3_PHY_ID_BCM5755:        return "5755";
17575         case TG3_PHY_ID_BCM5787:        return "5787";
17576         case TG3_PHY_ID_BCM5784:        return "5784";
17577         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17578         case TG3_PHY_ID_BCM5906:        return "5906";
17579         case TG3_PHY_ID_BCM5761:        return "5761";
17580         case TG3_PHY_ID_BCM5718C:       return "5718C";
17581         case TG3_PHY_ID_BCM5718S:       return "5718S";
17582         case TG3_PHY_ID_BCM57765:       return "57765";
17583         case TG3_PHY_ID_BCM5719C:       return "5719C";
17584         case TG3_PHY_ID_BCM5720C:       return "5720C";
17585         case TG3_PHY_ID_BCM5762:        return "5762C";
17586         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17587         case 0:                 return "serdes";
17588         default:                return "unknown";
17589         }
17590 }
17591
17592 static char *tg3_bus_string(struct tg3 *tp, char *str)
17593 {
17594         if (tg3_flag(tp, PCI_EXPRESS)) {
17595                 strcpy(str, "PCI Express");
17596                 return str;
17597         } else if (tg3_flag(tp, PCIX_MODE)) {
17598                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17599
17600                 strcpy(str, "PCIX:");
17601
17602                 if ((clock_ctrl == 7) ||
17603                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17604                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17605                         strcat(str, "133MHz");
17606                 else if (clock_ctrl == 0)
17607                         strcat(str, "33MHz");
17608                 else if (clock_ctrl == 2)
17609                         strcat(str, "50MHz");
17610                 else if (clock_ctrl == 4)
17611                         strcat(str, "66MHz");
17612                 else if (clock_ctrl == 6)
17613                         strcat(str, "100MHz");
17614         } else {
17615                 strcpy(str, "PCI:");
17616                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17617                         strcat(str, "66MHz");
17618                 else
17619                         strcat(str, "33MHz");
17620         }
17621         if (tg3_flag(tp, PCI_32BIT))
17622                 strcat(str, ":32-bit");
17623         else
17624                 strcat(str, ":64-bit");
17625         return str;
17626 }
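
      /* Note: tg3_bus_string() strcat()s into the caller's buffer without a
       * length check; the caller in tg3_init_one() below passes char str[40],
       * which comfortably fits the longest "PCIX:133MHz:64-bit" style result.
       */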
17627
17628 static void tg3_init_coal(struct tg3 *tp)
17629 {
17630         struct ethtool_coalesce *ec = &tp->coal;
17631
17632         memset(ec, 0, sizeof(*ec));
17633         ec->cmd = ETHTOOL_GCOALESCE;
17634         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17635         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17636         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17637         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17638         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17639         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17640         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17641         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17642         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17643
17644         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17645                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17646                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17647                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17648                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17649                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17650         }
17651
17652         if (tg3_flag(tp, 5705_PLUS)) {
17653                 ec->rx_coalesce_usecs_irq = 0;
17654                 ec->tx_coalesce_usecs_irq = 0;
17655                 ec->stats_block_coalesce_usecs = 0;
17656         }
17657 }
17658
17659 static int tg3_init_one(struct pci_dev *pdev,
17660                                   const struct pci_device_id *ent)
17661 {
17662         struct net_device *dev;
17663         struct tg3 *tp;
17664         int i, err;
17665         u32 sndmbx, rcvmbx, intmbx;
17666         char str[40];
17667         u64 dma_mask, persist_dma_mask;
17668         netdev_features_t features = 0;
17669         u8 addr[ETH_ALEN] __aligned(2);
17670
17671         err = pci_enable_device(pdev);
17672         if (err) {
17673                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17674                 return err;
17675         }
17676
17677         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17678         if (err) {
17679                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17680                 goto err_out_disable_pdev;
17681         }
17682
17683         pci_set_master(pdev);
17684
17685         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17686         if (!dev) {
17687                 err = -ENOMEM;
17688                 goto err_out_free_res;
17689         }
17690
17691         SET_NETDEV_DEV(dev, &pdev->dev);
17692
17693         tp = netdev_priv(dev);
17694         tp->pdev = pdev;
17695         tp->dev = dev;
17696         tp->rx_mode = TG3_DEF_RX_MODE;
17697         tp->tx_mode = TG3_DEF_TX_MODE;
17698         tp->irq_sync = 1;
17699         tp->pcierr_recovery = false;
17700
17701         if (tg3_debug > 0)
17702                 tp->msg_enable = tg3_debug;
17703         else
17704                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17705
17706         if (pdev_is_ssb_gige_core(pdev)) {
17707                 tg3_flag_set(tp, IS_SSB_CORE);
17708                 if (ssb_gige_must_flush_posted_writes(pdev))
17709                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17710                 if (ssb_gige_one_dma_at_once(pdev))
17711                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17712                 if (ssb_gige_have_roboswitch(pdev)) {
17713                         tg3_flag_set(tp, USE_PHYLIB);
17714                         tg3_flag_set(tp, ROBOSWITCH);
17715                 }
17716                 if (ssb_gige_is_rgmii(pdev))
17717                         tg3_flag_set(tp, RGMII_MODE);
17718         }
17719
17720         /* The word/byte swap controls here control register access byte
17721          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17722          * setting below.
17723          */
17724         tp->misc_host_ctrl =
17725                 MISC_HOST_CTRL_MASK_PCI_INT |
17726                 MISC_HOST_CTRL_WORD_SWAP |
17727                 MISC_HOST_CTRL_INDIR_ACCESS |
17728                 MISC_HOST_CTRL_PCISTATE_RW;
17729
17730         /* The NONFRM (non-frame) byte/word swap controls take effect
17731          * on descriptor entries, anything which isn't packet data.
17732          *
17733          * The StrongARM chips on the board (one for tx, one for rx)
17734          * are running in big-endian mode.
17735          */
17736         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17737                         GRC_MODE_WSWAP_NONFRM_DATA);
17738 #ifdef __BIG_ENDIAN
17739         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17740 #endif
17741         spin_lock_init(&tp->lock);
17742         spin_lock_init(&tp->indirect_lock);
17743         INIT_WORK(&tp->reset_task, tg3_reset_task);
17744
17745         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17746         if (!tp->regs) {
17747                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17748                 err = -ENOMEM;
17749                 goto err_out_free_dev;
17750         }
17751
17752         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17753             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17754             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17755             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17756             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17757             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17758             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17759             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17760             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17761             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17762             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17763             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17764             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17765             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17766             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17767                 tg3_flag_set(tp, ENABLE_APE);
17768                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17769                 if (!tp->aperegs) {
17770                         dev_err(&pdev->dev,
17771                                 "Cannot map APE registers, aborting\n");
17772                         err = -ENOMEM;
17773                         goto err_out_iounmap;
17774                 }
17775         }
17776
17777         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17778         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17779
17780         dev->ethtool_ops = &tg3_ethtool_ops;
17781         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17782         dev->netdev_ops = &tg3_netdev_ops;
17783         dev->irq = pdev->irq;
17784
17785         err = tg3_get_invariants(tp, ent);
17786         if (err) {
17787                 dev_err(&pdev->dev,
17788                         "Problem fetching invariants of chip, aborting\n");
17789                 goto err_out_apeunmap;
17790         }
17791
17792         /* The EPB bridge inside 5714, 5715, and 5780 and any
17793          * device behind the EPB cannot support DMA addresses > 40-bit.
17794          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17795          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17796          * do DMA address check in __tg3_start_xmit().
17797          */
17798         if (tg3_flag(tp, IS_5788))
17799                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17800         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17801                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17802 #ifdef CONFIG_HIGHMEM
17803                 dma_mask = DMA_BIT_MASK(64);
17804 #endif
17805         } else
17806                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
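              /* In the 40BIT_DMA_BUG case above, the streaming mask may widen
               * to 64 bits under CONFIG_HIGHMEM because, per the comment
               * above, __tg3_start_xmit() checks DMA addresses itself, while
               * the coherent (persistent) mask stays at 40 bits for the buggy
               * bridges.
               */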
17807
17808         /* Configure DMA attributes. */
17809         if (dma_mask > DMA_BIT_MASK(32)) {
17810                 err = dma_set_mask(&pdev->dev, dma_mask);
17811                 if (!err) {
17812                         features |= NETIF_F_HIGHDMA;
17813                         err = dma_set_coherent_mask(&pdev->dev,
17814                                                     persist_dma_mask);
17815                         if (err < 0) {
17816                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17817                                         "DMA for consistent allocations\n");
17818                                 goto err_out_apeunmap;
17819                         }
17820                 }
17821         }
17822         if (err || dma_mask == DMA_BIT_MASK(32)) {
17823                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17824                 if (err) {
17825                         dev_err(&pdev->dev,
17826                                 "No usable DMA configuration, aborting\n");
17827                         goto err_out_apeunmap;
17828                 }
17829         }
17830
17831         tg3_init_bufmgr_config(tp);
17832
17833         /* 5700 B0 chips do not support checksumming correctly due
17834          * to hardware bugs.
17835          */
17836         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17837                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17838
17839                 if (tg3_flag(tp, 5755_PLUS))
17840                         features |= NETIF_F_IPV6_CSUM;
17841         }
17842
17843         /* TSO is on by default on chips that support hardware TSO.
17844          * Firmware TSO on older chips gives lower performance, so it
17845          * is off by default, but can be enabled using ethtool.
17846          */
17847         if ((tg3_flag(tp, HW_TSO_1) ||
17848              tg3_flag(tp, HW_TSO_2) ||
17849              tg3_flag(tp, HW_TSO_3)) &&
17850             (features & NETIF_F_IP_CSUM))
17851                 features |= NETIF_F_TSO;
17852         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17853                 if (features & NETIF_F_IPV6_CSUM)
17854                         features |= NETIF_F_TSO6;
17855                 if (tg3_flag(tp, HW_TSO_3) ||
17856                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17857                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17858                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17859                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17860                     tg3_asic_rev(tp) == ASIC_REV_57780)
17861                         features |= NETIF_F_TSO_ECN;
17862         }
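        /* In effect: HW_TSO_1/2/3 appear to mark successive revisions of
         * the hardware TSO engine.  TSO for IPv6 needs at least HW_TSO_2,
         * and ECN-capable TSO needs HW_TSO_3 or one of the ASICs checked
         * explicitly above.
         */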
17863
17864         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17865                          NETIF_F_HW_VLAN_CTAG_RX;
17866         dev->vlan_features |= features;
17867
17868         /*
17869          * Add loopback capability only for a subset of devices that support
17870          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow
17871          * INT-PHY loopback for the remaining devices.
17872          */
17873         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17874             !tg3_flag(tp, CPMU_PRESENT))
17875                 /* Add the loopback capability */
17876                 features |= NETIF_F_LOOPBACK;
17877
17878         dev->hw_features |= features;
17879         dev->priv_flags |= IFF_UNICAST_FLT;
17880
17881         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17882         dev->min_mtu = TG3_MIN_MTU;
17883         dev->max_mtu = TG3_MAX_MTU(tp);
17884
17885         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17886             !tg3_flag(tp, TSO_CAPABLE) &&
17887             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17888                 tg3_flag_set(tp, MAX_RXPEND_64);
17889                 tp->rx_pending = 63;
17890         }
17891
17892         err = tg3_get_device_address(tp, addr);
17893         if (err) {
17894                 dev_err(&pdev->dev,
17895                         "Could not obtain valid ethernet address, aborting\n");
17896                 goto err_out_apeunmap;
17897         }
17898         eth_hw_addr_set(dev, addr);
17899
17900         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17901         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17902         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17903         for (i = 0; i < tp->irq_max; i++) {
17904                 struct tg3_napi *tnapi = &tp->napi[i];
17905
17906                 tnapi->tp = tp;
17907                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17908
17909                 tnapi->int_mbox = intmbx;
17910                 intmbx += 0x8;
17911
17912                 tnapi->consmbox = rcvmbx;
17913                 tnapi->prodmbox = sndmbx;
17914
17915                 if (i)
17916                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17917                 else
17918                         tnapi->coal_now = HOSTCC_MODE_NOW;
17919
17920                 if (!tg3_flag(tp, SUPPORT_MSIX))
17921                         break;
17922
17923                 /*
17924                  * If we support MSI-X, we'll be using RSS.  With RSS, the
17925                  * first vector only handles link interrupts and the
17926                  * remaining vectors handle rx and tx interrupts.  Reuse the
17927                  * mailbox values for the next iteration.  The values we set
17928                  * up above are still useful for single-vector mode.
17929                  */
17930                 if (!i)
17931                         continue;
17932
17933                 rcvmbx += 0x8;
17934
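                /* The send producer indices are 32 bits wide, so two of them
                 * can share each 64-bit mailbox slot: step from the low word
                 * back to the high word of the same slot, then forward to the
                 * low word of the next one (0x04, 0x00, 0x0c, 0x08, 0x14, ...).
                 */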
17935                 if (sndmbx & 0x4)
17936                         sndmbx -= 0x4;
17937                 else
17938                         sndmbx += 0xc;
17939         }
17940
17941         /*
17942          * Reset the chip in case a UNDI or EFI driver did not shut it
17943          * down; otherwise the DMA self test will enable the WDMAC and
17944          * we'll see (spurious) pending DMA on the PCI bus at that point.
17945          */
17946         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17947             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17948                 tg3_full_lock(tp, 0);
17949                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17950                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17951                 tg3_full_unlock(tp);
17952         }
17953
17954         err = tg3_test_dma(tp);
17955         if (err) {
17956                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17957                 goto err_out_apeunmap;
17958         }
17959
17960         tg3_init_coal(tp);
17961
17962         pci_set_drvdata(pdev, dev);
17963
17964         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17965             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17966             tg3_asic_rev(tp) == ASIC_REV_5762)
17967                 tg3_flag_set(tp, PTP_CAPABLE);
17968
17969         tg3_timer_init(tp);
17970
17971         tg3_carrier_off(tp);
17972
17973         err = register_netdev(dev);
17974         if (err) {
17975                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17976                 goto err_out_apeunmap;
17977         }
17978
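        /* Register the PHC (PTP hardware clock).  Note that a registration
         * failure is tolerated: tp->ptp_clock is simply left NULL and the
         * device runs without exposing a PTP clock.
         */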
17979         if (tg3_flag(tp, PTP_CAPABLE)) {
17980                 tg3_ptp_init(tp);
17981                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17982                                                    &tp->pdev->dev);
17983                 if (IS_ERR(tp->ptp_clock))
17984                         tp->ptp_clock = NULL;
17985         }
17986
17987         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17988                     tp->board_part_number,
17989                     tg3_chip_rev_id(tp),
17990                     tg3_bus_string(tp, str),
17991                     dev->dev_addr);
17992
17993         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17994                 char *ethtype;
17995
17996                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17997                         ethtype = "10/100Base-TX";
17998                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17999                         ethtype = "1000Base-SX";
18000                 else
18001                         ethtype = "10/100/1000Base-T";
18002
18003                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18004                             "(WireSpeed[%d], EEE[%d])\n",
18005                             tg3_phy_string(tp), ethtype,
18006                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18007                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18008         }
18009
18010         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18011                     (dev->features & NETIF_F_RXCSUM) != 0,
18012                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
18013                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18014                     tg3_flag(tp, ENABLE_ASF) != 0,
18015                     tg3_flag(tp, TSO_CAPABLE) != 0);
18016         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18017                     tp->dma_rwctrl,
18018                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18019                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18020
18021         pci_save_state(pdev);
18022
18023         return 0;
18024
18025 err_out_apeunmap:
18026         if (tp->aperegs) {
18027                 iounmap(tp->aperegs);
18028                 tp->aperegs = NULL;
18029         }
18030
18031 err_out_iounmap:
18032         if (tp->regs) {
18033                 iounmap(tp->regs);
18034                 tp->regs = NULL;
18035         }
18036
18037 err_out_free_dev:
18038         free_netdev(dev);
18039
18040 err_out_free_res:
18041         pci_release_regions(pdev);
18042
18043 err_out_disable_pdev:
18044         if (pci_is_enabled(pdev))
18045                 pci_disable_device(pdev);
18046         return err;
18047 }
18048
18049 static void tg3_remove_one(struct pci_dev *pdev)
18050 {
18051         struct net_device *dev = pci_get_drvdata(pdev);
18052
18053         if (dev) {
18054                 struct tg3 *tp = netdev_priv(dev);
18055
18056                 tg3_ptp_fini(tp);
18057
18058                 release_firmware(tp->fw);
18059
18060                 tg3_reset_task_cancel(tp);
18061
18062                 if (tg3_flag(tp, USE_PHYLIB)) {
18063                         tg3_phy_fini(tp);
18064                         tg3_mdio_fini(tp);
18065                 }
18066
18067                 unregister_netdev(dev);
18068                 if (tp->aperegs) {
18069                         iounmap(tp->aperegs);
18070                         tp->aperegs = NULL;
18071                 }
18072                 if (tp->regs) {
18073                         iounmap(tp->regs);
18074                         tp->regs = NULL;
18075                 }
18076                 free_netdev(dev);
18077                 pci_release_regions(pdev);
18078                 pci_disable_device(pdev);
18079         }
18080 }
18081
18082 #ifdef CONFIG_PM_SLEEP
18083 static int tg3_suspend(struct device *device)
18084 {
18085         struct net_device *dev = dev_get_drvdata(device);
18086         struct tg3 *tp = netdev_priv(dev);
18087         int err = 0;
18088
18089         rtnl_lock();
18090
18091         if (!netif_running(dev))
18092                 goto unlock;
18093
18094         tg3_reset_task_cancel(tp);
18095         tg3_phy_stop(tp);
18096         tg3_netif_stop(tp);
18097
18098         tg3_timer_stop(tp);
18099
18100         tg3_full_lock(tp, 1);
18101         tg3_disable_ints(tp);
18102         tg3_full_unlock(tp);
18103
18104         netif_device_detach(dev);
18105
18106         tg3_full_lock(tp, 0);
18107         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18108         tg3_flag_clear(tp, INIT_COMPLETE);
18109         tg3_full_unlock(tp);
18110
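        /* Move the device toward its low-power state.  If that fails,
         * restart the hardware so the interface is left usable instead
         * of half-stopped.
         */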
18111         err = tg3_power_down_prepare(tp);
18112         if (err) {
18113                 int err2;
18114
18115                 tg3_full_lock(tp, 0);
18116
18117                 tg3_flag_set(tp, INIT_COMPLETE);
18118                 err2 = tg3_restart_hw(tp, true);
18119                 if (err2)
18120                         goto out;
18121
18122                 tg3_timer_start(tp);
18123
18124                 netif_device_attach(dev);
18125                 tg3_netif_start(tp);
18126
18127 out:
18128                 tg3_full_unlock(tp);
18129
18130                 if (!err2)
18131                         tg3_phy_start(tp);
18132         }
18133
18134 unlock:
18135         rtnl_unlock();
18136         return err;
18137 }
18138
18139 static int tg3_resume(struct device *device)
18140 {
18141         struct net_device *dev = dev_get_drvdata(device);
18142         struct tg3 *tp = netdev_priv(dev);
18143         int err = 0;
18144
18145         rtnl_lock();
18146
18147         if (!netif_running(dev))
18148                 goto unlock;
18149
18150         netif_device_attach(dev);
18151
18152         tg3_full_lock(tp, 0);
18153
18154         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18155
18156         tg3_flag_set(tp, INIT_COMPLETE);
18157         err = tg3_restart_hw(tp,
18158                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18159         if (err)
18160                 goto out;
18161
18162         tg3_timer_start(tp);
18163
18164         tg3_netif_start(tp);
18165
18166 out:
18167         tg3_full_unlock(tp);
18168
18169         if (!err)
18170                 tg3_phy_start(tp);
18171
18172 unlock:
18173         rtnl_unlock();
18174         return err;
18175 }
18176 #endif /* CONFIG_PM_SLEEP */
18177
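/* SIMPLE_DEV_PM_OPS references tg3_suspend/tg3_resume only when
 * CONFIG_PM_SLEEP is set, which is why the #ifdef above suffices.
 */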
18178 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18179
18180 static void tg3_shutdown(struct pci_dev *pdev)
18181 {
18182         struct net_device *dev = pci_get_drvdata(pdev);
18183         struct tg3 *tp = netdev_priv(dev);
18184
18185         tg3_reset_task_cancel(tp);
18186
18187         rtnl_lock();
18188
18189         netif_device_detach(dev);
18190
18191         if (netif_running(dev))
18192                 dev_close(dev);
18193
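        /* Fully power the chip down (arming wake-on-LAN if configured)
         * only when the system is actually going to lose power.
         */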
18194         if (system_state == SYSTEM_POWER_OFF)
18195                 tg3_power_down(tp);
18196
18197         rtnl_unlock();
18198
18199         pci_disable_device(pdev);
18200 }
18201
18202 /**
18203  * tg3_io_error_detected - called when PCI error is detected
18204  * @pdev: Pointer to PCI device
18205  * @state: The current PCI connection state
18206  *
18207  * This function is called after a PCI bus error affecting
18208  * this device has been detected.
18209  */
18210 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18211                                               pci_channel_state_t state)
18212 {
18213         struct net_device *netdev = pci_get_drvdata(pdev);
18214         struct tg3 *tp = netdev_priv(netdev);
18215         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18216
18217         netdev_info(netdev, "PCI I/O error detected\n");
18218
18219         /* Make sure that the reset task doesn't run */
18220         tg3_reset_task_cancel(tp);
18221
18222         rtnl_lock();
18223
18224         /* Could be a second call, or we may not have a netdev yet */
18225         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18226                 goto done;
18227
18228         /* We needn't recover from a permanent error */
18229         if (state == pci_channel_io_frozen)
18230                 tp->pcierr_recovery = true;
18231
18232         tg3_phy_stop(tp);
18233
18234         tg3_netif_stop(tp);
18235
18236         tg3_timer_stop(tp);
18237
18238         netif_device_detach(netdev);
18239
18240         /* Clean up software state, even if MMIO is blocked */
18241         tg3_full_lock(tp, 0);
18242         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18243         tg3_full_unlock(tp);
18244
18245 done:
18246         if (state == pci_channel_io_perm_failure) {
18247                 if (netdev) {
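                        /* Re-enable NAPI (stopped in tg3_netif_stop() above)
                         * so the dev_close() teardown path can proceed.
                         */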
18248                         tg3_napi_enable(tp);
18249                         dev_close(netdev);
18250                 }
18251                 err = PCI_ERS_RESULT_DISCONNECT;
18252         } else {
18253                 pci_disable_device(pdev);
18254         }
18255
18256         rtnl_unlock();
18257
18258         return err;
18259 }
18260
18261 /**
18262  * tg3_io_slot_reset - called after the PCI bus has been reset.
18263  * @pdev: Pointer to PCI device
18264  *
18265  * Restart the card from scratch, as if from a cold-boot.
18266  * At this point, the card has experienced a hard reset,
18267  * followed by fixups by the BIOS, and has its config space
18268  * set up identically to what it was at cold boot.
18269  */
18270 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18271 {
18272         struct net_device *netdev = pci_get_drvdata(pdev);
18273         struct tg3 *tp = netdev_priv(netdev);
18274         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18275         int err;
18276
18277         rtnl_lock();
18278
18279         if (pci_enable_device(pdev)) {
18280                 dev_err(&pdev->dev,
18281                         "Cannot re-enable PCI device after reset.\n");
18282                 goto done;
18283         }
18284
18285         pci_set_master(pdev);
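        /* Restore config space from the snapshot taken before the error,
         * then re-save it so that a future recovery also starts from a
         * valid saved state.
         */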
18286         pci_restore_state(pdev);
18287         pci_save_state(pdev);
18288
18289         if (!netdev || !netif_running(netdev)) {
18290                 rc = PCI_ERS_RESULT_RECOVERED;
18291                 goto done;
18292         }
18293
18294         err = tg3_power_up(tp);
18295         if (err)
18296                 goto done;
18297
18298         rc = PCI_ERS_RESULT_RECOVERED;
18299
18300 done:
18301         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18302                 tg3_napi_enable(tp);
18303                 dev_close(netdev);
18304         }
18305         rtnl_unlock();
18306
18307         return rc;
18308 }
18309
18310 /**
18311  * tg3_io_resume - called when traffic can start flowing again.
18312  * @pdev: Pointer to PCI device
18313  *
18314  * This callback is called when the error recovery driver tells
18315  * us that it's OK to resume normal operation.
18316  */
18317 static void tg3_io_resume(struct pci_dev *pdev)
18318 {
18319         struct net_device *netdev = pci_get_drvdata(pdev);
18320         struct tg3 *tp = netdev_priv(netdev);
18321         int err;
18322
18323         rtnl_lock();
18324
18325         if (!netdev || !netif_running(netdev))
18326                 goto done;
18327
18328         tg3_full_lock(tp, 0);
18329         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18330         tg3_flag_set(tp, INIT_COMPLETE);
18331         err = tg3_restart_hw(tp, true);
18332         if (err) {
18333                 tg3_full_unlock(tp);
18334                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18335                 goto done;
18336         }
18337
18338         netif_device_attach(netdev);
18339
18340         tg3_timer_start(tp);
18341
18342         tg3_netif_start(tp);
18343
18344         tg3_full_unlock(tp);
18345
18346         tg3_phy_start(tp);
18347
18348 done:
18349         tp->pcierr_recovery = false;
18350         rtnl_unlock();
18351 }
18352
18353 static const struct pci_error_handlers tg3_err_handler = {
18354         .error_detected = tg3_io_error_detected,
18355         .slot_reset     = tg3_io_slot_reset,
18356         .resume         = tg3_io_resume
18357 };
18358
18359 static struct pci_driver tg3_driver = {
18360         .name           = DRV_MODULE_NAME,
18361         .id_table       = tg3_pci_tbl,
18362         .probe          = tg3_init_one,
18363         .remove         = tg3_remove_one,
18364         .err_handler    = &tg3_err_handler,
18365         .driver.pm      = &tg3_pm_ops,
18366         .shutdown       = tg3_shutdown,
18367 };
18368
18369 module_pci_driver(tg3_driver);