Merge tag 'xfs-4.20-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
[sfrench/cifs-2.6.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *      Derived from proprietary unpublished source code,
14  *      Copyright (C) 2000-2016 Broadcom Corporation.
15  *      Copyright (C) 2016-2017 Broadcom Ltd.
16  *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *      refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *      Permission is hereby granted for the distribution of this firmware
20  *      data in hexadecimal or equivalent format, provided this copyright
21  *      notice is accompanying it.
22  */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/ip.h>
61
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68
69 #ifdef CONFIG_SPARC
70 #include <asm/idprom.h>
71 #include <asm/prom.h>
72 #endif
73
74 #define BAR_0   0
75 #define BAR_2   2
76
77 #include "tg3.h"
78
79 /* Functions & macros to verify TG3_FLAGS types */
80
/* Return nonzero if @flag is set in the @bits flag bitmap. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
85
/* Set @flag in the @bits flag bitmap. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
90
/* Clear @flag in the @bits flag bitmap. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
95
96 #define tg3_flag(tp, flag)                              \
97         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
98 #define tg3_flag_set(tp, flag)                          \
99         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
100 #define tg3_flag_clear(tp, flag)                        \
101         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
102
103 #define DRV_MODULE_NAME         "tg3"
104 #define TG3_MAJ_NUM                     3
105 #define TG3_MIN_NUM                     137
106 #define DRV_MODULE_VERSION      \
107         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
108 #define DRV_MODULE_RELDATE      "May 11, 2014"
109
110 #define RESET_KIND_SHUTDOWN     0
111 #define RESET_KIND_INIT         1
112 #define RESET_KIND_SUSPEND      2
113
114 #define TG3_DEF_RX_MODE         0
115 #define TG3_DEF_TX_MODE         0
116 #define TG3_DEF_MSG_ENABLE        \
117         (NETIF_MSG_DRV          | \
118          NETIF_MSG_PROBE        | \
119          NETIF_MSG_LINK         | \
120          NETIF_MSG_TIMER        | \
121          NETIF_MSG_IFDOWN       | \
122          NETIF_MSG_IFUP         | \
123          NETIF_MSG_RX_ERR       | \
124          NETIF_MSG_TX_ERR)
125
126 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
127
128 /* length of time before we decide the hardware is borked,
129  * and dev->tx_timeout() should be called to fix the problem
130  */
131
132 #define TG3_TX_TIMEOUT                  (5 * HZ)
133
134 /* hardware minimum and maximum for a single frame's data payload */
135 #define TG3_MIN_MTU                     ETH_ZLEN
136 #define TG3_MAX_MTU(tp) \
137         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
138
139 /* These numbers seem to be hard coded in the NIC firmware somehow.
140  * You can't change the ring sizes, but you can change where you place
141  * them in the NIC onboard memory.
142  */
143 #define TG3_RX_STD_RING_SIZE(tp) \
144         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
145          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
146 #define TG3_DEF_RX_RING_PENDING         200
147 #define TG3_RX_JMB_RING_SIZE(tp) \
148         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
149          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
150 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
151
152 /* Do not place this n-ring entries value into the tp struct itself,
153  * we really want to expose these constants to GCC so that modulo et
154  * al.  operations are done with shifts and masks instead of with
155  * hw multiply/modulo instructions.  Another solution would be to
156  * replace things like '% foo' with '& (foo - 1)'.
157  */
158
159 #define TG3_TX_RING_SIZE                512
160 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
161
162 #define TG3_RX_STD_RING_BYTES(tp) \
163         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
164 #define TG3_RX_JMB_RING_BYTES(tp) \
165         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
166 #define TG3_RX_RCB_RING_BYTES(tp) \
167         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
168 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
169                                  TG3_TX_RING_SIZE)
170 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
171
172 #define TG3_DMA_BYTE_ENAB               64
173
174 #define TG3_RX_STD_DMA_SZ               1536
175 #define TG3_RX_JMB_DMA_SZ               9046
176
177 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
178
179 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
180 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
181
182 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
183         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
184
185 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
186         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
187
188 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
189  * that are at least dword aligned when used in PCIX mode.  The driver
190  * works around this bug by double copying the packet.  This workaround
191  * is built into the normal double copy length check for efficiency.
192  *
193  * However, the double copy is only necessary on those architectures
194  * where unaligned memory accesses are inefficient.  For those architectures
195  * where unaligned memory accesses incur little penalty, we can reintegrate
196  * the 5701 in the normal rx path.  Doing so saves a device structure
197  * dereference by hardcoding the double copy threshold in place.
198  */
199 #define TG3_RX_COPY_THRESHOLD           256
200 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
201         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
202 #else
203         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
204 #endif
205
206 #if (NET_IP_ALIGN != 0)
207 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
208 #else
209 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
210 #endif
211
212 /* minimum number of free TX descriptors required to wake up TX process */
213 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
214 #define TG3_TX_BD_DMA_MAX_2K            2048
215 #define TG3_TX_BD_DMA_MAX_4K            4096
216
217 #define TG3_RAW_IP_ALIGN 2
218
219 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
220 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
221
222 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
223 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
224
225 #define FIRMWARE_TG3            "tigon/tg3.bin"
226 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
227 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
228 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
229
/* Human-readable driver identification string. */
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Bitmap of debugging message categories to enable. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
244
245 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
246 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
247
/* PCI IDs this driver binds to.  Entries carrying driver_data flags mark
 * 10/100-only parts (TG3_DRV_DATA_FLAG_*).
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
368
/* String keys reported for the ethtool statistics (ethtool -S). */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
451
452 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
453 #define TG3_NVRAM_TEST          0
454 #define TG3_LINK_TEST           1
455 #define TG3_REGISTER_TEST       2
456 #define TG3_MEMORY_TEST         3
457 #define TG3_MAC_LOOPB_TEST      4
458 #define TG3_PHY_LOOPB_TEST      5
459 #define TG3_EXT_LOOPB_TEST      6
460 #define TG3_INTERRUPT_TEST      7
461
462
/* Names of the ethtool self-tests, indexed by the TG3_*_TEST constants. */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
475
476 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
477
478
/* Write @val to the device register at offset @off in the mapped
 * register space (tp->regs).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
483
/* Read the device register at offset @off in the mapped register space. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
488
/* Write @val to APE register @off in the mapped APE space (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
493
/* Read APE register @off from the mapped APE space (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
498
499 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
500 {
501         unsigned long flags;
502
503         spin_lock_irqsave(&tp->indirect_lock, flags);
504         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
506         spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 }
508
509 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
510 {
511         writel(val, tp->regs + off);
512         readl(tp->regs + off);
513 }
514
515 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
516 {
517         unsigned long flags;
518         u32 val;
519
520         spin_lock_irqsave(&tp->indirect_lock, flags);
521         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
522         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524         return val;
525 }
526
/* Write @val to mailbox register @off when register access is indirect.
 * The receive-return and standard-ring producer mailboxes have dedicated
 * shadow registers in PCI config space; all other mailboxes go through
 * the TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA window under indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* Mailbox registers are offset by 0x5600 in the indirect window */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
556
/* Read mailbox register @off through the indirect config-space window,
 * serialized by tp->indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* Mailbox registers are offset by 0x5600 in the indirect window */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
568
569 /* usec_wait specifies the wait time in usec when writing to certain registers
570  * where it is unsafe to read back the register without some delay.
571  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
572  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
573  */
574 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
575 {
576         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
577                 /* Non-posted methods */
578                 tp->write32(tp, off, val);
579         else {
580                 /* Posted method */
581                 tg3_write32(tp, off, val);
582                 if (usec_wait)
583                         udelay(usec_wait);
584                 tp->read32(tp, off);
585         }
586         /* Wait again after the read for the posted method to guarantee that
587          * the wait time is met.
588          */
589         if (usec_wait)
590                 udelay(usec_wait);
591 }
592
/* Write a mailbox register and, when the chip's flags require it, read
 * it back so the posted write reaches the device before we continue.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
601
602 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
603 {
604         void __iomem *mbox = tp->regs + off;
605         writel(val, mbox);
606         if (tg3_flag(tp, TXD_MBOX_HWBUG))
607                 writel(val, mbox);
608         if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
609             tg3_flag(tp, FLUSH_POSTED_WRITES))
610                 readl(mbox);
611 }
612
/* 5906-specific: read mailbox @off via its GRC mailbox alias. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
617
/* 5906-specific: write mailbox @off via its GRC mailbox alias. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
622
623 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
624 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
625 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
626 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
627 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
628
629 #define tw32(reg, val)                  tp->write32(tp, reg, val)
630 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
631 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
632 #define tr32(reg)                       tp->read32(tp, reg)
633
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window, using PCI config-space accesses when SRAM_USE_CONFIG is set
 * and the memory-mapped window registers otherwise.  On the 5906,
 * writes into the stats-block range are silently dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
658
/* Read a 32-bit word of NIC on-chip SRAM at offset @off into *@val,
 * using the same memory-window mechanisms as tg3_write_mem().  On the
 * 5906, reads from the stats-block range return 0 without touching
 * the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
685
686 static void tg3_ape_lock_init(struct tg3 *tp)
687 {
688         int i;
689         u32 regbase, bit;
690
691         if (tg3_asic_rev(tp) == ASIC_REV_5761)
692                 regbase = TG3_APE_LOCK_GRANT;
693         else
694                 regbase = TG3_APE_PER_LOCK_GRANT;
695
696         /* Make sure the driver hasn't any stale locks. */
697         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
698                 switch (i) {
699                 case TG3_APE_LOCK_PHY0:
700                 case TG3_APE_LOCK_PHY1:
701                 case TG3_APE_LOCK_PHY2:
702                 case TG3_APE_LOCK_PHY3:
703                         bit = APE_LOCK_GRANT_DRIVER;
704                         break;
705                 default:
706                         if (!tp->pci_fn)
707                                 bit = APE_LOCK_GRANT_DRIVER;
708                         else
709                                 bit = 1 << tp->pci_fn;
710                 }
711                 tg3_ape_write32(tp, regbase + 4 * i, bit);
712         }
713
714 }
715
/* Acquire APE arbitration lock @locknum for the driver.  Polls the
 * grant register for up to 1 ms; returns 0 on success, -EBUSY if the
 * lock could not be obtained (the request is revoked in that case),
 * or -EINVAL for an unknown lock number.  Returns 0 immediately when
 * the APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The GPIO lock is a no-op on the 5761 */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Non-PHY locks are requested per PCI function */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* The 5761 uses the legacy request/grant register block */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
778
779 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
780 {
781         u32 gnt, bit;
782
783         if (!tg3_flag(tp, ENABLE_APE))
784                 return;
785
786         switch (locknum) {
787         case TG3_APE_LOCK_GPIO:
788                 if (tg3_asic_rev(tp) == ASIC_REV_5761)
789                         return;
790                 /* else: fall through */
791         case TG3_APE_LOCK_GRC:
792         case TG3_APE_LOCK_MEM:
793                 if (!tp->pci_fn)
794                         bit = APE_LOCK_GRANT_DRIVER;
795                 else
796                         bit = 1 << tp->pci_fn;
797                 break;
798         case TG3_APE_LOCK_PHY0:
799         case TG3_APE_LOCK_PHY1:
800         case TG3_APE_LOCK_PHY2:
801         case TG3_APE_LOCK_PHY3:
802                 bit = APE_LOCK_GRANT_DRIVER;
803                 break;
804         default:
805                 return;
806         }
807
808         if (tg3_asic_rev(tp) == ASIC_REV_5761)
809                 gnt = TG3_APE_LOCK_GRANT;
810         else
811                 gnt = TG3_APE_PER_LOCK_GRANT;
812
813         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
814 }
815
816 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 apedata;
819
820         while (timeout_us) {
821                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
822                         return -EBUSY;
823
824                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
826                         break;
827
828                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
829
830                 udelay(10);
831                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
832         }
833
834         return timeout_us ? 0 : -EBUSY;
835 }
836
837 #ifdef CONFIG_TIGON3_HWMON
/* Poll (in 10 us steps, up to timeout_us) for the APE to clear the
 * EVENT_PENDING bit, i.e. to finish servicing the event the driver
 * posted.  Returns 0 once the event has been consumed, nonzero (1)
 * on timeout.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Pending bit cleared => event serviced. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	/* True (timed out) only if the loop ran to completion. */
	return i == timeout_us / 10;
}
853
/* Read @len bytes of the APE NCSI scratchpad, starting at @base_off,
 * into @data.  Each chunk is requested from the APE with a
 * scratchpad-read event and copied out of the shared message buffer.
 * Returns 0 on success (or when there is no NCSI APE), -ENODEV when
 * no APE firmware signature is found, -EAGAIN when the APE is not
 * ready or does not answer in time, or an error from
 * tg3_ape_event_lock().  @len is assumed to be a multiple of 4 -
 * the copy-out loop works in 32-bit words.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Sanity check: APE firmware must be present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer and its capacity.  The
	 * first two words of the buffer carry the request (offset,
	 * length); the payload starts right after them.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post the scratchpad-read request ... */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* ... with the source offset and chunk length. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock (held since tg3_ape_event_lock)
		 * and ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the returned chunk out one 32-bit word at a time. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
917 #endif
918
919 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
920 {
921         int err;
922         u32 apedata;
923
924         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
925         if (apedata != APE_SEG_SIG_MAGIC)
926                 return -EAGAIN;
927
928         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
929         if (!(apedata & APE_FW_STATUS_READY))
930                 return -EAGAIN;
931
932         /* Wait for up to 20 millisecond for APE to service previous event. */
933         err = tg3_ape_event_lock(tp, 20000);
934         if (err)
935                 return err;
936
937         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
938                         event | APE_EVENT_STATUS_EVENT_PENDING);
939
940         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
941         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
942
943         return 0;
944 }
945
/* Inform the APE management firmware of a driver state transition
 * (driver start on RESET_KIND_INIT, unload/WoL on RESET_KIND_SHUTDOWN)
 * by updating the host segment in APE shared memory and sending a
 * state-change event.  No-op when no APE is present or for other
 * reset kinds.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, init
		 * counter, driver version and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* If Wake-on-LAN is armed, tell the APE to keep the
		 * link up (auto speed); otherwise report plain unload.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
993
994 static void tg3_send_ape_heartbeat(struct tg3 *tp,
995                                    unsigned long interval)
996 {
997         /* Check if hb interval has exceeded */
998         if (!tg3_flag(tp, ENABLE_APE) ||
999             time_before(jiffies, tp->ape_hb_jiffies + interval))
1000                 return;
1001
1002         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
1003         tp->ape_hb_jiffies = jiffies;
1004 }
1005
1006 static void tg3_disable_ints(struct tg3 *tp)
1007 {
1008         int i;
1009
1010         tw32(TG3PCI_MISC_HOST_CTRL,
1011              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1012         for (i = 0; i < tp->irq_max; i++)
1013                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1014 }
1015
/* Re-enable device interrupts: unmask the PCI interrupt, unblock each
 * NAPI vector's mailbox with its last processed tag, and kick the
 * host coalescing engine (or force an interrupt) so any work that
 * arrived while interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Make irq_sync visible before the interrupt is unmasked. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI requires a second mailbox write to re-arm. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1046
1047 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1048 {
1049         struct tg3 *tp = tnapi->tp;
1050         struct tg3_hw_status *sblk = tnapi->hw_status;
1051         unsigned int work_exists = 0;
1052
1053         /* check for phy events */
1054         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1055                 if (sblk->status & SD_STATUS_LINK_CHG)
1056                         work_exists = 1;
1057         }
1058
1059         /* check for TX work to do */
1060         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1061                 work_exists = 1;
1062
1063         /* check for RX work to do */
1064         if (tnapi->rx_rcb_prod_idx &&
1065             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1066                 work_exists = 1;
1067
1068         return work_exists;
1069 }
1070
1071 /* tg3_int_reenable
1072  *  similar to tg3_enable_ints, but it accurately determines whether there
1073  *  is new work pending and can return without flushing the PIO write
1074  *  which reenables interrupts
1075  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Unblock the mailbox with the last processed tag; the mmiowb
	 * orders this MMIO write before any later ones.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1091
/* Step the core clock down to its base rate, transitioning through
 * the intermediate ALTCLK settings the hardware requires.  No-op on
 * CPMU-equipped and 5780-class parts, which manage this themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN bits and the low divider field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Drop from 44 MHz core in two steps via ALTCLK. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1124
1125 #define PHY_BUSY_LOOPS  5000
1126
/* Read MII register @reg of the PHY at @phy_addr through the MAC's
 * MI communication register.  Temporarily suspends MI auto-polling
 * and serializes against APE PHY access.  On success stores the
 * 16-bit value in *val and returns 0; returns -EBUSY if the MI
 * transaction does not complete within PHY_BUSY_LOOPS polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Suspend MI auto-polling so it cannot collide with our
	 * manual transaction.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion (BUSY bit cleared). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to fetch the final data bits. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore MI auto-polling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1180
/* Read MII register @reg of the default PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1185
/* Write @val to MII register @reg of the PHY at @phy_addr through the
 * MAC's MI communication register.  Mirrors __tg3_readphy(): suspends
 * MI auto-polling and takes the APE PHY lock for the transaction.
 * Returns 0 on success, -EBUSY on MI transaction timeout.  Writes to
 * MII_CTRL1000/MII_TG3_AUX_CTRL are silently skipped on FET PHYs,
 * which do not have those registers.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Suspend MI auto-polling for the manual transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion (BUSY bit cleared). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore MI auto-polling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1239
/* Write @val to MII register @reg of the default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1244
1245 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1246 {
1247         int err;
1248
1249         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250         if (err)
1251                 goto done;
1252
1253         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254         if (err)
1255                 goto done;
1256
1257         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259         if (err)
1260                 goto done;
1261
1262         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1263
1264 done:
1265         return err;
1266 }
1267
1268 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1269 {
1270         int err;
1271
1272         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1273         if (err)
1274                 goto done;
1275
1276         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1277         if (err)
1278                 goto done;
1279
1280         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1281                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1282         if (err)
1283                 goto done;
1284
1285         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1286
1287 done:
1288         return err;
1289 }
1290
1291 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1292 {
1293         int err;
1294
1295         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1296         if (!err)
1297                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1298
1299         return err;
1300 }
1301
1302 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1303 {
1304         int err;
1305
1306         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1307         if (!err)
1308                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1309
1310         return err;
1311 }
1312
1313 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1314 {
1315         int err;
1316
1317         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1318                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1319                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1320         if (!err)
1321                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1322
1323         return err;
1324 }
1325
1326 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1327 {
1328         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1329                 set |= MII_TG3_AUXCTL_MISC_WREN;
1330
1331         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1332 }
1333
1334 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1335 {
1336         u32 val;
1337         int err;
1338
1339         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1340
1341         if (err)
1342                 return err;
1343
1344         if (enable)
1345                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1346         else
1347                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1348
1349         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1350                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1351
1352         return err;
1353 }
1354
/* Write @val to miscellaneous shadow register @reg; the WREN bit
 * commits the write.
 */
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
1360
1361 static int tg3_bmcr_reset(struct tg3 *tp)
1362 {
1363         u32 phy_control;
1364         int limit, err;
1365
1366         /* OK, reset it, and poll the BMCR_RESET bit until it
1367          * clears or we time out.
1368          */
1369         phy_control = BMCR_RESET;
1370         err = tg3_writephy(tp, MII_BMCR, phy_control);
1371         if (err != 0)
1372                 return -EBUSY;
1373
1374         limit = 5000;
1375         while (limit--) {
1376                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1377                 if (err != 0)
1378                         return -EBUSY;
1379
1380                 if ((phy_control & BMCR_RESET) == 0) {
1381                         udelay(40);
1382                         break;
1383                 }
1384                 udelay(10);
1385         }
1386         if (limit < 0)
1387                 return -EBUSY;
1388
1389         return 0;
1390 }
1391
1392 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1393 {
1394         struct tg3 *tp = bp->priv;
1395         u32 val;
1396
1397         spin_lock_bh(&tp->lock);
1398
1399         if (__tg3_readphy(tp, mii_id, reg, &val))
1400                 val = -EIO;
1401
1402         spin_unlock_bh(&tp->lock);
1403
1404         return val;
1405 }
1406
1407 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1408 {
1409         struct tg3 *tp = bp->priv;
1410         u32 ret = 0;
1411
1412         spin_lock_bh(&tp->lock);
1413
1414         if (__tg3_writephy(tp, mii_id, reg, val))
1415                 ret = -EIO;
1416
1417         spin_unlock_bh(&tp->lock);
1418
1419         return ret;
1420 }
1421
/* Configure the 5785 MAC's PHY interface glue (LED modes, RGMII
 * in-band/out-of-band signaling, clock timeouts) to match the
 * attached PHY type.  Unknown PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	/* Pick the LED mode bits for the recognized PHY models. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status: mask off the out-of-band signal
	 * qualifiers and enable in-band signaling.
	 */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		/* Optional external in-band RX decode / TX status send. */
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally program the extended RGMII mode register to match
	 * the in-band/out-of-band configuration chosen above.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1502
/* Disable MI auto-polling so the driver owns the MDIO bus, and
 * reapply the 5785 PHY interface configuration if the MDIO bus has
 * already been brought up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1513
/* Determine the PHY address for this device, start manual MDIO, and -
 * when phylib is in use - allocate and register an mii_bus, locate the
 * PHY, and apply per-PHY-model interface settings.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ maps PHY addresses per PCI function, with the
		 * serdes PHYs 7 addresses above the copper ones.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		/* SSB cores behind a Robo switch get their PHY address
		 * from the SSB layer.
		 */
		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	/* Only scan the one address our PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply interface mode and driver flags per PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1620
1621 static void tg3_mdio_fini(struct tg3 *tp)
1622 {
1623         if (tg3_flag(tp, MDIOBUS_INITED)) {
1624                 tg3_flag_clear(tp, MDIOBUS_INITED);
1625                 mdiobus_unregister(tp->mdio_bus);
1626                 mdiobus_free(tp->mdio_bus);
1627         }
1628 }
1629
1630 /* tp->lock is held. */
1631 static inline void tg3_generate_fw_event(struct tg3 *tp)
1632 {
1633         u32 val;
1634
1635         val = tr32(GRC_RX_CPU_EVENT);
1636         val |= GRC_RX_CPU_DRIVER_EVENT;
1637         tw32_f(GRC_RX_CPU_EVENT, val);
1638
1639         tp->last_event_jiffies = jiffies;
1640 }
1641
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* Wait for the firmware to acknowledge (clear) the previously posted
 * driver event before a new one is generated.  The firmware is given
 * at most TG3_FW_EVENT_TIMEOUT_USEC measured from the moment the last
 * event was generated (tp->last_event_jiffies).
 *
 * tp->lock is held.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps; +1 guarantees at least one poll. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Cleared driver-event bit means the firmware ACKed. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Stop polling if the device dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1673
1674 /* tp->lock is held. */
1675 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1676 {
1677         u32 reg, val;
1678
1679         val = 0;
1680         if (!tg3_readphy(tp, MII_BMCR, &reg))
1681                 val = reg << 16;
1682         if (!tg3_readphy(tp, MII_BMSR, &reg))
1683                 val |= (reg & 0xffff);
1684         *data++ = val;
1685
1686         val = 0;
1687         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1688                 val = reg << 16;
1689         if (!tg3_readphy(tp, MII_LPA, &reg))
1690                 val |= (reg & 0xffff);
1691         *data++ = val;
1692
1693         val = 0;
1694         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1695                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1696                         val = reg << 16;
1697                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1698                         val |= (reg & 0xffff);
1699         }
1700         *data++ = val;
1701
1702         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1703                 val = reg << 16;
1704         else
1705                 val = 0;
1706         *data++ = val;
1707 }
1708
1709 /* tp->lock is held. */
1710 static void tg3_ump_link_report(struct tg3 *tp)
1711 {
1712         u32 data[4];
1713
1714         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1715                 return;
1716
1717         tg3_phy_gather_ump_data(tp, data);
1718
1719         tg3_wait_for_event_ack(tp);
1720
1721         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1722         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1723         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1724         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1725         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1726         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1727
1728         tg3_generate_fw_event(tp);
1729 }
1730
/* Pause the management firmware (FWCMD_NICDRV_PAUSE_FW) so it stays
 * quiescent while the driver resets the chip.  Only needed when ASF is
 * enabled and the APE is not managing the device.
 *
 * tp->lock is held.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1749 {
1750         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1751                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1752
1753         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1754                 switch (kind) {
1755                 case RESET_KIND_INIT:
1756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757                                       DRV_STATE_START);
1758                         break;
1759
1760                 case RESET_KIND_SHUTDOWN:
1761                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762                                       DRV_STATE_UNLOAD);
1763                         break;
1764
1765                 case RESET_KIND_SUSPEND:
1766                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767                                       DRV_STATE_SUSPEND);
1768                         break;
1769
1770                 default:
1771                         break;
1772                 }
1773         }
1774 }
1775
1776 /* tp->lock is held. */
1777 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1778 {
1779         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1780                 switch (kind) {
1781                 case RESET_KIND_INIT:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_START_DONE);
1784                         break;
1785
1786                 case RESET_KIND_SHUTDOWN:
1787                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788                                       DRV_STATE_UNLOAD_DONE);
1789                         break;
1790
1791                 default:
1792                         break;
1793                 }
1794         }
1795 }
1796
1797 /* tp->lock is held. */
1798 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1799 {
1800         if (tg3_flag(tp, ENABLE_ASF)) {
1801                 switch (kind) {
1802                 case RESET_KIND_INIT:
1803                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804                                       DRV_STATE_START);
1805                         break;
1806
1807                 case RESET_KIND_SHUTDOWN:
1808                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809                                       DRV_STATE_UNLOAD);
1810                         break;
1811
1812                 case RESET_KIND_SUSPEND:
1813                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1814                                       DRV_STATE_SUSPEND);
1815                         break;
1816
1817                 default:
1818                         break;
1819                 }
1820         }
1821 }
1822
/* Wait for the on-chip firmware to report that its post-reset
 * initialization is complete.  Returns 0 on success (or when no
 * firmware is expected to be present), -ENODEV when the 5906 VCPU
 * never reports init-done or the PCI channel goes offline.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* A previous poll already concluded no firmware is present. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware signals completion by writing back the
		 * one's complement of the magic value posted by
		 * tg3_write_sig_pre_reset().
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1886
1887 static void tg3_link_report(struct tg3 *tp)
1888 {
1889         if (!netif_carrier_ok(tp->dev)) {
1890                 netif_info(tp, link, tp->dev, "Link is down\n");
1891                 tg3_ump_link_report(tp);
1892         } else if (netif_msg_link(tp)) {
1893                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1894                             (tp->link_config.active_speed == SPEED_1000 ?
1895                              1000 :
1896                              (tp->link_config.active_speed == SPEED_100 ?
1897                               100 : 10)),
1898                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1899                              "full" : "half"));
1900
1901                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1902                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1903                             "on" : "off",
1904                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1905                             "on" : "off");
1906
1907                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1908                         netdev_info(tp->dev, "EEE is %s\n",
1909                                     tp->setlpicnt ? "enabled" : "disabled");
1910
1911                 tg3_ump_link_report(tp);
1912         }
1913
1914         tp->link_up = netif_carrier_ok(tp->dev);
1915 }
1916
1917 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1918 {
1919         u32 flowctrl = 0;
1920
1921         if (adv & ADVERTISE_PAUSE_CAP) {
1922                 flowctrl |= FLOW_CTRL_RX;
1923                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1924                         flowctrl |= FLOW_CTRL_TX;
1925         } else if (adv & ADVERTISE_PAUSE_ASYM)
1926                 flowctrl |= FLOW_CTRL_TX;
1927
1928         return flowctrl;
1929 }
1930
1931 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1932 {
1933         u16 miireg;
1934
1935         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1936                 miireg = ADVERTISE_1000XPAUSE;
1937         else if (flow_ctrl & FLOW_CTRL_TX)
1938                 miireg = ADVERTISE_1000XPSE_ASYM;
1939         else if (flow_ctrl & FLOW_CTRL_RX)
1940                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1941         else
1942                 miireg = 0;
1943
1944         return miireg;
1945 }
1946
1947 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1948 {
1949         u32 flowctrl = 0;
1950
1951         if (adv & ADVERTISE_1000XPAUSE) {
1952                 flowctrl |= FLOW_CTRL_RX;
1953                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1954                         flowctrl |= FLOW_CTRL_TX;
1955         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1956                 flowctrl |= FLOW_CTRL_TX;
1957
1958         return flowctrl;
1959 }
1960
1961 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1962 {
1963         u8 cap = 0;
1964
1965         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1966                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1967         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1968                 if (lcladv & ADVERTISE_1000XPAUSE)
1969                         cap = FLOW_CTRL_RX;
1970                 if (rmtadv & ADVERTISE_1000XPAUSE)
1971                         cap = FLOW_CTRL_TX;
1972         }
1973
1974         return cap;
1975 }
1976
1977 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1978 {
1979         u8 autoneg;
1980         u8 flowctrl = 0;
1981         u32 old_rx_mode = tp->rx_mode;
1982         u32 old_tx_mode = tp->tx_mode;
1983
1984         if (tg3_flag(tp, USE_PHYLIB))
1985                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1986         else
1987                 autoneg = tp->link_config.autoneg;
1988
1989         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1990                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1991                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1992                 else
1993                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1994         } else
1995                 flowctrl = tp->link_config.flowctrl;
1996
1997         tp->link_config.active_flowctrl = flowctrl;
1998
1999         if (flowctrl & FLOW_CTRL_RX)
2000                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
2001         else
2002                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2003
2004         if (old_rx_mode != tp->rx_mode)
2005                 tw32_f(MAC_RX_MODE, tp->rx_mode);
2006
2007         if (flowctrl & FLOW_CTRL_TX)
2008                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2009         else
2010                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2011
2012         if (old_tx_mode != tp->tx_mode)
2013                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2014 }
2015
/* phylib link-change callback.  Mirrors the PHY's negotiated state into
 * the MAC: port mode (MII/GMII), half-duplex bit, flow control, MI
 * status attention and TX length parameters, then logs the link state
 * if anything relevant changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	/* Start from the current mode with port-mode and duplex bits
	 * cleared; they are re-derived below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch MAC_MODE when it actually changes. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex uses a different slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report after dropping tp->lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
2099
2100 static int tg3_phy_init(struct tg3 *tp)
2101 {
2102         struct phy_device *phydev;
2103
2104         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2105                 return 0;
2106
2107         /* Bring the PHY back to a known state. */
2108         tg3_bmcr_reset(tp);
2109
2110         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2111
2112         /* Attach the MAC to the PHY. */
2113         phydev = phy_connect(tp->dev, phydev_name(phydev),
2114                              tg3_adjust_link, phydev->interface);
2115         if (IS_ERR(phydev)) {
2116                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2117                 return PTR_ERR(phydev);
2118         }
2119
2120         /* Mask with MAC supported features. */
2121         switch (phydev->interface) {
2122         case PHY_INTERFACE_MODE_GMII:
2123         case PHY_INTERFACE_MODE_RGMII:
2124                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2125                         phy_set_max_speed(phydev, SPEED_1000);
2126                         phy_support_asym_pause(phydev);
2127                         break;
2128                 }
2129                 /* fall through */
2130         case PHY_INTERFACE_MODE_MII:
2131                 phy_set_max_speed(phydev, SPEED_100);
2132                 phy_support_asym_pause(phydev);
2133                 break;
2134         default:
2135                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2136                 return -EINVAL;
2137         }
2138
2139         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2140
2141         phy_attached_info(phydev);
2142
2143         return 0;
2144 }
2145
/* (Re)start the phylib state machine for an attached PHY.  When coming
 * out of low-power mode, the link parameters saved in tp->link_config
 * are restored to the phy_device first.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Restore the settings saved before powering down. */
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2167
2168 static void tg3_phy_stop(struct tg3 *tp)
2169 {
2170         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2171                 return;
2172
2173         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2174 }
2175
2176 static void tg3_phy_fini(struct tg3 *tp)
2177 {
2178         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2179                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2180                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2181         }
2182 }
2183
2184 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2185 {
2186         int err;
2187         u32 val;
2188
2189         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2190                 return 0;
2191
2192         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2193                 /* Cannot do read-modify-write on 5401 */
2194                 err = tg3_phy_auxctl_write(tp,
2195                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2196                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2197                                            0x4c20);
2198                 goto done;
2199         }
2200
2201         err = tg3_phy_auxctl_read(tp,
2202                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2203         if (err)
2204                 return err;
2205
2206         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2207         err = tg3_phy_auxctl_write(tp,
2208                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2209
2210 done:
2211         return err;
2212 }
2213
2214 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2215 {
2216         u32 phytest;
2217
2218         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2219                 u32 phy;
2220
2221                 tg3_writephy(tp, MII_TG3_FET_TEST,
2222                              phytest | MII_TG3_FET_SHADOW_EN);
2223                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2224                         if (enable)
2225                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2226                         else
2227                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2228                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2229                 }
2230                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2231         }
2232 }
2233
/* Enable/disable auto power-down (APD) in the PHY.  Not applicable to
 * pre-5705 chips, nor to the MII serdes on 5717-plus devices.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	/* FET PHYs use the shadow-register mechanism instead. */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* NOTE(review): DLLAPD is omitted only when enabling APD on a
	 * 5784 -- presumably a chip-specific restriction; confirm
	 * against the PHY errata/datasheet.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	/* 84ms wake-up timer, plus the enable bit when turning APD on. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2264
/* Enable/disable automatic MDI crossover (auto-MDIX).  Not applicable
 * to pre-5705 chips or serdes links.  FET PHYs are programmed through
 * the shadow register window; other PHYs through the AUXCTL MISC block.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow window, read-modify-write the
			 * MDIX bit, then restore the original TEST reg.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2305
2306 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2307 {
2308         int ret;
2309         u32 val;
2310
2311         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2312                 return;
2313
2314         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2315         if (!ret)
2316                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2317                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2318 }
2319
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * word cached in tp->phy_otp.  No-op when no OTP data is available.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP registers are only writable with SMDSP access enabled. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Restore normal (non-SMDSP) AUXCTL access. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2356
/* Read the current EEE state from the PHY and CPMU into @eee, or into
 * the driver's cached tp->eee when @eee is NULL.  Returns early (with
 * only the fields read so far updated) if any clause-45 PHY read fails.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2396
/* Re-evaluate EEE after a link change.  When EEE can be active on the
 * new link (autoneg, full duplex, 100 or 1000 Mbps), program the LPI
 * exit timer and arm tp->setlpicnt (consumed elsewhere in the driver --
 * presumably by the periodic timer; verify against the caller).
 * Otherwise clear the DSP TAP26 setting and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear TAP26 and turn LPI off. */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2436
2437 static void tg3_phy_eee_enable(struct tg3 *tp)
2438 {
2439         u32 val;
2440
2441         if (tp->link_config.active_speed == SPEED_1000 &&
2442             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2443              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2444              tg3_flag(tp, 57765_CLASS)) &&
2445             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2446                 val = MII_TG3_DSP_TAP26_ALNOKO |
2447                       MII_TG3_DSP_TAP26_RMRXSTO;
2448                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2449                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2450         }
2451
2452         val = tr32(TG3_CPMU_EEE_MODE);
2453         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2454 }
2455
2456 static int tg3_wait_macro_done(struct tg3 *tp)
2457 {
2458         int limit = 100;
2459
2460         while (limit--) {
2461                 u32 tmp32;
2462
2463                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2464                         if ((tmp32 & 0x1000) == 0)
2465                                 break;
2466                 }
2467         }
2468         if (limit < 0)
2469                 return -EBUSY;
2470
2471         return 0;
2472 }
2473
/* Write a fixed test pattern into each of the PHY DSP's four channel
 * TAP blocks, read it back, and verify the readback.  On a macro
 * timeout, *resetp is set to 1 so the caller (tg3_phy_reset_5703_4_5)
 * resets the PHY before retrying; a plain data mismatch returns -EBUSY
 * without requesting another reset.  Returns 0 on success.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's TAP block for writing. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		/* Load the six-word test pattern. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and switch the macro to read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare two pattern words at a time; only
		 * the low 15 bits of the first and the low 4 bits of the
		 * second word are significant.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the 0x000b/0x4001/0x4005
				 * sequence before failing.
				 * NOTE(review): these magic values are
				 * undocumented here - presumed DSP cleanup.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2539
2540 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2541 {
2542         int chan;
2543
2544         for (chan = 0; chan < 4; chan++) {
2545                 int i;
2546
2547                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2548                              (chan * 0x2000) | 0x0200);
2549                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2550                 for (i = 0; i < 6; i++)
2551                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2552                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2553                 if (tg3_wait_macro_done(tp))
2554                         return -EBUSY;
2555         }
2556
2557         return 0;
2558 }
2559
2560 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2561 {
2562         u32 reg32, phy9_orig;
2563         int retries, do_phy_reset, err;
2564
2565         retries = 10;
2566         do_phy_reset = 1;
2567         do {
2568                 if (do_phy_reset) {
2569                         err = tg3_bmcr_reset(tp);
2570                         if (err)
2571                                 return err;
2572                         do_phy_reset = 0;
2573                 }
2574
2575                 /* Disable transmitter and interrupt.  */
2576                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2577                         continue;
2578
2579                 reg32 |= 0x3000;
2580                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2581
2582                 /* Set full-duplex, 1000 mbps.  */
2583                 tg3_writephy(tp, MII_BMCR,
2584                              BMCR_FULLDPLX | BMCR_SPEED1000);
2585
2586                 /* Set to master mode.  */
2587                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2588                         continue;
2589
2590                 tg3_writephy(tp, MII_CTRL1000,
2591                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2592
2593                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2594                 if (err)
2595                         return err;
2596
2597                 /* Block the PHY control access.  */
2598                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2599
2600                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2601                 if (!err)
2602                         break;
2603         } while (--retries);
2604
2605         err = tg3_phy_reset_chanpat(tp);
2606         if (err)
2607                 return err;
2608
2609         tg3_phydsp_write(tp, 0x8005, 0x0000);
2610
2611         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2612         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2613
2614         tg3_phy_toggle_auxctl_smdsp(tp, false);
2615
2616         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2617
2618         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2619         if (err)
2620                 return err;
2621
2622         reg32 &= ~0x3000;
2623         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2624
2625         return 0;
2626 }
2627
/* Mark the link as down: drop the net stack's carrier state, then
 * clear the driver's cached link flag.
 * NOTE(review): keeping this exact ordering - other paths may rely on
 * carrier going off before tp->link_up is cleared; confirm against
 * readers of tp->link_up before reordering.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2633
2634 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2635 {
2636         if (tg3_flag(tp, ENABLE_ASF))
2637                 netdev_warn(tp->dev,
2638                             "Management side-band traffic will be interrupted during phy settings change\n");
2639 }
2640
/* Reset the tigon3 PHY and reapply every chip-specific PHY workaround
 * (DSP fixups, CPMU adjustments, jumbo-frame settings, wirespeed).
 * Returns 0 on success or a negative errno.
 * NOTE(review): the previous comment mentioned a FORCE argument that
 * this function does not take; dropped as stale.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: clear the embedded PHY's IDDQ (power-down) bit before
	 * talking to it.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice - latched link-status bits need a second
	 * read to reflect current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report link loss before the reset tears the link down. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP test-pattern reset procedure. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear the CPMU's 10MB-RX-only mode
	 * across the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU mode. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784/5761 AX: take the 1000MB MAC clock out of 12.5MHz mode. */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining fixups do not apply to 5717+ MII-serdes parts. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP writes below; the register/value pairs are
	 * Broadcom magic numbers with no further documentation here.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2784
2785 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2786 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2787 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2788                                           TG3_GPIO_MSG_NEED_VAUX)
2789 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2790         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2791          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2792          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2793          (TG3_GPIO_MSG_DRVR_PRES << 12))
2794
2795 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2796         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2797          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2798          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2799          (TG3_GPIO_MSG_NEED_VAUX << 12))
2800
2801 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2802 {
2803         u32 status, shift;
2804
2805         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806             tg3_asic_rev(tp) == ASIC_REV_5719)
2807                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2808         else
2809                 status = tr32(TG3_CPMU_DRV_STATUS);
2810
2811         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2812         status &= ~(TG3_GPIO_MSG_MASK << shift);
2813         status |= (newstat << shift);
2814
2815         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816             tg3_asic_rev(tp) == ASIC_REV_5719)
2817                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2818         else
2819                 tw32(TG3_CPMU_DRV_STATUS, status);
2820
2821         return status >> TG3_APE_GPIO_MSG_SHIFT;
2822 }
2823
2824 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2825 {
2826         if (!tg3_flag(tp, IS_NIC))
2827                 return 0;
2828
2829         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2830             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2831             tg3_asic_rev(tp) == ASIC_REV_5720) {
2832                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2833                         return -EIO;
2834
2835                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2836
2837                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2839
2840                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2841         } else {
2842                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2843                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2844         }
2845
2846         return 0;
2847 }
2848
2849 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2850 {
2851         u32 grc_local_ctrl;
2852
2853         if (!tg3_flag(tp, IS_NIC) ||
2854             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855             tg3_asic_rev(tp) == ASIC_REV_5701)
2856                 return;
2857
2858         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2859
2860         tw32_wait_f(GRC_LOCAL_CTRL,
2861                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2862                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2863
2864         tw32_wait_f(GRC_LOCAL_CTRL,
2865                     grc_local_ctrl,
2866                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2867
2868         tw32_wait_f(GRC_LOCAL_CTRL,
2869                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2870                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 }
2872
/* Drive the power-switch GPIOs so the chip draws standby power from
 * the auxiliary (Vaux) supply.  The GPIO wiring differs per board
 * generation, hence the three variants below; each write is followed
 * by a settle delay (tw32_wait_f).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: single write asserting GPIO0/1. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO2, then drop GPIO0, one step at a time. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		/* Step 1: enable outputs, GPIO1 (and GPIO2 if usable) high. */
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		/* Step 2: raise GPIO0. */
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			/* Step 3: drop GPIO2 again. */
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2949
2950 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2951 {
2952         u32 msg = 0;
2953
2954         /* Serialize power state transitions */
2955         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2956                 return;
2957
2958         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2959                 msg = TG3_GPIO_MSG_NEED_VAUX;
2960
2961         msg = tg3_set_function_status(tp, msg);
2962
2963         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2964                 goto done;
2965
2966         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2967                 tg3_pwrsrc_switch_to_vaux(tp);
2968         else
2969                 tg3_pwrsrc_die_with_vmain(tp);
2970
2971 done:
2972         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2973 }
2974
2975 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2976 {
2977         bool need_vaux = false;
2978
2979         /* The GPIOs do something completely different on 57765. */
2980         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2981                 return;
2982
2983         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2984             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2985             tg3_asic_rev(tp) == ASIC_REV_5720) {
2986                 tg3_frob_aux_power_5717(tp, include_wol ?
2987                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2988                 return;
2989         }
2990
2991         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2992                 struct net_device *dev_peer;
2993
2994                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2995
2996                 /* remove_one() may have been run on the peer. */
2997                 if (dev_peer) {
2998                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2999
3000                         if (tg3_flag(tp_peer, INIT_COMPLETE))
3001                                 return;
3002
3003                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3004                             tg3_flag(tp_peer, ENABLE_ASF))
3005                                 need_vaux = true;
3006                 }
3007         }
3008
3009         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3010             tg3_flag(tp, ENABLE_ASF))
3011                 need_vaux = true;
3012
3013         if (need_vaux)
3014                 tg3_pwrsrc_switch_to_vaux(tp);
3015         else
3016                 tg3_pwrsrc_die_with_vmain(tp);
3017 }
3018
3019 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3020 {
3021         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3022                 return 1;
3023         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3024                 if (speed != SPEED_10)
3025                         return 1;
3026         } else if (speed == SPEED_10)
3027                 return 1;
3028
3029         return 0;
3030 }
3031
3032 static bool tg3_phy_power_bug(struct tg3 *tp)
3033 {
3034         switch (tg3_asic_rev(tp)) {
3035         case ASIC_REV_5700:
3036         case ASIC_REV_5704:
3037                 return true;
3038         case ASIC_REV_5780:
3039                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3040                         return true;
3041                 return false;
3042         case ASIC_REV_5717:
3043                 if (!tp->pci_fn)
3044                         return true;
3045                 return false;
3046         case ASIC_REV_5719:
3047         case ASIC_REV_5720:
3048                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3049                     !tp->pci_fn)
3050                         return true;
3051                 return false;
3052         }
3053
3054         return false;
3055 }
3056
3057 static bool tg3_phy_led_bug(struct tg3 *tp)
3058 {
3059         switch (tg3_asic_rev(tp)) {
3060         case ASIC_REV_5719:
3061         case ASIC_REV_5720:
3062                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3063                     !tp->pci_fn)
3064                         return true;
3065                 return false;
3066         }
3067
3068         return false;
3069 }
3070
/* Put the PHY into its low-power state.  Depending on chip/PHY type
 * this resets the serdes, gates the 5906 embedded PHY via IDDQ,
 * programs FET-PHY shadow registers, or sets BMCR power-down - except
 * on chips where powering the PHY down is known to be buggy.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	/* Caller asked to keep the link up across power down. */
	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* NOTE(review): bit 15 of MAC_SERDES_CFG has no
			 * named constant here - purpose unverified.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then set the IDDQ bit to gate the
		 * embedded PHY off.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open the shadow-register window, set standby
			 * power-down (SBPD) in auxmode 4, then restore
			 * the original test-register value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Drop the 1000MB MAC clock to 12.5MHz before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3144
3145 /* tp->lock is held. */
3146 static int tg3_nvram_lock(struct tg3 *tp)
3147 {
3148         if (tg3_flag(tp, NVRAM)) {
3149                 int i;
3150
3151                 if (tp->nvram_lock_cnt == 0) {
3152                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3153                         for (i = 0; i < 8000; i++) {
3154                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3155                                         break;
3156                                 udelay(20);
3157                         }
3158                         if (i == 8000) {
3159                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3160                                 return -ENODEV;
3161                         }
3162                 }
3163                 tp->nvram_lock_cnt++;
3164         }
3165         return 0;
3166 }
3167
3168 /* tp->lock is held. */
3169 static void tg3_nvram_unlock(struct tg3 *tp)
3170 {
3171         if (tg3_flag(tp, NVRAM)) {
3172                 if (tp->nvram_lock_cnt > 0)
3173                         tp->nvram_lock_cnt--;
3174                 if (tp->nvram_lock_cnt == 0)
3175                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3176         }
3177 }
3178
3179 /* tp->lock is held. */
3180 static void tg3_enable_nvram_access(struct tg3 *tp)
3181 {
3182         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183                 u32 nvaccess = tr32(NVRAM_ACCESS);
3184
3185                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3186         }
3187 }
3188
3189 /* tp->lock is held. */
3190 static void tg3_disable_nvram_access(struct tg3 *tp)
3191 {
3192         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3193                 u32 nvaccess = tr32(NVRAM_ACCESS);
3194
3195                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3196         }
3197 }
3198
3199 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3200                                         u32 offset, u32 *val)
3201 {
3202         u32 tmp;
3203         int i;
3204
3205         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3206                 return -EINVAL;
3207
3208         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3209                                         EEPROM_ADDR_DEVID_MASK |
3210                                         EEPROM_ADDR_READ);
3211         tw32(GRC_EEPROM_ADDR,
3212              tmp |
3213              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3214              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3215               EEPROM_ADDR_ADDR_MASK) |
3216              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3217
3218         for (i = 0; i < 1000; i++) {
3219                 tmp = tr32(GRC_EEPROM_ADDR);
3220
3221                 if (tmp & EEPROM_ADDR_COMPLETE)
3222                         break;
3223                 msleep(1);
3224         }
3225         if (!(tmp & EEPROM_ADDR_COMPLETE))
3226                 return -EBUSY;
3227
3228         tmp = tr32(GRC_EEPROM_DATA);
3229
3230         /*
3231          * The data will always be opposite the native endian
3232          * format.  Perform a blind byteswap to compensate.
3233          */
3234         *val = swab32(tmp);
3235
3236         return 0;
3237 }
3238
3239 #define NVRAM_CMD_TIMEOUT 10000
3240
3241 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3242 {
3243         int i;
3244
3245         tw32(NVRAM_CMD, nvram_cmd);
3246         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3247                 usleep_range(10, 40);
3248                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3249                         udelay(10);
3250                         break;
3251                 }
3252         }
3253
3254         if (i == NVRAM_CMD_TIMEOUT)
3255                 return -EBUSY;
3256
3257         return 0;
3258 }
3259
3260 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3261 {
3262         if (tg3_flag(tp, NVRAM) &&
3263             tg3_flag(tp, NVRAM_BUFFERED) &&
3264             tg3_flag(tp, FLASH) &&
3265             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266             (tp->nvram_jedecnum == JEDEC_ATMEL))
3267
3268                 addr = ((addr / tp->nvram_pagesize) <<
3269                         ATMEL_AT45DB0X1B_PAGE_POS) +
3270                        (addr % tp->nvram_pagesize);
3271
3272         return addr;
3273 }
3274
3275 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3276 {
3277         if (tg3_flag(tp, NVRAM) &&
3278             tg3_flag(tp, NVRAM_BUFFERED) &&
3279             tg3_flag(tp, FLASH) &&
3280             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3281             (tp->nvram_jedecnum == JEDEC_ATMEL))
3282
3283                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3284                         tp->nvram_pagesize) +
3285                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3286
3287         return addr;
3288 }
3289
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM at byte offset @offset into @val.
 * Devices without an NVRAM controller fall back to the serial EEPROM
 * path.  Returns 0 on success, -EINVAL for an out-of-range address, or
 * a negative errno from the lock/command helpers.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Atmel buffered flash parts use a page/offset address format. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate NVRAM access with the bootcode before touching it. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	/* Program the address, then issue a single-word read command. */
	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3327
3328 /* Ensures NVRAM data is in bytestream format. */
3329 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3330 {
3331         u32 v;
3332         int res = tg3_nvram_read(tp, offset, &v);
3333         if (!res)
3334                 *val = cpu_to_be32(v);
3335         return res;
3336 }
3337
/* Write @len bytes from @buf to the serial EEPROM starting at byte
 * offset @offset, one 32-bit word per EEPROM cycle.  Offset and length
 * must be dword aligned.  Returns 0 on success or -EBUSY if a write
 * cycle does not complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Acknowledge any previous completion before starting a new
		 * cycle.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program the target address and kick off the write. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word's write cycle to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3386
3387 /* offset and length are dword aligned */
3388 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3389                 u8 *buf)
3390 {
3391         int ret = 0;
3392         u32 pagesize = tp->nvram_pagesize;
3393         u32 pagemask = pagesize - 1;
3394         u32 nvram_cmd;
3395         u8 *tmp;
3396
3397         tmp = kmalloc(pagesize, GFP_KERNEL);
3398         if (tmp == NULL)
3399                 return -ENOMEM;
3400
3401         while (len) {
3402                 int j;
3403                 u32 phy_addr, page_off, size;
3404
3405                 phy_addr = offset & ~pagemask;
3406
3407                 for (j = 0; j < pagesize; j += 4) {
3408                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3409                                                   (__be32 *) (tmp + j));
3410                         if (ret)
3411                                 break;
3412                 }
3413                 if (ret)
3414                         break;
3415
3416                 page_off = offset & pagemask;
3417                 size = pagesize;
3418                 if (len < size)
3419                         size = len;
3420
3421                 len -= size;
3422
3423                 memcpy(tmp + page_off, buf, size);
3424
3425                 offset = offset + (pagesize - page_off);
3426
3427                 tg3_enable_nvram_access(tp);
3428
3429                 /*
3430                  * Before we can erase the flash page, we need
3431                  * to issue a special "write enable" command.
3432                  */
3433                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434
3435                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436                         break;
3437
3438                 /* Erase the target page */
3439                 tw32(NVRAM_ADDR, phy_addr);
3440
3441                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3442                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3443
3444                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3445                         break;
3446
3447                 /* Issue another write enable to start the write. */
3448                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3449
3450                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3451                         break;
3452
3453                 for (j = 0; j < pagesize; j += 4) {
3454                         __be32 data;
3455
3456                         data = *((__be32 *) (tmp + j));
3457
3458                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3459
3460                         tw32(NVRAM_ADDR, phy_addr + j);
3461
3462                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3463                                 NVRAM_CMD_WR;
3464
3465                         if (j == 0)
3466                                 nvram_cmd |= NVRAM_CMD_FIRST;
3467                         else if (j == (pagesize - 4))
3468                                 nvram_cmd |= NVRAM_CMD_LAST;
3469
3470                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3471                         if (ret)
3472                                 break;
3473                 }
3474                 if (ret)
3475                         break;
3476         }
3477
3478         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3479         tg3_nvram_exec_cmd(tp, nvram_cmd);
3480
3481         kfree(tmp);
3482
3483         return ret;
3484 }
3485
/* Write @len bytes from @buf to buffered flash or EEPROM starting at
 * @offset, one dword per NVRAM command.  Offset and length are dword
 * aligned.  Returns 0 on success or a negative errno from
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel buffered parts need page/offset address translation. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of the transfer / of each page, and the
		 * last word of each page and of the whole transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* NOTE(review): the address register is only reloaded at the
		 * start of a burst on 57765+ flash — presumably the controller
		 * advances it within a burst; confirm against chip docs.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts want an explicit write-enable command before
		 * each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3540
3541 /* offset and length are dword aligned */
3542 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3543 {
3544         int ret;
3545
3546         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3547                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3548                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3549                 udelay(40);
3550         }
3551
3552         if (!tg3_flag(tp, NVRAM)) {
3553                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3554         } else {
3555                 u32 grc_mode;
3556
3557                 ret = tg3_nvram_lock(tp);
3558                 if (ret)
3559                         return ret;
3560
3561                 tg3_enable_nvram_access(tp);
3562                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3563                         tw32(NVRAM_WRITE1, 0x406);
3564
3565                 grc_mode = tr32(GRC_MODE);
3566                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3567
3568                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3569                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3570                                 buf);
3571                 } else {
3572                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3573                                 buf);
3574                 }
3575
3576                 grc_mode = tr32(GRC_MODE);
3577                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3578
3579                 tg3_disable_nvram_access(tp);
3580                 tg3_nvram_unlock(tp);
3581         }
3582
3583         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3584                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3585                 udelay(40);
3586         }
3587
3588         return ret;
3589 }
3590
/* On-chip scratch memory windows (16 KiB each) used to hold firmware
 * downloaded to the RX and TX CPUs.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3595
/* tp->lock is held. */
/* Repeatedly request a halt of the on-chip CPU at @cpu_base until it
 * reports the halted state.  Returns 0 once halted, -EBUSY on timeout
 * or if the PCI device has dropped off the bus.
 */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		/* Stop polling early if the device is gone. */
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
3613
/* tp->lock is held. */
/* Halt the RX CPU.  After the polled halt attempt, one final halt
 * request is forced regardless of outcome, followed by a short settle
 * delay.  Returns the result of the polled halt.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3625
/* tp->lock is held. */
/* Halt the TX CPU.  Returns 0 on success, -EBUSY on timeout. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3631
/* tp->lock is held. */
/* Clear the halt condition and let the CPU at @cpu_base run again. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3638
/* tp->lock is held. */
/* Restart the RX CPU after a pause. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3644
/* tp->lock is held. */
/* Halt the RX or TX on-chip CPU selected by @cpu_base and clear the
 * firmware's NVRAM arbitration request.  Returns 0 on success, -ENODEV
 * if the CPU refuses to halt.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ chips have no TX CPU (see tg3_load_firmware_cpu()). */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its virtual CPU through GRC instead. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3682
3683 static int tg3_fw_data_len(struct tg3 *tp,
3684                            const struct tg3_firmware_hdr *fw_hdr)
3685 {
3686         int fw_len;
3687
3688         /* Non fragmented firmware have one firmware header followed by a
3689          * contiguous chunk of data to be written. The length field in that
3690          * header is not the length of data to be written but the complete
3691          * length of the bss. The data length is determined based on
3692          * tp->fw->size minus headers.
3693          *
3694          * Fragmented firmware have a main header followed by multiple
3695          * fragments. Each fragment is identical to non fragmented firmware
3696          * with a firmware header followed by a contiguous chunk of data. In
3697          * the main header, the length field is unused and set to 0xffffffff.
3698          * In each fragment header the length is the entire size of that
3699          * fragment i.e. fragment data + header length. Data length is
3700          * therefore length field in the header minus TG3_FW_HDR_LEN.
3701          */
3702         if (tp->fw_len == 0xffffffff)
3703                 fw_len = be32_to_cpu(fw_hdr->len);
3704         else
3705                 fw_len = tp->fw->size;
3706
3707         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3708 }
3709
/* tp->lock is held. */
/* Download the firmware image described by @fw_hdr into the scratch
 * memory of the CPU at @cpu_base, handling both contiguous and
 * fragmented images.  Except on the 57766 (where the caller has
 * already paused the CPU), the CPU is halted and its scratch area
 * zeroed before the download.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the register-write method appropriate for this chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the whole scratch window before loading. */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		/* Fragment payload follows immediately after its header. */
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3775
/* tp->lock is held. */
/* Set the program counter of the CPU at @cpu_base to @pc, retrying up
 * to @iters times with a fresh halt request if the readback does not
 * match.  Returns 0 once the PC sticks, -EBUSY otherwise.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		/* Readback mismatch: halt again and rewrite the PC. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3796
/* tp->lock is held. */
/* Load the firmware fix for 5701 A0 silicon into both the RX and TX
 * CPUs, then start only the RX CPU at the firmware's entry address.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3838
/* Check that the boot code has finished initializing and entered its
 * service loop, and that no other firmware patch is already installed.
 * Returns 0 when it is safe to download a service patch, -EBUSY if the
 * boot code never becomes ready, -EEXIST if another patch is present.
 */
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	/* A non-zero low byte in the handshake word means some patch is
	 * already installed.
	 */
	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3869
/* tp->lock is held. */
/* Download the 57766 EEE service patch into the RX CPU.  Only runs on
 * NVRAM-less devices, when the boot code is in its service loop with no
 * other patch installed, and when a firmware blob with the expected
 * base address is available.  All failure paths simply return; no
 * status is reported.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3910
/* tp->lock is held. */
/* Download and start the TSO firmware on chips that do firmware-based
 * TSO (FW_TSO flag).  On the 5705 the firmware runs on the RX CPU out
 * of the MBUF pool scratch area; other chips use the TX CPU scratch
 * window.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the CPU to run the downloaded firmware. */
	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3960
3961 /* tp->lock is held. */
3962 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3963 {
3964         u32 addr_high, addr_low;
3965
3966         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3967         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3968                     (mac_addr[4] <<  8) | mac_addr[5]);
3969
3970         if (index < 4) {
3971                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3972                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3973         } else {
3974                 index -= 4;
3975                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3976                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3977         }
3978 }
3979
3980 /* tp->lock is held. */
3981 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3982 {
3983         u32 addr_high;
3984         int i;
3985
3986         for (i = 0; i < 4; i++) {
3987                 if (i == 1 && skip_mac_1)
3988                         continue;
3989                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3990         }
3991
3992         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3993             tg3_asic_rev(tp) == ASIC_REV_5704) {
3994                 for (i = 4; i < 16; i++)
3995                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3996         }
3997
3998         addr_high = (tp->dev->dev_addr[0] +
3999                      tp->dev->dev_addr[1] +
4000                      tp->dev->dev_addr[2] +
4001                      tp->dev->dev_addr[3] +
4002                      tp->dev->dev_addr[4] +
4003                      tp->dev->dev_addr[5]) &
4004                 TX_BACKOFF_SEED_MASK;
4005         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4006 }
4007
/* Rewrite the cached TG3PCI_MISC_HOST_CTRL value into PCI config space
 * so subsequent register accesses behave as expected (called before
 * power-state transitions in this file).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
4017
4018 static int tg3_power_up(struct tg3 *tp)
4019 {
4020         int err;
4021
4022         tg3_enable_register_access(tp);
4023
4024         err = pci_set_power_state(tp->pdev, PCI_D0);
4025         if (!err) {
4026                 /* Switch out of Vaux if it is a NIC */
4027                 tg3_pwrsrc_switch_to_vmain(tp);
4028         } else {
4029                 netdev_err(tp->dev, "Transition to D0 failed\n");
4030         }
4031
4032         return err;
4033 }
4034
4035 static int tg3_setup_phy(struct tg3 *, bool);
4036
4037 static int tg3_power_down_prepare(struct tg3 *tp)
4038 {
4039         u32 misc_host_ctrl;
4040         bool device_should_wake, do_low_power;
4041
4042         tg3_enable_register_access(tp);
4043
4044         /* Restore the CLKREQ setting. */
4045         if (tg3_flag(tp, CLKREQ_BUG))
4046                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4047                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4048
4049         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4050         tw32(TG3PCI_MISC_HOST_CTRL,
4051              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4052
4053         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4054                              tg3_flag(tp, WOL_ENABLE);
4055
4056         if (tg3_flag(tp, USE_PHYLIB)) {
4057                 do_low_power = false;
4058                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4059                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4060                         struct phy_device *phydev;
4061                         u32 phyid, advertising;
4062
4063                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4064
4065                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4066
4067                         tp->link_config.speed = phydev->speed;
4068                         tp->link_config.duplex = phydev->duplex;
4069                         tp->link_config.autoneg = phydev->autoneg;
4070                         tp->link_config.advertising = phydev->advertising;
4071
4072                         advertising = ADVERTISED_TP |
4073                                       ADVERTISED_Pause |
4074                                       ADVERTISED_Autoneg |
4075                                       ADVERTISED_10baseT_Half;
4076
4077                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4078                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4079                                         advertising |=
4080                                                 ADVERTISED_100baseT_Half |
4081                                                 ADVERTISED_100baseT_Full |
4082                                                 ADVERTISED_10baseT_Full;
4083                                 else
4084                                         advertising |= ADVERTISED_10baseT_Full;
4085                         }
4086
4087                         phydev->advertising = advertising;
4088
4089                         phy_start_aneg(phydev);
4090
4091                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4092                         if (phyid != PHY_ID_BCMAC131) {
4093                                 phyid &= PHY_BCM_OUI_MASK;
4094                                 if (phyid == PHY_BCM_OUI_1 ||
4095                                     phyid == PHY_BCM_OUI_2 ||
4096                                     phyid == PHY_BCM_OUI_3)
4097                                         do_low_power = true;
4098                         }
4099                 }
4100         } else {
4101                 do_low_power = true;
4102
4103                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4104                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4105
4106                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4107                         tg3_setup_phy(tp, false);
4108         }
4109
4110         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4111                 u32 val;
4112
4113                 val = tr32(GRC_VCPU_EXT_CTRL);
4114                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4115         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4116                 int i;
4117                 u32 val;
4118
4119                 for (i = 0; i < 200; i++) {
4120                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4121                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4122                                 break;
4123                         msleep(1);
4124                 }
4125         }
4126         if (tg3_flag(tp, WOL_CAP))
4127                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4128                                                      WOL_DRV_STATE_SHUTDOWN |
4129                                                      WOL_DRV_WOL |
4130                                                      WOL_SET_MAGIC_PKT);
4131
4132         if (device_should_wake) {
4133                 u32 mac_mode;
4134
4135                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4136                         if (do_low_power &&
4137                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4138                                 tg3_phy_auxctl_write(tp,
4139                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4140                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4141                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4142                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4143                                 udelay(40);
4144                         }
4145
4146                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4147                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4148                         else if (tp->phy_flags &
4149                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4150                                 if (tp->link_config.active_speed == SPEED_1000)
4151                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4152                                 else
4153                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4154                         } else
4155                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4156
4157                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4158                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4159                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4160                                              SPEED_100 : SPEED_10;
4161                                 if (tg3_5700_link_polarity(tp, speed))
4162                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4163                                 else
4164                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4165                         }
4166                 } else {
4167                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4168                 }
4169
4170                 if (!tg3_flag(tp, 5750_PLUS))
4171                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4172
4173                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4174                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4175                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4176                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4177
4178                 if (tg3_flag(tp, ENABLE_APE))
4179                         mac_mode |= MAC_MODE_APE_TX_EN |
4180                                     MAC_MODE_APE_RX_EN |
4181                                     MAC_MODE_TDE_ENABLE;
4182
4183                 tw32_f(MAC_MODE, mac_mode);
4184                 udelay(100);
4185
4186                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4187                 udelay(10);
4188         }
4189
4190         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4191             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4192              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4193                 u32 base_val;
4194
4195                 base_val = tp->pci_clock_ctrl;
4196                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4197                              CLOCK_CTRL_TXCLK_DISABLE);
4198
4199                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4200                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4201         } else if (tg3_flag(tp, 5780_CLASS) ||
4202                    tg3_flag(tp, CPMU_PRESENT) ||
4203                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4204                 /* do nothing */
4205         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4206                 u32 newbits1, newbits2;
4207
4208                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4209                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4210                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4211                                     CLOCK_CTRL_TXCLK_DISABLE |
4212                                     CLOCK_CTRL_ALTCLK);
4213                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214                 } else if (tg3_flag(tp, 5705_PLUS)) {
4215                         newbits1 = CLOCK_CTRL_625_CORE;
4216                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4217                 } else {
4218                         newbits1 = CLOCK_CTRL_ALTCLK;
4219                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4220                 }
4221
4222                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4223                             40);
4224
4225                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4226                             40);
4227
4228                 if (!tg3_flag(tp, 5705_PLUS)) {
4229                         u32 newbits3;
4230
4231                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4232                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4233                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4234                                             CLOCK_CTRL_TXCLK_DISABLE |
4235                                             CLOCK_CTRL_44MHZ_CORE);
4236                         } else {
4237                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4238                         }
4239
4240                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4241                                     tp->pci_clock_ctrl | newbits3, 40);
4242                 }
4243         }
4244
4245         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4246                 tg3_power_down_phy(tp, do_low_power);
4247
4248         tg3_frob_aux_power(tp, true);
4249
4250         /* Workaround for unstable PLL clock */
4251         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4252             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4253              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4254                 u32 val = tr32(0x7d00);
4255
4256                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4257                 tw32(0x7d00, val);
4258                 if (!tg3_flag(tp, ENABLE_ASF)) {
4259                         int err;
4260
4261                         err = tg3_nvram_lock(tp);
4262                         tg3_halt_cpu(tp, RX_CPU_BASE);
4263                         if (!err)
4264                                 tg3_nvram_unlock(tp);
4265                 }
4266         }
4267
4268         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4269
4270         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4271
4272         return 0;
4273 }
4274
4275 static void tg3_power_down(struct tg3 *tp)
4276 {
4277         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4278         pci_set_power_state(tp->pdev, PCI_D3hot);
4279 }
4280
4281 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4282 {
4283         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4284         case MII_TG3_AUX_STAT_10HALF:
4285                 *speed = SPEED_10;
4286                 *duplex = DUPLEX_HALF;
4287                 break;
4288
4289         case MII_TG3_AUX_STAT_10FULL:
4290                 *speed = SPEED_10;
4291                 *duplex = DUPLEX_FULL;
4292                 break;
4293
4294         case MII_TG3_AUX_STAT_100HALF:
4295                 *speed = SPEED_100;
4296                 *duplex = DUPLEX_HALF;
4297                 break;
4298
4299         case MII_TG3_AUX_STAT_100FULL:
4300                 *speed = SPEED_100;
4301                 *duplex = DUPLEX_FULL;
4302                 break;
4303
4304         case MII_TG3_AUX_STAT_1000HALF:
4305                 *speed = SPEED_1000;
4306                 *duplex = DUPLEX_HALF;
4307                 break;
4308
4309         case MII_TG3_AUX_STAT_1000FULL:
4310                 *speed = SPEED_1000;
4311                 *duplex = DUPLEX_FULL;
4312                 break;
4313
4314         default:
4315                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4316                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4317                                  SPEED_10;
4318                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4319                                   DUPLEX_HALF;
4320                         break;
4321                 }
4322                 *speed = SPEED_UNKNOWN;
4323                 *duplex = DUPLEX_UNKNOWN;
4324                 break;
4325         }
4326 }
4327
4328 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4329 {
4330         int err = 0;
4331         u32 val, new_adv;
4332
4333         new_adv = ADVERTISE_CSMA;
4334         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4335         new_adv |= mii_advertise_flowctrl(flowctrl);
4336
4337         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4338         if (err)
4339                 goto done;
4340
4341         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4342                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4343
4344                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4345                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4346                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4347
4348                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4349                 if (err)
4350                         goto done;
4351         }
4352
4353         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4354                 goto done;
4355
4356         tw32(TG3_CPMU_EEE_MODE,
4357              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4358
4359         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4360         if (!err) {
4361                 u32 err2;
4362
4363                 val = 0;
4364                 /* Advertise 100-BaseTX EEE ability */
4365                 if (advertise & ADVERTISED_100baseT_Full)
4366                         val |= MDIO_AN_EEE_ADV_100TX;
4367                 /* Advertise 1000-BaseT EEE ability */
4368                 if (advertise & ADVERTISED_1000baseT_Full)
4369                         val |= MDIO_AN_EEE_ADV_1000T;
4370
4371                 if (!tp->eee.eee_enabled) {
4372                         val = 0;
4373                         tp->eee.advertised = 0;
4374                 } else {
4375                         tp->eee.advertised = advertise &
4376                                              (ADVERTISED_100baseT_Full |
4377                                               ADVERTISED_1000baseT_Full);
4378                 }
4379
4380                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4381                 if (err)
4382                         val = 0;
4383
4384                 switch (tg3_asic_rev(tp)) {
4385                 case ASIC_REV_5717:
4386                 case ASIC_REV_57765:
4387                 case ASIC_REV_57766:
4388                 case ASIC_REV_5719:
4389                         /* If we advertised any eee advertisements above... */
4390                         if (val)
4391                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4392                                       MII_TG3_DSP_TAP26_RMRXSTO |
4393                                       MII_TG3_DSP_TAP26_OPCSINPT;
4394                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4395                         /* Fall through */
4396                 case ASIC_REV_5720:
4397                 case ASIC_REV_5762:
4398                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4399                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4400                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4401                 }
4402
4403                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4404                 if (!err)
4405                         err = err2;
4406         }
4407
4408 done:
4409         return err;
4410 }
4411
/* Kick off link bring-up on a copper PHY.  With autoneg enabled (or when
 * the PHY is in the low-power/WoL state) program the advertisement and
 * restart autonegotiation; otherwise force the configured speed/duplex
 * directly through MII_BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low-power path: advertise only 10Mb modes, plus
			 * 100Mb when wake-on-100 is supported and gigabit
			 * when the PHY can run 1G on auxiliary power.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			/* Normal path: use the configured advertisement,
			 * masking gigabit modes on 10/100-only PHYs.
			 */
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms) for
			 * BMSR link status to drop before applying the new
			 * BMCR value.  BMSR is read twice per iteration since
			 * latched bits need a second read for current state.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4508
/* Populate tp->link_config by reading the PHY's current configuration
 * back out of its registers (MII_BMCR, MII_ADVERTISE, MII_CTRL1000)
 * instead of programming a new one.
 *
 * Returns 0 on success; -EIO when the PHY holds a forced-speed setup this
 * driver cannot adopt (forced speed on a serdes interface, or forced
 * 1000Mb/s on a 10/100-only PHY); otherwise the error from a failed
 * PHY read.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Autoneg is off: recover the forced speed/duplex. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		/* Assume failure; cleared once a supported mode is found. */
		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg is on: recover the advertised modes. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit advertisement is in MII_CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X modes and pause bits share
			 * MII_ADVERTISE.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4605
/* One-off DSP setup for the BCM5401 PHY.  The individual write results
 * are OR-combined, so a nonzero return means at least one write failed
 * (the combined value is not a meaningful errno).
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Magic DSP coefficient values from the vendor; no symbolic names
	 * exist for these registers/values.
	 */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	/* Give the PHY time to settle before further accesses. */
	udelay(40);

	return err;
}
4624
4625 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4626 {
4627         struct ethtool_eee eee;
4628
4629         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4630                 return true;
4631
4632         tg3_eee_pull_config(tp, &eee);
4633
4634         if (tp->eee.eee_enabled) {
4635                 if (tp->eee.advertised != eee.advertised ||
4636                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4637                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4638                         return false;
4639         } else {
4640                 /* EEE is disabled but we're advertising */
4641                 if (eee.advertised)
4642                         return false;
4643         }
4644
4645         return true;
4646 }
4647
4648 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4649 {
4650         u32 advmsk, tgtadv, advertising;
4651
4652         advertising = tp->link_config.advertising;
4653         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4654
4655         advmsk = ADVERTISE_ALL;
4656         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4657                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4658                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4659         }
4660
4661         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4662                 return false;
4663
4664         if ((*lcladv & advmsk) != tgtadv)
4665                 return false;
4666
4667         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4668                 u32 tg3_ctrl;
4669
4670                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4671
4672                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4673                         return false;
4674
4675                 if (tgtadv &&
4676                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4677                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4678                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4679                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4680                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4681                 } else {
4682                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4683                 }
4684
4685                 if (tg3_ctrl != tgtadv)
4686                         return false;
4687         }
4688
4689         return true;
4690 }
4691
4692 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4693 {
4694         u32 lpeth = 0;
4695
4696         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4697                 u32 val;
4698
4699                 if (tg3_readphy(tp, MII_STAT1000, &val))
4700                         return false;
4701
4702                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4703         }
4704
4705         if (tg3_readphy(tp, MII_LPA, rmtadv))
4706                 return false;
4707
4708         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4709         tp->link_config.rmt_adv = lpeth;
4710
4711         return true;
4712 }
4713
4714 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4715 {
4716         if (curr_link_up != tp->link_up) {
4717                 if (curr_link_up) {
4718                         netif_carrier_on(tp->dev);
4719                 } else {
4720                         netif_carrier_off(tp->dev);
4721                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4722                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4723                 }
4724
4725                 tg3_link_report(tp);
4726                 return true;
4727         }
4728
4729         return false;
4730 }
4731
/* Mask MAC events and clear the latched link-state bits in MAC_STATUS
 * before the link state is re-evaluated.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	/* NOTE(review): assumes the *_CHANGED bits are write-to-clear,
	 * matching how MAC_STATUS is written elsewhere in this driver.
	 */
	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4743
/* Program the CPMU EEE control, mode, and debounce-timer registers from
 * the settings cached in tp->eee.  When EEE is disabled the mode register
 * is written as zero, but the timers are still programmed.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	/* Link-idle detection sources; 57765 A0 adds the APE TX metric. */
	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* LPI behavior: TX-side LPI only when requested in tp->eee. */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	/* Debounce timers; the low 16 bits of DBTMR1 carry the TX LPI
	 * timer value.
	 */
	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4779
4780 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4781 {
4782         bool current_link_up;
4783         u32 bmsr, val;
4784         u32 lcl_adv, rmt_adv;
4785         u16 current_speed;
4786         u8 current_duplex;
4787         int i, err;
4788
4789         tg3_clear_mac_status(tp);
4790
4791         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4792                 tw32_f(MAC_MI_MODE,
4793                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4794                 udelay(80);
4795         }
4796
4797         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4798
4799         /* Some third-party PHYs need to be reset on link going
4800          * down.
4801          */
4802         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4803              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4804              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4805             tp->link_up) {
4806                 tg3_readphy(tp, MII_BMSR, &bmsr);
4807                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4808                     !(bmsr & BMSR_LSTATUS))
4809                         force_reset = true;
4810         }
4811         if (force_reset)
4812                 tg3_phy_reset(tp);
4813
4814         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4815                 tg3_readphy(tp, MII_BMSR, &bmsr);
4816                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4817                     !tg3_flag(tp, INIT_COMPLETE))
4818                         bmsr = 0;
4819
4820                 if (!(bmsr & BMSR_LSTATUS)) {
4821                         err = tg3_init_5401phy_dsp(tp);
4822                         if (err)
4823                                 return err;
4824
4825                         tg3_readphy(tp, MII_BMSR, &bmsr);
4826                         for (i = 0; i < 1000; i++) {
4827                                 udelay(10);
4828                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4829                                     (bmsr & BMSR_LSTATUS)) {
4830                                         udelay(40);
4831                                         break;
4832                                 }
4833                         }
4834
4835                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4836                             TG3_PHY_REV_BCM5401_B0 &&
4837                             !(bmsr & BMSR_LSTATUS) &&
4838                             tp->link_config.active_speed == SPEED_1000) {
4839                                 err = tg3_phy_reset(tp);
4840                                 if (!err)
4841                                         err = tg3_init_5401phy_dsp(tp);
4842                                 if (err)
4843                                         return err;
4844                         }
4845                 }
4846         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4847                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4848                 /* 5701 {A0,B0} CRC bug workaround */
4849                 tg3_writephy(tp, 0x15, 0x0a75);
4850                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4852                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4853         }
4854
4855         /* Clear pending interrupts... */
4856         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4857         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4858
4859         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4860                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4861         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4862                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4863
4864         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4865             tg3_asic_rev(tp) == ASIC_REV_5701) {
4866                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4867                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4868                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4869                 else
4870                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4871         }
4872
4873         current_link_up = false;
4874         current_speed = SPEED_UNKNOWN;
4875         current_duplex = DUPLEX_UNKNOWN;
4876         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4877         tp->link_config.rmt_adv = 0;
4878
4879         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4880                 err = tg3_phy_auxctl_read(tp,
4881                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4882                                           &val);
4883                 if (!err && !(val & (1 << 10))) {
4884                         tg3_phy_auxctl_write(tp,
4885                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886                                              val | (1 << 10));
4887                         goto relink;
4888                 }
4889         }
4890
4891         bmsr = 0;
4892         for (i = 0; i < 100; i++) {
4893                 tg3_readphy(tp, MII_BMSR, &bmsr);
4894                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4895                     (bmsr & BMSR_LSTATUS))
4896                         break;
4897                 udelay(40);
4898         }
4899
4900         if (bmsr & BMSR_LSTATUS) {
4901                 u32 aux_stat, bmcr;
4902
4903                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4904                 for (i = 0; i < 2000; i++) {
4905                         udelay(10);
4906                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4907                             aux_stat)
4908                                 break;
4909                 }
4910
4911                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4912                                              &current_speed,
4913                                              &current_duplex);
4914
4915                 bmcr = 0;
4916                 for (i = 0; i < 200; i++) {
4917                         tg3_readphy(tp, MII_BMCR, &bmcr);
4918                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4919                                 continue;
4920                         if (bmcr && bmcr != 0x7fff)
4921                                 break;
4922                         udelay(10);
4923                 }
4924
4925                 lcl_adv = 0;
4926                 rmt_adv = 0;
4927
4928                 tp->link_config.active_speed = current_speed;
4929                 tp->link_config.active_duplex = current_duplex;
4930
4931                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4932                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4933
4934                         if ((bmcr & BMCR_ANENABLE) &&
4935                             eee_config_ok &&
4936                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4937                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4938                                 current_link_up = true;
4939
4940                         /* EEE settings changes take effect only after a phy
4941                          * reset.  If we have skipped a reset due to Link Flap
4942                          * Avoidance being enabled, do it now.
4943                          */
4944                         if (!eee_config_ok &&
4945                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4946                             !force_reset) {
4947                                 tg3_setup_eee(tp);
4948                                 tg3_phy_reset(tp);
4949                         }
4950                 } else {
4951                         if (!(bmcr & BMCR_ANENABLE) &&
4952                             tp->link_config.speed == current_speed &&
4953                             tp->link_config.duplex == current_duplex) {
4954                                 current_link_up = true;
4955                         }
4956                 }
4957
4958                 if (current_link_up &&
4959                     tp->link_config.active_duplex == DUPLEX_FULL) {
4960                         u32 reg, bit;
4961
4962                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4963                                 reg = MII_TG3_FET_GEN_STAT;
4964                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4965                         } else {
4966                                 reg = MII_TG3_EXT_STAT;
4967                                 bit = MII_TG3_EXT_STAT_MDIX;
4968                         }
4969
4970                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4971                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4972
4973                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4974                 }
4975         }
4976
4977 relink:
4978         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4979                 tg3_phy_copper_begin(tp);
4980
4981                 if (tg3_flag(tp, ROBOSWITCH)) {
4982                         current_link_up = true;
4983                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4984                         current_speed = SPEED_1000;
4985                         current_duplex = DUPLEX_FULL;
4986                         tp->link_config.active_speed = current_speed;
4987                         tp->link_config.active_duplex = current_duplex;
4988                 }
4989
4990                 tg3_readphy(tp, MII_BMSR, &bmsr);
4991                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4992                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4993                         current_link_up = true;
4994         }
4995
4996         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4997         if (current_link_up) {
4998                 if (tp->link_config.active_speed == SPEED_100 ||
4999                     tp->link_config.active_speed == SPEED_10)
5000                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5001                 else
5002                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5003         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5004                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005         else
5006                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007
5008         /* In order for the 5750 core in BCM4785 chip to work properly
5009          * in RGMII mode, the Led Control Register must be set up.
5010          */
5011         if (tg3_flag(tp, RGMII_MODE)) {
5012                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5013                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5014
5015                 if (tp->link_config.active_speed == SPEED_10)
5016                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5017                 else if (tp->link_config.active_speed == SPEED_100)
5018                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019                                      LED_CTRL_100MBPS_ON);
5020                 else if (tp->link_config.active_speed == SPEED_1000)
5021                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5022                                      LED_CTRL_1000MBPS_ON);
5023
5024                 tw32(MAC_LED_CTRL, led_ctrl);
5025                 udelay(40);
5026         }
5027
5028         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5029         if (tp->link_config.active_duplex == DUPLEX_HALF)
5030                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5031
5032         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5033                 if (current_link_up &&
5034                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5035                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5036                 else
5037                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5038         }
5039
5040         /* ??? Without this setting Netgear GA302T PHY does not
5041          * ??? send/receive packets...
5042          */
5043         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5044             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5045                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5046                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5047                 udelay(80);
5048         }
5049
5050         tw32_f(MAC_MODE, tp->mac_mode);
5051         udelay(40);
5052
5053         tg3_phy_eee_adjust(tp, current_link_up);
5054
5055         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5056                 /* Polled via timer. */
5057                 tw32_f(MAC_EVENT, 0);
5058         } else {
5059                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5060         }
5061         udelay(40);
5062
5063         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5064             current_link_up &&
5065             tp->link_config.active_speed == SPEED_1000 &&
5066             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5067                 udelay(120);
5068                 tw32_f(MAC_STATUS,
5069                      (MAC_STATUS_SYNC_CHANGED |
5070                       MAC_STATUS_CFG_CHANGED));
5071                 udelay(40);
5072                 tg3_write_mem(tp,
5073                               NIC_SRAM_FIRMWARE_MBOX,
5074                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5075         }
5076
5077         /* Prevent send BD corruption. */
5078         if (tg3_flag(tp, CLKREQ_BUG)) {
5079                 if (tp->link_config.active_speed == SPEED_100 ||
5080                     tp->link_config.active_speed == SPEED_10)
5081                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5082                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5083                 else
5084                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5085                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5086         }
5087
5088         tg3_test_and_report_link_chg(tp, current_link_up);
5089
5090         return 0;
5091 }
5092
/* State for the software-driven 1000BASE-X autonegotiation arbitration,
 * modelled on the IEEE 802.3 clause 37 state machine.  One instance is
 * stack-allocated by fiber_autoneg() and advanced one tick at a time by
 * tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	/* Current arbitration state (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Management (MR_*) flags; the MR_LP_ADV_* bits record the link
	 * partner's decoded ability word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks; fiber_autoneg() advances
	 * cur_time roughly once per microsecond.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it has
	 * repeated; ability_match is latched once it repeats.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match predicates derived from the incoming config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config code words (bit layout below;
	 * ANEG_CFG_INVAL marks bits that must never be set on receive).
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must hold before the machine advances past it. */
#define ANEG_STATE_SETTLE_TIME  10000
5156
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  First samples the incoming config code word from the MAC and
 * updates the ability/ack/idle match tracking in @ap, then executes one
 * step of the clause-37-style arbitration, programming MAC_TX_AUTO_NEG
 * and MAC_MODE as states are entered.
 *
 * Return: ANEG_OK to continue polling, ANEG_TIMER_ENAB while a settle
 * timer is running, ANEG_DONE when negotiation has concluded, or
 * ANEG_FAILED on an invalid config word or unhandled state.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all match tracking. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match latches once the same config word has been
		 * received more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fall through */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* (Re)start negotiation from a clean slate. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		/* Transmit an all-zero config word. */
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fall through */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start advertising our abilities: full duplex plus pause
		 * bits derived from the configured flow control.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero ability word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's ability word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fall through */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Compare config words with the ACK bit masked out. */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner changed its word: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability word into MR_LP_ADV_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 is a magic bit in the received word;
		 * it has no ANEG_CFG_* name here — presumably the toggle
		 * bit, matching MR_TOGGLE_RX.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented (see
				 * the NEXT_PAGE_WAIT states below); only
				 * proceed if neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		/* Stop sending config words. */
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5408
5409 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5410 {
5411         int res = 0;
5412         struct tg3_fiber_aneginfo aninfo;
5413         int status = ANEG_FAILED;
5414         unsigned int tick;
5415         u32 tmp;
5416
5417         tw32_f(MAC_TX_AUTO_NEG, 0);
5418
5419         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5420         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5421         udelay(40);
5422
5423         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5424         udelay(40);
5425
5426         memset(&aninfo, 0, sizeof(aninfo));
5427         aninfo.flags |= MR_AN_ENABLE;
5428         aninfo.state = ANEG_STATE_UNKNOWN;
5429         aninfo.cur_time = 0;
5430         tick = 0;
5431         while (++tick < 195000) {
5432                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5433                 if (status == ANEG_DONE || status == ANEG_FAILED)
5434                         break;
5435
5436                 udelay(1);
5437         }
5438
5439         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5440         tw32_f(MAC_MODE, tp->mac_mode);
5441         udelay(40);
5442
5443         *txflags = aninfo.txconfig;
5444         *rxflags = aninfo.flags;
5445
5446         if (status == ANEG_DONE &&
5447             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5448                              MR_LP_ADV_FULL_DUPLEX)))
5449                 res = 1;
5450
5451         return res;
5452 }
5453
/* One-time initialization sequence for the BCM8002 fiber PHY.  The
 * registers written here (0x10 and up) are in the vendor-specific range
 * of the MII register space; the inline comments describe each write.
 * Skipped when the device is already initialized and the PCS is not
 * currently synced.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)		/* ~5 ms busy-wait */
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)		/* ~150 ms busy-wait */
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5503
/* Link setup for fiber ports using the hardware (SG_DIG) autoneg block.
 * In forced mode, hardware autoneg is disabled and link simply follows
 * PCS sync.  In autoneg mode, SG_DIG_CTRL is programmed with our pause
 * advertisement and completion is read from SG_DIG_STATUS; if autoneg
 * times out without the partner sending config words, link may still
 * come up via parallel detection.
 *
 * Return: true if link is up after this pass.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* The MAC_SERDES_CFG workaround applies to every revision except
	 * 5704 A0/A1.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* During the parallel-detect grace period, keep link up as
		 * long as PCS stays synced and no config words arrive.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while loading the new control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg complete: derive both sides' pause bits
			 * and program flow control accordingly.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop to common setup
				 * and attempt parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No PCS sync or signal detect: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5648
/* Fiber link setup without the SG_DIG hardware autoneg block.  When
 * autoneg is enabled, run the software state machine (fiber_autoneg())
 * and program flow control from the negotiated pause bits; otherwise
 * force a 1000FD link.  Either way, link requires PCS sync.
 *
 * Return: true if link is up after this pass.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map our advertised and the partner's received
			 * pause bits onto MII-style advertisement words.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Acknowledge sync/config change events until the status
		 * stays quiet, up to 30 tries.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Even if negotiation failed, consider the link up when
		 * PCS is synced and no config words are being received.
		 */
		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		/* Briefly send config words, then return to normal mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5713
/* Top-level link setup for TBI (fiber) ports.  Reconfigures the MAC for
 * TBI mode, runs either hardware or by-hand autoneg, then updates
 * active speed/duplex, LED control and link state reporting.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot current link parameters so we can report only real
	 * changes at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up, no HW autoneg, init complete, and
	 * status shows clean sync/signal with no pending config change —
	 * just ack the change bits and keep the existing link.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated with LINK_CHG cleared so the
	 * interrupt path does not re-report this transition.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack any latched status-change bits until they stay clear
	 * (up to 100 tries, 5us apart).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		/* Autoneg timed out: pulse SEND_CONFIGS to nudge the
		 * link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the link LED
	 * accordingly.
	 */
	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when pause,
	 * speed or duplex differ from what we started with.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5816
/* Link setup for fiber ports driven through an MII-accessible serdes
 * PHY (as opposed to the TBI path in tg3_setup_fiber_phy()).  Handles
 * the 5719/5720 SGMII fast path, autoneg restart, forced-mode
 * configuration, and final MAC mode / flow control programming.
 * Returns accumulated tg3_readphy() error status (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	/* 5719/5720 in SGMII mode: read the resolved link state directly
	 * from the serdes status register and skip MII autoneg handling.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	     (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			/* Map the serdes-reported speed onto the MAC port
			 * mode: GMII for gigabit, MII for 10/100.
			 */
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	/* BMSR latches link-down; read twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 the PHY's link bit is unreliable; use the MAC's TX
	 * status instead.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the requested
		 * flow control and link modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* Advertisement changed or autoneg was off: restart
		 * autoneg and let the serdes timer finish the job.
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: program speed/duplex directly. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Withdraw all 1000X modes and restart
				 * autoneg so the link drops cleanly before
				 * forcing the new settings.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double-read again for the latched link bit, with
			 * the same 5714 MAC TX status workaround as above.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
6010
/* Periodic serdes helper: after the autoneg timeout expires, fall back
 * to parallel detection (force 1000FD when signal is present but no
 * config words are received), and switch back to autoneg once the link
 * partner starts sending config words again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice — first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			/* phy1 bit 0x10: signal detect; phy2 bit 0x20:
			 * config words being received.
			 */
			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6070
/* Common entry point for (re)configuring the link.  Dispatches to the
 * fiber/serdes/copper setup routine for this PHY type, then applies
 * post-link fixups: 5784_AX clock prescaler, TX slot time for 1000HD,
 * statistics coalescing, and the ASPM power management threshold.
 * Returns the setup routine's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* 5784_AX: pick the GRC prescaler to match the current MAC
	 * core clock frequency.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762 keep extra fields in MAC_TX_LENGTHS that must be
	 * preserved across the rewrite below.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000 Mbps half duplex needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 chips: only collect statistics while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: use the configured L1 entry threshold only
	 * while the link is down; force the maximum when it is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6136
6137 /* tp->lock must be held */
6138 static u64 tg3_refclk_read(struct tg3 *tp)
6139 {
6140         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6141         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6142 }
6143
/* tp->lock must be held.
 * Load a new value into the 64-bit EAV reference clock: the clock is
 * stopped, both halves are written, then the clock is resumed.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
6154
6155 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6156 static inline void tg3_full_unlock(struct tg3 *tp);
6157 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6158 {
6159         struct tg3 *tp = netdev_priv(dev);
6160
6161         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6162                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6163                                 SOF_TIMESTAMPING_SOFTWARE;
6164
6165         if (tg3_flag(tp, PTP_CAPABLE)) {
6166                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6167                                         SOF_TIMESTAMPING_RX_HARDWARE |
6168                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6169         }
6170
6171         if (tp->ptp_clock)
6172                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6173         else
6174                 info->phc_index = -1;
6175
6176         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6177
6178         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6179                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6180                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6181                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6182         return 0;
6183 }
6184
/* PTP adjfreq callback: adjust the hardware clock rate by ppb (parts
 * per billion).  A negative ppb slows the clock via the NEG flag;
 * ppb == 0 disables the correction entirely.  Always returns 0.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		/* Zero correction: disable the correction logic. */
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
6220
/* PTP adjtime callback: apply a phase offset in software by folding
 * delta (ns) into tp->ptp_adjust rather than touching the hardware
 * counter.  Always returns 0.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6231
/* PTP gettime callback: read the hardware reference clock, add the
 * software phase offset, and return the result as a timespec64.
 * Always returns 0.
 */
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}
6246
/* PTP settime callback: write the given time into the hardware
 * reference clock and clear the software phase offset so subsequent
 * reads start from the new value.  Always returns 0.
 */
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
6262
/* PTP enable callback.  Only PTP_CLK_REQ_PEROUT on index 0 is
 * supported, and only as a one-shot pulse (period must be 0): the
 * start time is armed in the watchdog 0 registers and routed to the
 * timesync GPIO.  Returns 0 on success, -EINVAL on invalid requests,
 * -EOPNOTSUPP for any other request type.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Only one periodic-output channel exists (index 0). */
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* The watchdog start value is limited to 63 bits
			 * (bit 63 overlaps the enable bit below).
			 */
			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			/* Route watchdog 0 to the timesync output. */
			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			/* Disarm the watchdog and detach the output. */
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
6321
/* PTP hardware clock capabilities and callbacks for this device:
 * one one-shot periodic output, no alarms/external timestamps/pins,
 * and a maximum frequency adjustment of 250000000 ppb.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6337
6338 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6339                                      struct skb_shared_hwtstamps *timestamp)
6340 {
6341         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6342         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6343                                            tp->ptp_adjust);
6344 }
6345
/* tp->lock must be held.
 * One-time PTP setup: seed the hardware clock from system real time,
 * clear the software offset, and install the callback table.
 * No-op on non-PTP-capable devices.
 */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
6357
/* tp->lock must be held.
 * Restore the hardware clock after resume: fold the accumulated
 * software offset into the hardware counter and reset the offset.
 * No-op on non-PTP-capable devices.
 */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
6367
/* Tear down the PTP clock: unregister it and clear the driver's
 * reference and software offset.  Safe to call when no clock was
 * ever registered.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
6377
/* Return the current irq_sync state (nonzero presumably while
 * interrupt processing is being quiesced elsewhere in the driver —
 * set/cleared outside this chunk).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6382
6383 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6384 {
6385         int i;
6386
6387         dst = (u32 *)((u8 *)dst + off);
6388         for (i = 0; i < len; i += sizeof(u32))
6389                 *dst++ = tr32(off + i);
6390 }
6391
/* Fill the register-dump buffer for non-PCI-Express (legacy) devices
 * by reading each functional block's register window individually
 * (base offset, window length in bytes).  Blocks that do not exist on
 * 5705+ parts, non-MSI-X parts, or parts without NVRAM are skipped.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The TX CPU was removed on 5705 and later parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6441
/* Dump a snapshot of the device state to the kernel log for debugging:
 * a block of chip registers followed by the per-vector status block and
 * NAPI bookkeeping.  The register buffer is allocated with GFP_ATOMIC,
 * so this is safe to call from atomic context.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups to
	 * keep the log output compact.  i indexes u32 words, so the
	 * printed byte offset is i * 4.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side (NAPI) view of the same queue state */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6497
6498 /* This is called whenever we suspect that the system chipset is re-
6499  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6500  * is bogus tx completions. We try to recover by setting the
6501  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6502  * in the workqueue.
6503  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path is only meaningful when the reorder workaround is
	 * not already active and the tx mailbox is written directly.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The actual recovery (flag flip + chip reset) happens later,
	 * from the workqueue; here we only mark it pending.
	 */
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6517
6518 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6519 {
6520         /* Tell compiler to fetch tx indices from memory. */
6521         barrier();
6522         return tnapi->tx_pending -
6523                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6524 }
6525
6526 /* Tigon3 never reports partial packet sends.  So we do not
6527  * need special logic to handle SKBs that have not had all
6528  * of their frags sent yet, like SunGEM does.
6529  */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* hw_idx: last slot the chip has consumed; sw_idx: our completion
	 * cursor.  Everything in between is done and can be freed.
	 */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the tx queue numbering is offset by one from the napi
	 * vector numbering, hence the adjustment.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means chip and driver
		 * disagree about ring state; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware tx timestamp if one was requested
		 * for this descriptor.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra descriptors used when this buffer had to
		 * be split across multiple BDs.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap every page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A fragment slot that still has an skb, or one
			 * past the chip's consumer, is inconsistent state.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte queue limits machinery. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing a concurrent stop. */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6629
6630 static void tg3_frag_free(bool is_frag, void *data)
6631 {
6632         if (is_frag)
6633                 skb_free_frag(data);
6634         else
6635                 kfree(data);
6636 }
6637
6638 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6639 {
6640         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6641                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6642
6643         if (!ri->data)
6644                 return;
6645
6646         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6647                          map_sz, PCI_DMA_FROMDEVICE);
6648         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6649         ri->data = NULL;
6650 }
6651
6652
6653 /* Returns size of skb allocated or < 0 on error.
6654  *
6655  * We only need to fill in the address because the other members
6656  * of the RX descriptor are invariant, see tg3_init_rings.
6657  *
6658  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6659  * posting buffers we only dirty the first cache line of the RX
6660  * descriptor (containing the address).  Whereas for the RX status
6661  * buffers the cpu only reads the last cacheline of the RX descriptor
6662  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6663  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the target ring (standard or jumbo) and its buffer size. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation: payload area plus room for skb_shared_info,
	 * sized so build_skb() can later wrap the buffer directly.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		/* *frag_size == 0 tells the caller this was kmalloc()ed */
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit point: publish the buffer and its DMA address. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6728
6729 /* We only need to move over in the address because the other
6730  * members of the RX descriptor are invariant.  See notes above
6731  * tg3_alloc_rx_data for full details.
6732  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers are always sourced from vector 0's producer set,
	 * matching the lookup done in tg3_rx().
	 */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move the buffer pointer and DMA mapping to the new slot. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6778
6779 /* The RX ring scheme is composed of multiple rings which post fresh
6780  * buffers to the chip, and one special ring the chip uses to report
6781  * status back to the host.
6782  *
6783  * The special ring reports the status of received packets to the
6784  * host.  The chip does not write into the original descriptor the
6785  * RX buffer was obtained from.  The chip simply takes the original
6786  * descriptor as provided by the host, updates the status and length
6787  * field, then writes this into the next status ring entry.
6788  *
6789  * Each ring the host uses to post buffers to the chip is described
6790  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6791  * it is first placed into the on-chip ram.  When the packet's length
6792  * is known, it walks down the TG3_BDINFO entries to select the ring.
6793  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6794  * which is within the range of the new packet's length is chosen.
6795  *
6796  * The "separate ring for rx status" scheme may sound queer, but it makes
6797  * sense from a cache coherency perspective.  If only the host writes
6798  * to the buffer post rings, and only the chip writes to the rx status
6799  * rings, then cache lines never move beyond shared-modified state.
6800  * If both the host and chip were to write into the same ring, cache line
6801  * eviction could occur since both entities want it in an exclusive state.
6802  */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	/* hw_idx is the chip's return-ring producer index; entries from
	 * sw_idx up to hw_idx hold completed packets.
	 */
	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie tells us which producer ring (std or
		 * jumbo) and which slot this buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* On receive errors, recycle the buffer back onto the
		 * producer ring instead of allocating a replacement.
		 */
		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Frame length from the descriptor, minus the FCS the
		 * chip leaves on the end.
		 */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware rx timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		/* Large packets: hand the DMA buffer itself to the stack
		 * via build_skb() and post a fresh buffer.  Small packets:
		 * copy into a new skb and recycle the DMA buffer.
		 */
		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when it verified a
		 * complete TCP/UDP checksum (result 0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames, except VLAN-tagged ones whose
		 * extra header length accounts for the excess.
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush std-ring postings mid-poll so the
		 * chip does not run out of buffers on long polls.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Under RSS, vector 1 consolidates the per-vector producer
		 * rings (see tg3_poll_work); schedule it to pick ours up.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
7008
7009 static void tg3_poll_link(struct tg3 *tp)
7010 {
7011         /* handle link change and other phy events */
7012         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7013                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7014
7015                 if (sblk->status & SD_STATUS_LINK_CHG) {
7016                         sblk->status = SD_STATUS_UPDATED |
7017                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7018                         spin_lock(&tp->lock);
7019                         if (tg3_flag(tp, USE_PHYLIB)) {
7020                                 tw32_f(MAC_STATUS,
7021                                      (MAC_STATUS_SYNC_CHANGED |
7022                                       MAC_STATUS_CFG_CHANGED |
7023                                       MAC_STATUS_MI_COMPLETION |
7024                                       MAC_STATUS_LNKSTATE_CHANGED));
7025                                 udelay(40);
7026                         } else
7027                                 tg3_setup_phy(tp, false);
7028                         spin_unlock(&tp->lock);
7029                 }
7030         }
7031 }
7032
/* Transfer replenished rx buffers from the per-vector producer set @spr
 * into the destination set @dpr (under RSS, vector 0's set, which is the
 * one the hardware consumes; see the caller in tg3_poll_work()).
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied and the transfer could only complete partially.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard-ring transfer loop. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy at most up to the end of the ring, without wrapping. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a
		 * buffer; anything beyond it must wait for a later pass.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy only the DMA addresses; the other BD fields are
		 * invariant (see tg3_alloc_rx_data).
		 */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo-ring transfer loop; same structure as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
7158
/* Core NAPI poll body: reap tx completions, then receive packets within
 * the remaining @budget.  Under RSS, vector 1 additionally gathers the
 * per-vector rx producer rings into vector 0's set and updates the
 * hardware mailboxes.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() found inconsistent ring state; recovery (a
		 * chip reset) is pending, so skip rx processing.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no rx return ring. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		/* Pull replenished buffers from every rx vector into the
		 * vector 0 producer set that the chip consumes.
		 */
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the BD updates visible before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came back -ENOSPC.  NOTE(review): writing
		 * coal_now here presumably forces another coalescing event
		 * so the leftover buffers get retried — confirm against
		 * the HOSTCC documentation.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7209
7210 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7211 {
7212         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7213                 schedule_work(&tp->reset_task);
7214 }
7215
/* Cancel any pending reset task and clear the related flags.
 * cancel_work_sync() comes first so a reset task already running has
 * finished before the PENDING/RECOVERY flags are cleared.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7222
/* NAPI poll handler for the MSI-X vectors (napi[1..]); napi[0] uses
 * tg3_poll() instead.  Uses tagged status: the last seen status tag is
 * written back to the interrupt mailbox when re-enabling interrupts.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, do not re-enable
		 * interrupts.
		 */
		if (unlikely(work_done >= budget))
			break;

		/* tnapi->last_tag is used in the interrupt mailbox write
		 * below to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7283
7284 static void tg3_process_error(struct tg3 *tp)
7285 {
7286         u32 val;
7287         bool real_error = false;
7288
7289         if (tg3_flag(tp, ERROR_PROCESSED))
7290                 return;
7291
7292         /* Check Flow Attention register */
7293         val = tr32(HOSTCC_FLOW_ATTN);
7294         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7295                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7296                 real_error = true;
7297         }
7298
7299         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7300                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7301                 real_error = true;
7302         }
7303
7304         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7305                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7306                 real_error = true;
7307         }
7308
7309         if (!real_error)
7310                 return;
7311
7312         tg3_dump_state(tp);
7313
7314         tg3_flag_set(tp, ERROR_PROCESSED);
7315         tg3_reset_task_schedule(tp);
7316 }
7317
/* NAPI poll handler for napi[0] (also the only handler when MSI-X is
 * not in use).  In addition to TX/RX work it checks for chip errors
 * and link events, and supports both tagged and non-tagged status
 * block modes when re-enabling interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, keep interrupts off. */
		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable()
			 * below to tell the hw how much work has been
			 * processed, so we must read it before checking
			 * for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7366
7367 static void tg3_napi_disable(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         for (i = tp->irq_cnt - 1; i >= 0; i--)
7372                 napi_disable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_enable(struct tg3 *tp)
7376 {
7377         int i;
7378
7379         for (i = 0; i < tp->irq_cnt; i++)
7380                 napi_enable(&tp->napi[i].napi);
7381 }
7382
7383 static void tg3_napi_init(struct tg3 *tp)
7384 {
7385         int i;
7386
7387         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7388         for (i = 1; i < tp->irq_cnt; i++)
7389                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7390 }
7391
7392 static void tg3_napi_fini(struct tg3 *tp)
7393 {
7394         int i;
7395
7396         for (i = 0; i < tp->irq_cnt; i++)
7397                 netif_napi_del(&tp->napi[i].napi);
7398 }
7399
/* Quiesce the data path: refresh the TX timestamp first so the
 * watchdog does not fire while we are stopping, then disable NAPI,
 * drop the carrier, and disable the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7407
/* Restart the data path after a reset/reconfiguration: wake the TX
 * queues, restore carrier state, re-enable NAPI and interrupts.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force the first poll to see pending work. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7426
/* Wait for all in-flight interrupt handlers to finish.  Setting
 * tp->irq_sync makes new handler invocations bail out early (via
 * tg3_irq_sync()); tp->lock is dropped around the synchronize_irq()
 * calls, as the sparse annotations document, and re-taken before
 * returning to the caller.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on the handlers. */
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
7445
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7457
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7462
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and the next RX
	 * descriptor before NAPI runs.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is draining IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7480
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and the next RX
	 * descriptor before NAPI runs.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Skip scheduling while tg3_irq_quiesce() is draining IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7506
/* Shared/INTx interrupt handler for non-tagged status mode. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7555
/* Shared/INTx interrupt handler for tagged status mode: a repeated
 * status tag means no new work since the last interrupt.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7607
7608 /* ISR for interrupt test */
7609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7610 {
7611         struct tg3_napi *tnapi = dev_id;
7612         struct tg3 *tp = tnapi->tp;
7613         struct tg3_hw_status *sblk = tnapi->hw_status;
7614
7615         if ((sblk->status & SD_STATUS_UPDATED) ||
7616             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7617                 tg3_disable_ints(tp);
7618                 return IRQ_RETVAL(1);
7619         }
7620         return IRQ_RETVAL(0);
7621 }
7622
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke every vector's interrupt handler
 * directly, unless IRQs are currently being quiesced.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7636
/* TX watchdog timeout handler: optionally log chip state, then
 * schedule a full chip reset to recover the device.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7648
7649 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7650 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7651 {
7652         u32 base = (u32) mapping & 0xffffffff;
7653
7654         return base + len + 8 < base;
7655 }
7656
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc.  Only applies to the 5762 ASIC
 * and only for TSO packets (mss != 0).
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		/* Wrap in 32-bit arithmetic means the buffer end comes
		 * within mss (low 14 bits) of a 4GB boundary.
		 */
		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
7670
/* Test for DMA addresses > 40-bit.  Only relevant on 64-bit highmem
 * kernels for chips with the 40BIT_DMA_BUG flag set; everywhere else
 * the compile-time branch returns 0.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7683
7684 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7685                                  dma_addr_t mapping, u32 len, u32 flags,
7686                                  u32 mss, u32 vlan)
7687 {
7688         txbd->addr_hi = ((u64) mapping >> 32);
7689         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7690         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7691         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7692 }
7693
/* Queue TX descriptors for one DMA-mapped buffer, splitting it into
 * multiple descriptors when tp->dma_limit requires it.  *entry and
 * *budget are advanced for every descriptor consumed.
 *
 * Returns true if the mapping trips one of the hardware DMA bug
 * conditions (short-DMA, 4GB/40-bit boundary, or descriptor budget
 * exhaustion during splitting), in which case the caller must take
 * the workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips mis-handle DMA transfers of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* All but the last piece must not carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the extra descriptor so unmap can skip it. */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: unmark the last
				 * split descriptor and report the bug
				 * path.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7756
/* Unmap the DMA mappings of a queued skb starting at ring position
 * @entry: the linear head first, then fragments 0..@last.  Extra
 * descriptors marked 'fragmented' (inserted by tg3_tx_frag_set() for
 * dma_limit splitting) share the head/frag mapping and are skipped,
 * clearing the mark as we go.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip split descriptors belonging to the linear head. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split descriptors belonging to this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7794
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy the skb into a freshly allocated linear skb (with extra
 * headroom alignment on 5701), remap it, and queue it via
 * tg3_tx_frag_set().  On success *pskb is replaced with the new skb
 * and the original is consumed.  Returns 0 on success, -1 on
 * allocation/mapping/queueing failure (the new skb is freed and
 * *pskb is set to NULL by the !new_skb path's later assignment).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hits a DMA bug: undo the mapping
				 * and give up on this packet.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
7849
7850 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7851 {
7852         /* Check if we will never have enough descriptors,
7853          * as gso_segs can be more than current ring size
7854          */
7855         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7856 }
7857
7858 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7859
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set(): software-segment the skb and
 * transmit each resulting packet individually through
 * tg3_start_xmit().  The original skb is always consumed.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Estimate the number of fragments in the worst case */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	/* Segment in software, disabling TSO features for the split. */
	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	/* Transmit each segment as a standalone packet. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
7902
7903 /* hard_start_xmit for all devices */
7904 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7905 {
7906         struct tg3 *tp = netdev_priv(dev);
7907         u32 len, entry, base_flags, mss, vlan = 0;
7908         u32 budget;
7909         int i = -1, would_hit_hwbug;
7910         dma_addr_t mapping;
7911         struct tg3_napi *tnapi;
7912         struct netdev_queue *txq;
7913         unsigned int last;
7914         struct iphdr *iph = NULL;
7915         struct tcphdr *tcph = NULL;
7916         __sum16 tcp_csum = 0, ip_csum = 0;
7917         __be16 ip_tot_len = 0;
7918
7919         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7920         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7921         if (tg3_flag(tp, ENABLE_TSS))
7922                 tnapi++;
7923
7924         budget = tg3_tx_avail(tnapi);
7925
7926         /* We are running in BH disabled context with netif_tx_lock
7927          * and TX reclaim runs via tp->napi.poll inside of a software
7928          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7929          * no IRQ context deadlocks to worry about either.  Rejoice!
7930          */
7931         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7932                 if (!netif_tx_queue_stopped(txq)) {
7933                         netif_tx_stop_queue(txq);
7934
7935                         /* This is a hard error, log it. */
7936                         netdev_err(dev,
7937                                    "BUG! Tx Ring full when queue awake!\n");
7938                 }
7939                 return NETDEV_TX_BUSY;
7940         }
7941
7942         entry = tnapi->tx_prod;
7943         base_flags = 0;
7944
7945         mss = skb_shinfo(skb)->gso_size;
7946         if (mss) {
7947                 u32 tcp_opt_len, hdr_len;
7948
7949                 if (skb_cow_head(skb, 0))
7950                         goto drop;
7951
7952                 iph = ip_hdr(skb);
7953                 tcp_opt_len = tcp_optlen(skb);
7954
7955                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7956
7957                 /* HW/FW can not correctly segment packets that have been
7958                  * vlan encapsulated.
7959                  */
7960                 if (skb->protocol == htons(ETH_P_8021Q) ||
7961                     skb->protocol == htons(ETH_P_8021AD)) {
7962                         if (tg3_tso_bug_gso_check(tnapi, skb))
7963                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7964                         goto drop;
7965                 }
7966
7967                 if (!skb_is_gso_v6(skb)) {
7968                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7969                             tg3_flag(tp, TSO_BUG)) {
7970                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7971                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7972                                 goto drop;
7973                         }
7974                         ip_csum = iph->check;
7975                         ip_tot_len = iph->tot_len;
7976                         iph->check = 0;
7977                         iph->tot_len = htons(mss + hdr_len);
7978                 }
7979
7980                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7981                                TXD_FLAG_CPU_POST_DMA);
7982
7983                 tcph = tcp_hdr(skb);
7984                 tcp_csum = tcph->check;
7985
7986                 if (tg3_flag(tp, HW_TSO_1) ||
7987                     tg3_flag(tp, HW_TSO_2) ||
7988                     tg3_flag(tp, HW_TSO_3)) {
7989                         tcph->check = 0;
7990                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7991                 } else {
7992                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7993                                                          0, IPPROTO_TCP, 0);
7994                 }
7995
7996                 if (tg3_flag(tp, HW_TSO_3)) {
7997                         mss |= (hdr_len & 0xc) << 12;
7998                         if (hdr_len & 0x10)
7999                                 base_flags |= 0x00000010;
8000                         base_flags |= (hdr_len & 0x3e0) << 5;
8001                 } else if (tg3_flag(tp, HW_TSO_2))
8002                         mss |= hdr_len << 9;
8003                 else if (tg3_flag(tp, HW_TSO_1) ||
8004                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8005                         if (tcp_opt_len || iph->ihl > 5) {
8006                                 int tsflags;
8007
8008                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8009                                 mss |= (tsflags << 11);
8010                         }
8011                 } else {
8012                         if (tcp_opt_len || iph->ihl > 5) {
8013                                 int tsflags;
8014
8015                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8016                                 base_flags |= tsflags << 12;
8017                         }
8018                 }
8019         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8020                 /* HW/FW can not correctly checksum packets that have been
8021                  * vlan encapsulated.
8022                  */
8023                 if (skb->protocol == htons(ETH_P_8021Q) ||
8024                     skb->protocol == htons(ETH_P_8021AD)) {
8025                         if (skb_checksum_help(skb))
8026                                 goto drop;
8027                 } else  {
8028                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8029                 }
8030         }
8031
8032         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8033             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8034                 base_flags |= TXD_FLAG_JMB_PKT;
8035
8036         if (skb_vlan_tag_present(skb)) {
8037                 base_flags |= TXD_FLAG_VLAN;
8038                 vlan = skb_vlan_tag_get(skb);
8039         }
8040
8041         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8042             tg3_flag(tp, TX_TSTAMP_EN)) {
8043                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8044                 base_flags |= TXD_FLAG_HWTSTAMP;
8045         }
8046
8047         len = skb_headlen(skb);
8048
8049         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8050         if (pci_dma_mapping_error(tp->pdev, mapping))
8051                 goto drop;
8052
8053
8054         tnapi->tx_buffers[entry].skb = skb;
8055         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8056
8057         would_hit_hwbug = 0;
8058
8059         if (tg3_flag(tp, 5701_DMA_BUG))
8060                 would_hit_hwbug = 1;
8061
8062         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8063                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8064                             mss, vlan)) {
8065                 would_hit_hwbug = 1;
8066         } else if (skb_shinfo(skb)->nr_frags > 0) {
8067                 u32 tmp_mss = mss;
8068
8069                 if (!tg3_flag(tp, HW_TSO_1) &&
8070                     !tg3_flag(tp, HW_TSO_2) &&
8071                     !tg3_flag(tp, HW_TSO_3))
8072                         tmp_mss = 0;
8073
8074                 /* Now loop through additional data
8075                  * fragments, and queue them.
8076                  */
8077                 last = skb_shinfo(skb)->nr_frags - 1;
8078                 for (i = 0; i <= last; i++) {
8079                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8080
8081                         len = skb_frag_size(frag);
8082                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8083                                                    len, DMA_TO_DEVICE);
8084
8085                         tnapi->tx_buffers[entry].skb = NULL;
8086                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8087                                            mapping);
8088                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8089                                 goto dma_error;
8090
8091                         if (!budget ||
8092                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8093                                             len, base_flags |
8094                                             ((i == last) ? TXD_FLAG_END : 0),
8095                                             tmp_mss, vlan)) {
8096                                 would_hit_hwbug = 1;
8097                                 break;
8098                         }
8099                 }
8100         }
8101
8102         if (would_hit_hwbug) {
8103                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8104
8105                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8106                         /* If it's a TSO packet, do GSO instead of
8107                          * allocating and copying to a large linear SKB
8108                          */
8109                         if (ip_tot_len) {
8110                                 iph->check = ip_csum;
8111                                 iph->tot_len = ip_tot_len;
8112                         }
8113                         tcph->check = tcp_csum;
8114                         return tg3_tso_bug(tp, tnapi, txq, skb);
8115                 }
8116
8117                 /* If the workaround fails due to memory/mapping
8118                  * failure, silently drop this packet.
8119                  */
8120                 entry = tnapi->tx_prod;
8121                 budget = tg3_tx_avail(tnapi);
8122                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8123                                                 base_flags, mss, vlan))
8124                         goto drop_nofree;
8125         }
8126
8127         skb_tx_timestamp(skb);
8128         netdev_tx_sent_queue(txq, skb->len);
8129
8130         /* Sync BD data before updating mailbox */
8131         wmb();
8132
8133         tnapi->tx_prod = entry;
8134         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8135                 netif_tx_stop_queue(txq);
8136
8137                 /* netif_tx_stop_queue() must be done before checking
8138                  * checking tx index in tg3_tx_avail() below, because in
8139                  * tg3_tx(), we update tx index before checking for
8140                  * netif_tx_queue_stopped().
8141                  */
8142                 smp_mb();
8143                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8144                         netif_tx_wake_queue(txq);
8145         }
8146
8147         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8148                 /* Packets are ready, update Tx producer idx on card. */
8149                 tw32_tx_mbox(tnapi->prodmbox, entry);
8150                 mmiowb();
8151         }
8152
8153         return NETDEV_TX_OK;
8154
8155 dma_error:
8156         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8157         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8158 drop:
8159         dev_kfree_skb_any(skb);
8160 drop_nofree:
8161         tp->tx_dropped++;
8162         return NETDEV_TX_OK;
8163 }
8164
8165 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8166 {
8167         if (enable) {
8168                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8169                                   MAC_MODE_PORT_MODE_MASK);
8170
8171                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8172
8173                 if (!tg3_flag(tp, 5705_PLUS))
8174                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8175
8176                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8177                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8178                 else
8179                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8180         } else {
8181                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8182
8183                 if (tg3_flag(tp, 5705_PLUS) ||
8184                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8185                     tg3_asic_rev(tp) == ASIC_REV_5700)
8186                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8187         }
8188
8189         tw32(MAC_MODE, tp->mac_mode);
8190         udelay(40);
8191 }
8192
/* Put the PHY into loopback at the requested speed.
 *
 * @tp:      device instance
 * @speed:   SPEED_10 / SPEED_100 / SPEED_1000; anything else is coerced
 *           to the fastest rate the PHY supports
 * @extlpbk: true for external loopback, false for internal PHY loopback
 *           via BMCR_LOOPBACK
 *
 * Returns 0 on success, -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET (10/100-only) PHYs cannot do gigabit. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role so the gigabit link can come
			 * up without a negotiating partner.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the speed chosen above. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs want opposite polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8285
8286 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8287 {
8288         struct tg3 *tp = netdev_priv(dev);
8289
8290         if (features & NETIF_F_LOOPBACK) {
8291                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8292                         return;
8293
8294                 spin_lock_bh(&tp->lock);
8295                 tg3_mac_loopback(tp, true);
8296                 netif_carrier_on(tp->dev);
8297                 spin_unlock_bh(&tp->lock);
8298                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8299         } else {
8300                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8301                         return;
8302
8303                 spin_lock_bh(&tp->lock);
8304                 tg3_mac_loopback(tp, false);
8305                 /* Force link status check */
8306                 tg3_setup_phy(tp, true);
8307                 spin_unlock_bh(&tp->lock);
8308                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8309         }
8310 }
8311
8312 static netdev_features_t tg3_fix_features(struct net_device *dev,
8313         netdev_features_t features)
8314 {
8315         struct tg3 *tp = netdev_priv(dev);
8316
8317         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8318                 features &= ~NETIF_F_ALL_TSO;
8319
8320         return features;
8321 }
8322
8323 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8324 {
8325         netdev_features_t changed = dev->features ^ features;
8326
8327         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8328                 tg3_set_loopback(dev, features);
8329
8330         return 0;
8331 }
8332
/* Free all rx data buffers attached to a producer ring set.
 *
 * For per-vector (non-default) ring sets only the slots between the
 * consumer and producer indexes hold live buffers, so only that window
 * is walked (the ring masks make the walk wrap correctly).  The default
 * set (napi[0]) is fully populated, so every slot is freed.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips carry jumbo frames on the std ring (see
	 * tg3_rx_prodring_alloc()), so they have no separate jumbo ring.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
8366
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM if not even one buffer could be
 * allocated for a ring.  Partial allocation failures shrink
 * tp->rx_pending / tp->rx_jumbo_pending instead of failing outright.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector (non-default) sets have no hardware descriptors of
	 * their own; just clear the software buffer tables.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips carry jumbo frames on the std ring, so its
	 * DMA buffers must be sized for the jumbo MTU in that case.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8475
8476 static void tg3_rx_prodring_fini(struct tg3 *tp,
8477                                  struct tg3_rx_prodring_set *tpr)
8478 {
8479         kfree(tpr->rx_std_buffers);
8480         tpr->rx_std_buffers = NULL;
8481         kfree(tpr->rx_jmb_buffers);
8482         tpr->rx_jmb_buffers = NULL;
8483         if (tpr->rx_std) {
8484                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8485                                   tpr->rx_std, tpr->rx_std_mapping);
8486                 tpr->rx_std = NULL;
8487         }
8488         if (tpr->rx_jmb) {
8489                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8490                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8491                 tpr->rx_jmb = NULL;
8492         }
8493 }
8494
8495 static int tg3_rx_prodring_init(struct tg3 *tp,
8496                                 struct tg3_rx_prodring_set *tpr)
8497 {
8498         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8499                                       GFP_KERNEL);
8500         if (!tpr->rx_std_buffers)
8501                 return -ENOMEM;
8502
8503         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8504                                          TG3_RX_STD_RING_BYTES(tp),
8505                                          &tpr->rx_std_mapping,
8506                                          GFP_KERNEL);
8507         if (!tpr->rx_std)
8508                 goto err_out;
8509
8510         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8511                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8512                                               GFP_KERNEL);
8513                 if (!tpr->rx_jmb_buffers)
8514                         goto err_out;
8515
8516                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8517                                                  TG3_RX_JMB_RING_BYTES(tp),
8518                                                  &tpr->rx_jmb_mapping,
8519                                                  GFP_KERNEL);
8520                 if (!tpr->rx_jmb)
8521                         goto err_out;
8522         }
8523
8524         return 0;
8525
8526 err_out:
8527         tg3_rx_prodring_fini(tp, tpr);
8528         return -ENOMEM;
8529 }
8530
8531 /* Free up pending packets in all rx/tx rings.
8532  *
8533  * The chip has been shut down and the driver detached from
8534  * the networking, so no interrupts or new tx packets will
8535  * end up in the driver.  tp->{tx,}lock is not held and we are not
8536  * in an interrupt context and thus may sleep.
8537  */
8538 static void tg3_free_rings(struct tg3 *tp)
8539 {
8540         int i, j;
8541
8542         for (j = 0; j < tp->irq_cnt; j++) {
8543                 struct tg3_napi *tnapi = &tp->napi[j];
8544
8545                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8546
8547                 if (!tnapi->tx_buffers)
8548                         continue;
8549
8550                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8551                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8552
8553                         if (!skb)
8554                                 continue;
8555
8556                         tg3_tx_skb_unmap(tnapi, i,
8557                                          skb_shinfo(skb)->nr_frags - 1);
8558
8559                         dev_consume_skb_any(skb);
8560                 }
8561                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8562         }
8563 }
8564
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM if repopulating a producer ring
 * fails (in which case all rings are freed again before returning).
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		/* NOTE(review): the memset below also clears the two
		 * fields zeroed just above; the explicit stores appear
		 * redundant but are kept as-is.
		 */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		/* Only vectors that own a producer ring need refilling. */
		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
8606
8607 static void tg3_mem_tx_release(struct tg3 *tp)
8608 {
8609         int i;
8610
8611         for (i = 0; i < tp->irq_max; i++) {
8612                 struct tg3_napi *tnapi = &tp->napi[i];
8613
8614                 if (tnapi->tx_ring) {
8615                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8616                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8617                         tnapi->tx_ring = NULL;
8618                 }
8619
8620                 kfree(tnapi->tx_buffers);
8621                 tnapi->tx_buffers = NULL;
8622         }
8623 }
8624
/* Allocate a tx descriptor ring and software ring-info table for each
 * tx-capable vector.
 *
 * Returns 0 on success, or -ENOMEM with all partial allocations
 * released via tg3_mem_tx_release().
 */
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
8657
8658 static void tg3_mem_rx_release(struct tg3 *tp)
8659 {
8660         int i;
8661
8662         for (i = 0; i < tp->irq_max; i++) {
8663                 struct tg3_napi *tnapi = &tp->napi[i];
8664
8665                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8666
8667                 if (!tnapi->rx_rcb)
8668                         continue;
8669
8670                 dma_free_coherent(&tp->pdev->dev,
8671                                   TG3_RX_RCB_RING_BYTES(tp),
8672                                   tnapi->rx_rcb,
8673                                   tnapi->rx_rcb_mapping);
8674                 tnapi->rx_rcb = NULL;
8675         }
8676 }
8677
/* Allocate rx producer ring sets and rx return (rcb) rings for each
 * rx-capable vector.
 *
 * Returns 0 on success, or -ENOMEM with all partial allocations
 * released via tg3_mem_rx_release().
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
8717
8718 /*
8719  * Must not be invoked with interrupt sources disabled and
8720  * the hardware shutdown down.
8721  */
8722 static void tg3_free_consistent(struct tg3 *tp)
8723 {
8724         int i;
8725
8726         for (i = 0; i < tp->irq_cnt; i++) {
8727                 struct tg3_napi *tnapi = &tp->napi[i];
8728
8729                 if (tnapi->hw_status) {
8730                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8731                                           tnapi->hw_status,
8732                                           tnapi->status_mapping);
8733                         tnapi->hw_status = NULL;
8734                 }
8735         }
8736
8737         tg3_mem_rx_release(tp);
8738         tg3_mem_tx_release(tp);
8739
8740         /* tp->hw_stats can be referenced safely:
8741          *     1. under rtnl_lock
8742          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8743          */
8744         if (tp->hw_stats) {
8745                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8746                                   tp->hw_stats, tp->stats_mapping);
8747                 tp->hw_stats = NULL;
8748         }
8749 }
8750
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the DMA-coherent hardware statistics block, a status block
 * per interrupt vector, and the tx/rx ring memory, wiring up each
 * vector's rx return ring producer index pointer along the way.
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Vector 0 handles no rx under RSS, so its
			 * producer pointer stays NULL.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8816
8817 #define MAX_WAIT_CNT 1000
8818
8819 /* To stop a block, clear the enable bit and poll till it
8820  * clears.  tp->lock is held.
8821  */
8822 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8823 {
8824         unsigned int i;
8825         u32 val;
8826
8827         if (tg3_flag(tp, 5705_PLUS)) {
8828                 switch (ofs) {
8829                 case RCVLSC_MODE:
8830                 case DMAC_MODE:
8831                 case MBFREE_MODE:
8832                 case BUFMGR_MODE:
8833                 case MEMARB_MODE:
8834                         /* We can't enable/disable these bits of the
8835                          * 5705/5750, just say success.
8836                          */
8837                         return 0;
8838
8839                 default:
8840                         break;
8841                 }
8842         }
8843
8844         val = tr32(ofs);
8845         val &= ~enable_bit;
8846         tw32_f(ofs, val);
8847
8848         for (i = 0; i < MAX_WAIT_CNT; i++) {
8849                 if (pci_channel_offline(tp->pdev)) {
8850                         dev_err(&tp->pdev->dev,
8851                                 "tg3_stop_block device offline, "
8852                                 "ofs=%lx enable_bit=%x\n",
8853                                 ofs, enable_bit);
8854                         return -ENODEV;
8855                 }
8856
8857                 udelay(100);
8858                 val = tr32(ofs);
8859                 if ((val & enable_bit) == 0)
8860                         break;
8861         }
8862
8863         if (i == MAX_WAIT_CNT && !silent) {
8864                 dev_err(&tp->pdev->dev,
8865                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8866                         ofs, enable_bit);
8867                 return -ENODEV;
8868         }
8869
8870         return 0;
8871 }
8872
8873 /* tp->lock is held. */
8874 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8875 {
8876         int i, err;
8877
8878         tg3_disable_ints(tp);
8879
8880         if (pci_channel_offline(tp->pdev)) {
8881                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8882                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8883                 err = -ENODEV;
8884                 goto err_no_dev;
8885         }
8886
8887         tp->rx_mode &= ~RX_MODE_ENABLE;
8888         tw32_f(MAC_RX_MODE, tp->rx_mode);
8889         udelay(10);
8890
8891         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8892         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8893         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8894         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8895         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8897
8898         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8899         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8900         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8901         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8902         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8903         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8904         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8905
8906         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8907         tw32_f(MAC_MODE, tp->mac_mode);
8908         udelay(40);
8909
8910         tp->tx_mode &= ~TX_MODE_ENABLE;
8911         tw32_f(MAC_TX_MODE, tp->tx_mode);
8912
8913         for (i = 0; i < MAX_WAIT_CNT; i++) {
8914                 udelay(100);
8915                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8916                         break;
8917         }
8918         if (i >= MAX_WAIT_CNT) {
8919                 dev_err(&tp->pdev->dev,
8920                         "%s timed out, TX_MODE_ENABLE will not clear "
8921                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8922                 err |= -ENODEV;
8923         }
8924
8925         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8926         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8927         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8928
8929         tw32(FTQ_RESET, 0xffffffff);
8930         tw32(FTQ_RESET, 0x00000000);
8931
8932         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8933         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8934
8935 err_no_dev:
8936         for (i = 0; i < tp->irq_cnt; i++) {
8937                 struct tg3_napi *tnapi = &tp->napi[i];
8938                 if (tnapi->hw_status)
8939                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8940         }
8941
8942         return err;
8943 }
8944
/* Save the PCI command register before a chip reset.  The GRC_MISC_CFG
 * core-clock reset can clear bits in this register (see tg3_chip_reset());
 * tg3_restore_pci_state() writes the saved value back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8950
/* Restore PCI state after chip reset.
 *
 * The core-clock reset clears the memory enable bit in the PCI command
 * register and, on some chips, the MSI enable bit, so the values saved
 * by tg3_save_pci_state() are written back here along with the
 * indirect-access and APE window settings the driver depends on.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command register saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI/PCI-X: rewrite cache line size and latency
	 * timer (PCIe devices do not need this).
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
9011
9012 static void tg3_override_clk(struct tg3 *tp)
9013 {
9014         u32 val;
9015
9016         switch (tg3_asic_rev(tp)) {
9017         case ASIC_REV_5717:
9018                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9019                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9020                      TG3_CPMU_MAC_ORIDE_ENABLE);
9021                 break;
9022
9023         case ASIC_REV_5719:
9024         case ASIC_REV_5720:
9025                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9026                 break;
9027
9028         default:
9029                 return;
9030         }
9031 }
9032
9033 static void tg3_restore_clk(struct tg3 *tp)
9034 {
9035         u32 val;
9036
9037         switch (tg3_asic_rev(tp)) {
9038         case ASIC_REV_5717:
9039                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9040                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9041                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9042                 break;
9043
9044         case ASIC_REV_5719:
9045         case ASIC_REV_5720:
9046                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9047                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9048                 break;
9049
9050         default:
9051                 return;
9052         }
9053 }
9054
/* Reset the chip core and bring enough of it back up to talk to the
 * bootcode and management firmware again.  Returns 0 on success,
 * -ENODEV if the device is gone, or the tg3_poll_fw() result.
 * tp->lock is held on entry; it is dropped and re-acquired around the
 * synchronize_irq() calls below (hence the sparse annotations).
 */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Drop the lock while waiting for any in-flight irq handlers
	 * to finish; they see CHIP_RESETTING set above.
	 */
	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write actually triggers the core clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter; on 5780-class chips preserve
	 * the other mode bits.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for bootcode/firmware to finish its post-reset work. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode for the PHY type in use. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9332
9333 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9334 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9335 static void __tg3_set_rx_mode(struct net_device *);
9336
9337 /* tp->lock is held. */
9338 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9339 {
9340         int err;
9341
9342         tg3_stop_fw(tp);
9343
9344         tg3_write_sig_pre_reset(tp, kind);
9345
9346         tg3_abort_hw(tp, silent);
9347         err = tg3_chip_reset(tp);
9348
9349         __tg3_set_mac_addr(tp, false);
9350
9351         tg3_write_sig_legacy(tp, kind);
9352         tg3_write_sig_post_reset(tp, kind);
9353
9354         if (tp->hw_stats) {
9355                 /* Save the stats across chip resets... */
9356                 tg3_get_nstats(tp, &tp->net_stats_prev);
9357                 tg3_get_estats(tp, &tp->estats_prev);
9358
9359                 /* And make sure the next sample is new data */
9360                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9361         }
9362
9363         return err;
9364 }
9365
9366 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9367 {
9368         struct tg3 *tp = netdev_priv(dev);
9369         struct sockaddr *addr = p;
9370         int err = 0;
9371         bool skip_mac_1 = false;
9372
9373         if (!is_valid_ether_addr(addr->sa_data))
9374                 return -EADDRNOTAVAIL;
9375
9376         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9377
9378         if (!netif_running(dev))
9379                 return 0;
9380
9381         if (tg3_flag(tp, ENABLE_ASF)) {
9382                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9383
9384                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9385                 addr0_low = tr32(MAC_ADDR_0_LOW);
9386                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9387                 addr1_low = tr32(MAC_ADDR_1_LOW);
9388
9389                 /* Skip MAC addr 1 if ASF is using it. */
9390                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9391                     !(addr1_high == 0 && addr1_low == 0))
9392                         skip_mac_1 = true;
9393         }
9394         spin_lock_bh(&tp->lock);
9395         __tg3_set_mac_addr(tp, skip_mac_1);
9396         __tg3_set_rx_mode(dev);
9397         spin_unlock_bh(&tp->lock);
9398
9399         return err;
9400 }
9401
9402 /* tp->lock is held. */
9403 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9404                            dma_addr_t mapping, u32 maxlen_flags,
9405                            u32 nic_addr)
9406 {
9407         tg3_write_mem(tp,
9408                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9409                       ((u64) mapping >> 32));
9410         tg3_write_mem(tp,
9411                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9412                       ((u64) mapping & 0xffffffff));
9413         tg3_write_mem(tp,
9414                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9415                        maxlen_flags);
9416
9417         if (!tg3_flag(tp, 5705_PLUS))
9418                 tg3_write_mem(tp,
9419                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9420                               nic_addr);
9421 }
9422
9423
9424 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9425 {
9426         int i = 0;
9427
9428         if (!tg3_flag(tp, ENABLE_TSS)) {
9429                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9430                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9431                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9432         } else {
9433                 tw32(HOSTCC_TXCOL_TICKS, 0);
9434                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9435                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9436
9437                 for (; i < tp->txq_cnt; i++) {
9438                         u32 reg;
9439
9440                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9441                         tw32(reg, ec->tx_coalesce_usecs);
9442                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9443                         tw32(reg, ec->tx_max_coalesced_frames);
9444                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9445                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9446                 }
9447         }
9448
9449         for (; i < tp->irq_max - 1; i++) {
9450                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9451                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9452                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9453         }
9454 }
9455
9456 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9457 {
9458         int i = 0;
9459         u32 limit = tp->rxq_cnt;
9460
9461         if (!tg3_flag(tp, ENABLE_RSS)) {
9462                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9463                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9464                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9465                 limit--;
9466         } else {
9467                 tw32(HOSTCC_RXCOL_TICKS, 0);
9468                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9469                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9470         }
9471
9472         for (; i < limit; i++) {
9473                 u32 reg;
9474
9475                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9476                 tw32(reg, ec->rx_coalesce_usecs);
9477                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9478                 tw32(reg, ec->rx_max_coalesced_frames);
9479                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9480                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9481         }
9482
9483         for (; i < tp->irq_max - 1; i++) {
9484                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9485                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9486                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9487         }
9488 }
9489
9490 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9491 {
9492         tg3_coal_tx_init(tp, ec);
9493         tg3_coal_rx_init(tp, ec);
9494
9495         if (!tg3_flag(tp, 5705_PLUS)) {
9496                 u32 val = ec->stats_block_coalesce_usecs;
9497
9498                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9499                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9500
9501                 if (!tp->link_up)
9502                         val = 0;
9503
9504                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9505         }
9506 }
9507
9508 /* tp->lock is held. */
9509 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9510 {
9511         u32 txrcb, limit;
9512
9513         /* Disable all transmit rings but the first. */
9514         if (!tg3_flag(tp, 5705_PLUS))
9515                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9516         else if (tg3_flag(tp, 5717_PLUS))
9517                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9518         else if (tg3_flag(tp, 57765_CLASS) ||
9519                  tg3_asic_rev(tp) == ASIC_REV_5762)
9520                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9521         else
9522                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9523
9524         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9525              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9526                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9527                               BDINFO_FLAGS_DISABLED);
9528 }
9529
9530 /* tp->lock is held. */
9531 static void tg3_tx_rcbs_init(struct tg3 *tp)
9532 {
9533         int i = 0;
9534         u32 txrcb = NIC_SRAM_SEND_RCB;
9535
9536         if (tg3_flag(tp, ENABLE_TSS))
9537                 i++;
9538
9539         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9540                 struct tg3_napi *tnapi = &tp->napi[i];
9541
9542                 if (!tnapi->tx_ring)
9543                         continue;
9544
9545                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9546                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9547                                NIC_SRAM_TX_BUFFER_DESC);
9548         }
9549 }
9550
9551 /* tp->lock is held. */
9552 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9553 {
9554         u32 rxrcb, limit;
9555
9556         /* Disable all receive return rings but the first. */
9557         if (tg3_flag(tp, 5717_PLUS))
9558                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9559         else if (!tg3_flag(tp, 5705_PLUS))
9560                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9561         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9562                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9563                  tg3_flag(tp, 57765_CLASS))
9564                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9565         else
9566                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9567
9568         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9569              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9570                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9571                               BDINFO_FLAGS_DISABLED);
9572 }
9573
9574 /* tp->lock is held. */
9575 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9576 {
9577         int i = 0;
9578         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9579
9580         if (tg3_flag(tp, ENABLE_RSS))
9581                 i++;
9582
9583         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9584                 struct tg3_napi *tnapi = &tp->napi[i];
9585
9586                 if (!tnapi->rx_rcb)
9587                         continue;
9588
9589                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9590                                (tp->rx_ret_ring_mask + 1) <<
9591                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9592         }
9593 }
9594
/* tp->lock is held.
 * Quiesce and reinitialize the host/NIC ring state: disable the TX and
 * RX return ring control blocks, disable interrupts via the per-vector
 * interrupt mailboxes, zero every producer/consumer mailbox, clear the
 * status blocks in host memory and reprogram their DMA addresses, then
 * rebuild the TX and RX return ring control blocks.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, TX uses vector 0's producer mailbox only. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Per-vector status blocks: each subsequent vector's high/low
	 * address register pair sits 8 bytes past the previous one.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9665
/* Program the RX buffer-descriptor replenish thresholds.  The NIC-side
 * threshold is bounded by the per-chip-family BD cache size and by
 * rx_std_max_post; the host-side threshold scales with the configured
 * ring length.  The hardware is given the smaller of the two, for the
 * standard ring and (where present) the jumbo ring.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); /* never 0 */

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Done unless this chip also has a usable jumbo ring. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9704
9705 static inline u32 calc_crc(unsigned char *buf, int len)
9706 {
9707         u32 reg;
9708         u32 tmp;
9709         int j, k;
9710
9711         reg = 0xffffffff;
9712
9713         for (j = 0; j < len; j++) {
9714                 reg ^= buf[j];
9715
9716                 for (k = 0; k < 8; k++) {
9717                         tmp = reg & 0x01;
9718
9719                         reg >>= 1;
9720
9721                         if (tmp)
9722                                 reg ^= CRC32_POLY_LE;
9723                 }
9724         }
9725
9726         return ~reg;
9727 }
9728
9729 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9730 {
9731         /* accept or reject all multicast frames */
9732         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9733         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9734         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9735         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9736 }
9737
/* Program the RX filtering state (promiscuous flag, VLAN tag keeping,
 * multicast hash filter, unicast MAC filter slots) from dev->flags and
 * the device's address lists, then write MAC_RX_MODE if it changed.
 * NOTE(review): presumably called under tp->lock like the neighbouring
 * __tg3_* helpers — confirm at call sites.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address to one of the 128 filter bits: the
		 * inverted low 7 CRC bits select the bit, of which bits
		 * 6:5 pick the hash register and bits 4:0 the position
		 * within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* More unicast addresses than filter slots: fall back to
	 * promiscuous mode.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	/* Only touch the hardware RX mode register when it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9805
9806 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9807 {
9808         int i;
9809
9810         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9811                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9812 }
9813
9814 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9815 {
9816         int i;
9817
9818         if (!tg3_flag(tp, SUPPORT_MSIX))
9819                 return;
9820
9821         if (tp->rxq_cnt == 1) {
9822                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9823                 return;
9824         }
9825
9826         /* Validate table against current IRQ count */
9827         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9828                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9829                         break;
9830         }
9831
9832         if (i != TG3_RSS_INDIR_TBL_SIZE)
9833                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9834 }
9835
9836 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9837 {
9838         int i = 0;
9839         u32 reg = MAC_RSS_INDIR_TBL_0;
9840
9841         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9842                 u32 val = tp->rss_ind_tbl[i];
9843                 i++;
9844                 for (; i % 8; i++) {
9845                         val <<= 4;
9846                         val |= tp->rss_ind_tbl[i];
9847                 }
9848                 tw32(reg, val);
9849                 reg += 4;
9850         }
9851 }
9852
9853 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9854 {
9855         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9856                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9857         else
9858                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9859 }
9860
9861 /* tp->lock is held. */
9862 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9863 {
9864         u32 val, rdmac_mode;
9865         int i, err, limit;
9866         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9867
9868         tg3_disable_ints(tp);
9869
9870         tg3_stop_fw(tp);
9871
9872         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9873
9874         if (tg3_flag(tp, INIT_COMPLETE))
9875                 tg3_abort_hw(tp, 1);
9876
9877         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9878             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9879                 tg3_phy_pull_config(tp);
9880                 tg3_eee_pull_config(tp, NULL);
9881                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9882         }
9883
9884         /* Enable MAC control of LPI */
9885         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9886                 tg3_setup_eee(tp);
9887
9888         if (reset_phy)
9889                 tg3_phy_reset(tp);
9890
9891         err = tg3_chip_reset(tp);
9892         if (err)
9893                 return err;
9894
9895         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9896
9897         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9898                 val = tr32(TG3_CPMU_CTRL);
9899                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9900                 tw32(TG3_CPMU_CTRL, val);
9901
9902                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9903                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9904                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9905                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9906
9907                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9908                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9909                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9910                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9911
9912                 val = tr32(TG3_CPMU_HST_ACC);
9913                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9914                 val |= CPMU_HST_ACC_MACCLK_6_25;
9915                 tw32(TG3_CPMU_HST_ACC, val);
9916         }
9917
9918         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9919                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9920                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9921                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9922                 tw32(PCIE_PWR_MGMT_THRESH, val);
9923
9924                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9925                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9926
9927                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9928
9929                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9930                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9931         }
9932
9933         if (tg3_flag(tp, L1PLLPD_EN)) {
9934                 u32 grc_mode = tr32(GRC_MODE);
9935
9936                 /* Access the lower 1K of PL PCIE block registers. */
9937                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9938                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9939
9940                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9941                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9942                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9943
9944                 tw32(GRC_MODE, grc_mode);
9945         }
9946
9947         if (tg3_flag(tp, 57765_CLASS)) {
9948                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9949                         u32 grc_mode = tr32(GRC_MODE);
9950
9951                         /* Access the lower 1K of PL PCIE block registers. */
9952                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9953                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9954
9955                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9956                                    TG3_PCIE_PL_LO_PHYCTL5);
9957                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9958                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9959
9960                         tw32(GRC_MODE, grc_mode);
9961                 }
9962
9963                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9964                         u32 grc_mode;
9965
9966                         /* Fix transmit hangs */
9967                         val = tr32(TG3_CPMU_PADRNG_CTL);
9968                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9969                         tw32(TG3_CPMU_PADRNG_CTL, val);
9970
9971                         grc_mode = tr32(GRC_MODE);
9972
9973                         /* Access the lower 1K of DL PCIE block registers. */
9974                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9975                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9976
9977                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9978                                    TG3_PCIE_DL_LO_FTSMAX);
9979                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9980                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9981                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9982
9983                         tw32(GRC_MODE, grc_mode);
9984                 }
9985
9986                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9987                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9988                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9989                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9990         }
9991
9992         /* This works around an issue with Athlon chipsets on
9993          * B3 tigon3 silicon.  This bit has no effect on any
9994          * other revision.  But do not set this on PCI Express
9995          * chips and don't even touch the clocks if the CPMU is present.
9996          */
9997         if (!tg3_flag(tp, CPMU_PRESENT)) {
9998                 if (!tg3_flag(tp, PCI_EXPRESS))
9999                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10000                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10001         }
10002
10003         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10004             tg3_flag(tp, PCIX_MODE)) {
10005                 val = tr32(TG3PCI_PCISTATE);
10006                 val |= PCISTATE_RETRY_SAME_DMA;
10007                 tw32(TG3PCI_PCISTATE, val);
10008         }
10009
10010         if (tg3_flag(tp, ENABLE_APE)) {
10011                 /* Allow reads and writes to the
10012                  * APE register and memory space.
10013                  */
10014                 val = tr32(TG3PCI_PCISTATE);
10015                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10016                        PCISTATE_ALLOW_APE_SHMEM_WR |
10017                        PCISTATE_ALLOW_APE_PSPACE_WR;
10018                 tw32(TG3PCI_PCISTATE, val);
10019         }
10020
10021         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10022                 /* Enable some hw fixes.  */
10023                 val = tr32(TG3PCI_MSI_DATA);
10024                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10025                 tw32(TG3PCI_MSI_DATA, val);
10026         }
10027
10028         /* Descriptor ring init may make accesses to the
10029          * NIC SRAM area to setup the TX descriptors, so we
10030          * can only do this after the hardware has been
10031          * successfully reset.
10032          */
10033         err = tg3_init_rings(tp);
10034         if (err)
10035                 return err;
10036
10037         if (tg3_flag(tp, 57765_PLUS)) {
10038                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10039                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10040                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10041                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10042                 if (!tg3_flag(tp, 57765_CLASS) &&
10043                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10044                     tg3_asic_rev(tp) != ASIC_REV_5762)
10045                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10046                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10047         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10048                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10049                 /* This value is determined during the probe time DMA
10050                  * engine test, tg3_test_dma.
10051                  */
10052                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10053         }
10054
10055         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10056                           GRC_MODE_4X_NIC_SEND_RINGS |
10057                           GRC_MODE_NO_TX_PHDR_CSUM |
10058                           GRC_MODE_NO_RX_PHDR_CSUM);
10059         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10060
10061         /* Pseudo-header checksum is done by hardware logic and not
10062          * the offload processers, so make the chip do the pseudo-
10063          * header checksums on receive.  For transmit it is more
10064          * convenient to do the pseudo-header checksum in software
10065          * as Linux does that on transmit for us in all cases.
10066          */
10067         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10068
10069         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10070         if (tp->rxptpctl)
10071                 tw32(TG3_RX_PTP_CTL,
10072                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10073
10074         if (tg3_flag(tp, PTP_CAPABLE))
10075                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10076
10077         tw32(GRC_MODE, tp->grc_mode | val);
10078
10079         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10080          * south bridge limitation. As a workaround, Driver is setting MRRS
10081          * to 2048 instead of default 4096.
10082          */
10083         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10084             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10085                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10086                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10087         }
10088
10089         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10090         val = tr32(GRC_MISC_CFG);
10091         val &= ~0xff;
10092         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10093         tw32(GRC_MISC_CFG, val);
10094
10095         /* Initialize MBUF/DESC pool. */
10096         if (tg3_flag(tp, 5750_PLUS)) {
10097                 /* Do nothing.  */
10098         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10099                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10100                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10101                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10102                 else
10103                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10104                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10105                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10106         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10107                 int fw_len;
10108
10109                 fw_len = tp->fw_len;
10110                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10111                 tw32(BUFMGR_MB_POOL_ADDR,
10112                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10113                 tw32(BUFMGR_MB_POOL_SIZE,
10114                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10115         }
10116
10117         if (tp->dev->mtu <= ETH_DATA_LEN) {
10118                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10119                      tp->bufmgr_config.mbuf_read_dma_low_water);
10120                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10121                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10122                 tw32(BUFMGR_MB_HIGH_WATER,
10123                      tp->bufmgr_config.mbuf_high_water);
10124         } else {
10125                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10126                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10127                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10128                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10129                 tw32(BUFMGR_MB_HIGH_WATER,
10130                      tp->bufmgr_config.mbuf_high_water_jumbo);
10131         }
10132         tw32(BUFMGR_DMA_LOW_WATER,
10133              tp->bufmgr_config.dma_low_water);
10134         tw32(BUFMGR_DMA_HIGH_WATER,
10135              tp->bufmgr_config.dma_high_water);
10136
10137         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10138         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10139                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10140         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10141             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10142             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10143             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10144                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10145         tw32(BUFMGR_MODE, val);
10146         for (i = 0; i < 2000; i++) {
10147                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10148                         break;
10149                 udelay(10);
10150         }
10151         if (i >= 2000) {
10152                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10153                 return -ENODEV;
10154         }
10155
10156         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10157                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10158
10159         tg3_setup_rxbd_thresholds(tp);
10160
10161         /* Initialize TG3_BDINFO's at:
10162          *  RCVDBDI_STD_BD:     standard eth size rx ring
10163          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10164          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10165          *
10166          * like so:
10167          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10168          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10169          *                              ring attribute flags
10170          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10171          *
10172          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10173          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10174          *
10175          * The size of each ring is fixed in the firmware, but the location is
10176          * configurable.
10177          */
10178         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10179              ((u64) tpr->rx_std_mapping >> 32));
10180         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10181              ((u64) tpr->rx_std_mapping & 0xffffffff));
10182         if (!tg3_flag(tp, 5717_PLUS))
10183                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10184                      NIC_SRAM_RX_BUFFER_DESC);
10185
10186         /* Disable the mini ring */
10187         if (!tg3_flag(tp, 5705_PLUS))
10188                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189                      BDINFO_FLAGS_DISABLED);
10190
10191         /* Program the jumbo buffer descriptor ring control
10192          * blocks on those devices that have them.
10193          */
10194         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10195             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10196
10197                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10198                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10199                              ((u64) tpr->rx_jmb_mapping >> 32));
10200                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10201                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10202                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10203                               BDINFO_FLAGS_MAXLEN_SHIFT;
10204                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205                              val | BDINFO_FLAGS_USE_EXT_RECV);
10206                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10207                             tg3_flag(tp, 57765_CLASS) ||
10208                             tg3_asic_rev(tp) == ASIC_REV_5762)
10209                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10210                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10211                 } else {
10212                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10213                              BDINFO_FLAGS_DISABLED);
10214                 }
10215
10216                 if (tg3_flag(tp, 57765_PLUS)) {
10217                         val = TG3_RX_STD_RING_SIZE(tp);
10218                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10219                         val |= (TG3_RX_STD_DMA_SZ << 2);
10220                 } else
10221                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10222         } else
10223                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10224
10225         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10226
10227         tpr->rx_std_prod_idx = tp->rx_pending;
10228         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10229
10230         tpr->rx_jmb_prod_idx =
10231                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10232         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10233
10234         tg3_rings_reset(tp);
10235
10236         /* Initialize MAC address and backoff seed. */
10237         __tg3_set_mac_addr(tp, false);
10238
10239         /* MTU + ethernet header + FCS + optional VLAN tag */
10240         tw32(MAC_RX_MTU_SIZE,
10241              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10242
10243         /* The slot time is changed by tg3_setup_phy if we
10244          * run at gigabit with half duplex.
10245          */
10246         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10247               (6 << TX_LENGTHS_IPG_SHIFT) |
10248               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10249
10250         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10251             tg3_asic_rev(tp) == ASIC_REV_5762)
10252                 val |= tr32(MAC_TX_LENGTHS) &
10253                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10254                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10255
10256         tw32(MAC_TX_LENGTHS, val);
10257
10258         /* Receive rules. */
10259         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10260         tw32(RCVLPC_CONFIG, 0x0181);
10261
10262         /* Calculate RDMAC_MODE setting early, we need it to determine
10263          * the RCVLPC_STATE_ENABLE mask.
10264          */
10265         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10266                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10267                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10268                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10269                       RDMAC_MODE_LNGREAD_ENAB);
10270
10271         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10272                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10273
10274         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10275             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276             tg3_asic_rev(tp) == ASIC_REV_57780)
10277                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10278                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10279                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10280
10281         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10282             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10283                 if (tg3_flag(tp, TSO_CAPABLE) &&
10284                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10285                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10286                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10287                            !tg3_flag(tp, IS_5788)) {
10288                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10289                 }
10290         }
10291
10292         if (tg3_flag(tp, PCI_EXPRESS))
10293                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10294
10295         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10296                 tp->dma_limit = 0;
10297                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10298                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10299                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10300                 }
10301         }
10302
10303         if (tg3_flag(tp, HW_TSO_1) ||
10304             tg3_flag(tp, HW_TSO_2) ||
10305             tg3_flag(tp, HW_TSO_3))
10306                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10307
10308         if (tg3_flag(tp, 57765_PLUS) ||
10309             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10310             tg3_asic_rev(tp) == ASIC_REV_57780)
10311                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10312
10313         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10314             tg3_asic_rev(tp) == ASIC_REV_5762)
10315                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10316
10317         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10318             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10319             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10320             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10321             tg3_flag(tp, 57765_PLUS)) {
10322                 u32 tgtreg;
10323
10324                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10325                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10326                 else
10327                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10328
10329                 val = tr32(tgtreg);
10330                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10331                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10332                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10333                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10334                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10335                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10336                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10337                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10338                 }
10339                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10340         }
10341
10342         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10343             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10344             tg3_asic_rev(tp) == ASIC_REV_5762) {
10345                 u32 tgtreg;
10346
10347                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10348                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10349                 else
10350                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10351
10352                 val = tr32(tgtreg);
10353                 tw32(tgtreg, val |
10354                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10355                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10356         }
10357
10358         /* Receive/send statistics. */
10359         if (tg3_flag(tp, 5750_PLUS)) {
10360                 val = tr32(RCVLPC_STATS_ENABLE);
10361                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10362                 tw32(RCVLPC_STATS_ENABLE, val);
10363         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10364                    tg3_flag(tp, TSO_CAPABLE)) {
10365                 val = tr32(RCVLPC_STATS_ENABLE);
10366                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10367                 tw32(RCVLPC_STATS_ENABLE, val);
10368         } else {
10369                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10370         }
10371         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10372         tw32(SNDDATAI_STATSENAB, 0xffffff);
10373         tw32(SNDDATAI_STATSCTRL,
10374              (SNDDATAI_SCTRL_ENABLE |
10375               SNDDATAI_SCTRL_FASTUPD));
10376
10377         /* Setup host coalescing engine. */
10378         tw32(HOSTCC_MODE, 0);
10379         for (i = 0; i < 2000; i++) {
10380                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10381                         break;
10382                 udelay(10);
10383         }
10384
10385         __tg3_set_coalesce(tp, &tp->coal);
10386
10387         if (!tg3_flag(tp, 5705_PLUS)) {
10388                 /* Status/statistics block address.  See tg3_timer,
10389                  * the tg3_periodic_fetch_stats call there, and
10390                  * tg3_get_stats to see how this works for 5705/5750 chips.
10391                  */
10392                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10393                      ((u64) tp->stats_mapping >> 32));
10394                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10395                      ((u64) tp->stats_mapping & 0xffffffff));
10396                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10397
10398                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10399
10400                 /* Clear statistics and status block memory areas */
10401                 for (i = NIC_SRAM_STATS_BLK;
10402                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10403                      i += sizeof(u32)) {
10404                         tg3_write_mem(tp, i, 0);
10405                         udelay(40);
10406                 }
10407         }
10408
10409         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10410
10411         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10412         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10413         if (!tg3_flag(tp, 5705_PLUS))
10414                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10415
10416         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10417                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10418                 /* reset to prevent losing 1st rx packet intermittently */
10419                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10420                 udelay(10);
10421         }
10422
10423         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10424                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10425                         MAC_MODE_FHDE_ENABLE;
10426         if (tg3_flag(tp, ENABLE_APE))
10427                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10428         if (!tg3_flag(tp, 5705_PLUS) &&
10429             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10430             tg3_asic_rev(tp) != ASIC_REV_5700)
10431                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10432         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10433         udelay(40);
10434
10435         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10436          * If TG3_FLAG_IS_NIC is zero, we should read the
10437          * register to preserve the GPIO settings for LOMs. The GPIOs,
10438          * whether used as inputs or outputs, are set by boot code after
10439          * reset.
10440          */
10441         if (!tg3_flag(tp, IS_NIC)) {
10442                 u32 gpio_mask;
10443
10444                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10445                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10446                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10447
10448                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10449                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10450                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10451
10452                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10453                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10454
10455                 tp->grc_local_ctrl &= ~gpio_mask;
10456                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10457
10458                 /* GPIO1 must be driven high for eeprom write protect */
10459                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10460                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10461                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10462         }
10463         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10464         udelay(100);
10465
10466         if (tg3_flag(tp, USING_MSIX)) {
10467                 val = tr32(MSGINT_MODE);
10468                 val |= MSGINT_MODE_ENABLE;
10469                 if (tp->irq_cnt > 1)
10470                         val |= MSGINT_MODE_MULTIVEC_EN;
10471                 if (!tg3_flag(tp, 1SHOT_MSI))
10472                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10473                 tw32(MSGINT_MODE, val);
10474         }
10475
10476         if (!tg3_flag(tp, 5705_PLUS)) {
10477                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10478                 udelay(40);
10479         }
10480
10481         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10482                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10483                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10484                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10485                WDMAC_MODE_LNGREAD_ENAB);
10486
10487         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10488             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10489                 if (tg3_flag(tp, TSO_CAPABLE) &&
10490                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10491                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10492                         /* nothing */
10493                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10494                            !tg3_flag(tp, IS_5788)) {
10495                         val |= WDMAC_MODE_RX_ACCEL;
10496                 }
10497         }
10498
10499         /* Enable host coalescing bug fix */
10500         if (tg3_flag(tp, 5755_PLUS))
10501                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10502
10503         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10504                 val |= WDMAC_MODE_BURST_ALL_DATA;
10505
10506         tw32_f(WDMAC_MODE, val);
10507         udelay(40);
10508
10509         if (tg3_flag(tp, PCIX_MODE)) {
10510                 u16 pcix_cmd;
10511
10512                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10513                                      &pcix_cmd);
10514                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10515                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10516                         pcix_cmd |= PCI_X_CMD_READ_2K;
10517                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10518                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10519                         pcix_cmd |= PCI_X_CMD_READ_2K;
10520                 }
10521                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10522                                       pcix_cmd);
10523         }
10524
10525         tw32_f(RDMAC_MODE, rdmac_mode);
10526         udelay(40);
10527
10528         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10529             tg3_asic_rev(tp) == ASIC_REV_5720) {
10530                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10531                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10532                                 break;
10533                 }
10534                 if (i < TG3_NUM_RDMA_CHANNELS) {
10535                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10536                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10537                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10538                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10539                 }
10540         }
10541
10542         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10543         if (!tg3_flag(tp, 5705_PLUS))
10544                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10545
10546         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10547                 tw32(SNDDATAC_MODE,
10548                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10549         else
10550                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10551
10552         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10553         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10554         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10555         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10556                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10557         tw32(RCVDBDI_MODE, val);
10558         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10559         if (tg3_flag(tp, HW_TSO_1) ||
10560             tg3_flag(tp, HW_TSO_2) ||
10561             tg3_flag(tp, HW_TSO_3))
10562                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10563         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10564         if (tg3_flag(tp, ENABLE_TSS))
10565                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10566         tw32(SNDBDI_MODE, val);
10567         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10568
10569         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10570                 err = tg3_load_5701_a0_firmware_fix(tp);
10571                 if (err)
10572                         return err;
10573         }
10574
10575         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10576                 /* Ignore any errors for the firmware download. If download
10577                  * fails, the device will operate with EEE disabled
10578                  */
10579                 tg3_load_57766_firmware(tp);
10580         }
10581
10582         if (tg3_flag(tp, TSO_CAPABLE)) {
10583                 err = tg3_load_tso_firmware(tp);
10584                 if (err)
10585                         return err;
10586         }
10587
10588         tp->tx_mode = TX_MODE_ENABLE;
10589
10590         if (tg3_flag(tp, 5755_PLUS) ||
10591             tg3_asic_rev(tp) == ASIC_REV_5906)
10592                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10593
10594         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10595             tg3_asic_rev(tp) == ASIC_REV_5762) {
10596                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10597                 tp->tx_mode &= ~val;
10598                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10599         }
10600
10601         tw32_f(MAC_TX_MODE, tp->tx_mode);
10602         udelay(100);
10603
10604         if (tg3_flag(tp, ENABLE_RSS)) {
10605                 u32 rss_key[10];
10606
10607                 tg3_rss_write_indir_tbl(tp);
10608
10609                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10610
10611                 for (i = 0; i < 10 ; i++)
10612                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10613         }
10614
10615         tp->rx_mode = RX_MODE_ENABLE;
10616         if (tg3_flag(tp, 5755_PLUS))
10617                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10618
10619         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10620                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10621
10622         if (tg3_flag(tp, ENABLE_RSS))
10623                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10624                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10625                                RX_MODE_RSS_IPV6_HASH_EN |
10626                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10627                                RX_MODE_RSS_IPV4_HASH_EN |
10628                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10629
10630         tw32_f(MAC_RX_MODE, tp->rx_mode);
10631         udelay(10);
10632
10633         tw32(MAC_LED_CTRL, tp->led_ctrl);
10634
10635         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10636         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10637                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10638                 udelay(10);
10639         }
10640         tw32_f(MAC_RX_MODE, tp->rx_mode);
10641         udelay(10);
10642
10643         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10644                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10645                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10646                         /* Set drive transmission level to 1.2V  */
10647                         /* only if the signal pre-emphasis bit is not set  */
10648                         val = tr32(MAC_SERDES_CFG);
10649                         val &= 0xfffff000;
10650                         val |= 0x880;
10651                         tw32(MAC_SERDES_CFG, val);
10652                 }
10653                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10654                         tw32(MAC_SERDES_CFG, 0x616000);
10655         }
10656
10657         /* Prevent chip from dropping frames when flow control
10658          * is enabled.
10659          */
10660         if (tg3_flag(tp, 57765_CLASS))
10661                 val = 1;
10662         else
10663                 val = 2;
10664         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10665
10666         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10667             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10668                 /* Use hardware link auto-negotiation */
10669                 tg3_flag_set(tp, HW_AUTONEG);
10670         }
10671
10672         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10673             tg3_asic_rev(tp) == ASIC_REV_5714) {
10674                 u32 tmp;
10675
10676                 tmp = tr32(SERDES_RX_CTRL);
10677                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10678                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10679                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10680                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10681         }
10682
10683         if (!tg3_flag(tp, USE_PHYLIB)) {
10684                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10685                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10686
10687                 err = tg3_setup_phy(tp, false);
10688                 if (err)
10689                         return err;
10690
10691                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10692                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10693                         u32 tmp;
10694
10695                         /* Clear CRC stats. */
10696                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10697                                 tg3_writephy(tp, MII_TG3_TEST1,
10698                                              tmp | MII_TG3_TEST1_CRC_EN);
10699                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10700                         }
10701                 }
10702         }
10703
10704         __tg3_set_rx_mode(tp->dev);
10705
10706         /* Initialize receive rules. */
10707         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10708         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10709         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10710         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10711
10712         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10713                 limit = 8;
10714         else
10715                 limit = 16;
10716         if (tg3_flag(tp, ENABLE_ASF))
10717                 limit -= 4;
10718         switch (limit) {
10719         case 16:
10720                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10721                 /* fall through */
10722         case 15:
10723                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10724                 /* fall through */
10725         case 14:
10726                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10727                 /* fall through */
10728         case 13:
10729                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10730                 /* fall through */
10731         case 12:
10732                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10733                 /* fall through */
10734         case 11:
10735                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10736                 /* fall through */
10737         case 10:
10738                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10739                 /* fall through */
10740         case 9:
10741                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10742                 /* fall through */
10743         case 8:
10744                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10745                 /* fall through */
10746         case 7:
10747                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10748                 /* fall through */
10749         case 6:
10750                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10751                 /* fall through */
10752         case 5:
10753                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10754                 /* fall through */
10755         case 4:
10756                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10757         case 3:
10758                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10759         case 2:
10760         case 1:
10761
10762         default:
10763                 break;
10764         }
10765
10766         if (tg3_flag(tp, ENABLE_APE))
10767                 /* Write our heartbeat update interval to APE. */
10768                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10769                                 APE_HOST_HEARTBEAT_INT_5SEC);
10770
10771         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10772
10773         return 0;
10774 }
10775
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or the negative errno propagated from
 * tg3_reset_hw().  The statement order below matters: register access
 * must be enabled and boot firmware quiesced before any other hardware
 * pokes.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base address register to zero. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full chip reset + reinit; optionally resets the PHY too. */
	return tg3_reset_hw(tp, reset_phy);
}
10794
10795 #ifdef CONFIG_TIGON3_HWMON
10796 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10797 {
10798         int i;
10799
10800         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10801                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10802
10803                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10804                 off += len;
10805
10806                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10807                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10808                         memset(ocir, 0, TG3_OCIR_LEN);
10809         }
10810 }
10811
10812 /* sysfs attributes for hwmon */
10813 static ssize_t tg3_show_temp(struct device *dev,
10814                              struct device_attribute *devattr, char *buf)
10815 {
10816         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10817         struct tg3 *tp = dev_get_drvdata(dev);
10818         u32 temperature;
10819
10820         spin_lock_bh(&tp->lock);
10821         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10822                                 sizeof(temperature));
10823         spin_unlock_bh(&tp->lock);
10824         return sprintf(buf, "%u\n", temperature * 1000);
10825 }
10826
10827
/* Read-only hwmon attributes, all backed by tg3_show_temp(); the final
 * SENSOR_DEVICE_ATTR argument is the APE scratchpad offset of the
 * corresponding temperature record (current / caution / max).
 */
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list handed to the hwmon core. */
static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);	/* defines tg3_groups used in tg3_hwmon_open() */
10842
10843 static void tg3_hwmon_close(struct tg3 *tp)
10844 {
10845         if (tp->hwmon_dev) {
10846                 hwmon_device_unregister(tp->hwmon_dev);
10847                 tp->hwmon_dev = NULL;
10848         }
10849 }
10850
10851 static void tg3_hwmon_open(struct tg3 *tp)
10852 {
10853         int i;
10854         u32 size = 0;
10855         struct pci_dev *pdev = tp->pdev;
10856         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10857
10858         tg3_sd_scan_scratchpad(tp, ocirs);
10859
10860         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10861                 if (!ocirs[i].src_data_length)
10862                         continue;
10863
10864                 size += ocirs[i].src_hdr_length;
10865                 size += ocirs[i].src_data_length;
10866         }
10867
10868         if (!size)
10869                 return;
10870
10871         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10872                                                           tp, tg3_groups);
10873         if (IS_ERR(tp->hwmon_dev)) {
10874                 tp->hwmon_dev = NULL;
10875                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10876         }
10877 }
10878 #else
/* CONFIG_TIGON3_HWMON disabled: no-op stand-ins so callers need no ifdefs. */
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
10881 #endif /* CONFIG_TIGON3_HWMON */
10882
10883
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * low/high-pair statistic PSTAT, carrying into .high when .low wraps.
 * Multi-statement macro wrapped in do/while(0) for safe use in
 * if/else bodies.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
10890
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement counters
 * into the driver's 64-bit statistics block (tp->hw_stats).  Called
 * from tg3_timer() with tp->lock held (see the 5705_PLUS branch there).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip the register reads entirely while the link is down. */
	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* Once the cumulative tx packet count exceeds the number of RDMA
	 * channels, back out the 5719/5720 RDMA workaround bit that
	 * tg3_reset_hw() set — presumably only needed for the first few
	 * frames; TODO confirm against the chip errata.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* 5717/5762 and the A0 steppings of 5719/5720 are excluded from
	 * reading the discard counter directly; for those, derive
	 * rx_discards from the mbuf low-watermark attention bit instead.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		/* The attention bit yields at most one discard event per
		 * poll interval.
		 */
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Write the bit back to acknowledge/clear the
			 * latched attention — NOTE(review): assumes W1C
			 * semantics for this register; confirm.
			 */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		/* Mirror the running discard total into the low-watermark
		 * hit statistic.
		 */
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10956
10957 static void tg3_chk_missed_msi(struct tg3 *tp)
10958 {
10959         u32 i;
10960
10961         for (i = 0; i < tp->irq_cnt; i++) {
10962                 struct tg3_napi *tnapi = &tp->napi[i];
10963
10964                 if (tg3_has_work(tnapi)) {
10965                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10966                             tnapi->last_tx_cons == tnapi->tx_cons) {
10967                                 if (tnapi->chk_msi_cnt < 1) {
10968                                         tnapi->chk_msi_cnt++;
10969                                         return;
10970                                 }
10971                                 tg3_msi(0, tnapi);
10972                         }
10973                 }
10974                 tnapi->chk_msi_cnt = 0;
10975                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10976                 tnapi->last_tx_cons = tnapi->tx_cons;
10977         }
10978 }
10979
10980 static void tg3_timer(struct timer_list *t)
10981 {
10982         struct tg3 *tp = from_timer(tp, t, timer);
10983
10984         spin_lock(&tp->lock);
10985
10986         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10987                 spin_unlock(&tp->lock);
10988                 goto restart_timer;
10989         }
10990
10991         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10992             tg3_flag(tp, 57765_CLASS))
10993                 tg3_chk_missed_msi(tp);
10994
10995         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10996                 /* BCM4785: Flush posted writes from GbE to host memory. */
10997                 tr32(HOSTCC_MODE);
10998         }
10999
11000         if (!tg3_flag(tp, TAGGED_STATUS)) {
11001                 /* All of this garbage is because when using non-tagged
11002                  * IRQ status the mailbox/status_block protocol the chip
11003                  * uses with the cpu is race prone.
11004                  */
11005                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11006                         tw32(GRC_LOCAL_CTRL,
11007                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11008                 } else {
11009                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11010                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11011                 }
11012
11013                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11014                         spin_unlock(&tp->lock);
11015                         tg3_reset_task_schedule(tp);
11016                         goto restart_timer;
11017                 }
11018         }
11019
11020         /* This part only runs once per second. */
11021         if (!--tp->timer_counter) {
11022                 if (tg3_flag(tp, 5705_PLUS))
11023                         tg3_periodic_fetch_stats(tp);
11024
11025                 if (tp->setlpicnt && !--tp->setlpicnt)
11026                         tg3_phy_eee_enable(tp);
11027
11028                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11029                         u32 mac_stat;
11030                         int phy_event;
11031
11032                         mac_stat = tr32(MAC_STATUS);
11033
11034                         phy_event = 0;
11035                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11036                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11037                                         phy_event = 1;
11038                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11039                                 phy_event = 1;
11040
11041                         if (phy_event)
11042                                 tg3_setup_phy(tp, false);
11043                 } else if (tg3_flag(tp, POLL_SERDES)) {
11044                         u32 mac_stat = tr32(MAC_STATUS);
11045                         int need_setup = 0;
11046
11047                         if (tp->link_up &&
11048                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11049                                 need_setup = 1;
11050                         }
11051                         if (!tp->link_up &&
11052                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11053                                          MAC_STATUS_SIGNAL_DET))) {
11054                                 need_setup = 1;
11055                         }
11056                         if (need_setup) {
11057                                 if (!tp->serdes_counter) {
11058                                         tw32_f(MAC_MODE,
11059                                              (tp->mac_mode &
11060                                               ~MAC_MODE_PORT_MODE_MASK));
11061                                         udelay(40);
11062                                         tw32_f(MAC_MODE, tp->mac_mode);
11063                                         udelay(40);
11064                                 }
11065                                 tg3_setup_phy(tp, false);
11066                         }
11067                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11068                            tg3_flag(tp, 5780_CLASS)) {
11069                         tg3_serdes_parallel_detect(tp);
11070                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11071                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11072                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11073                                          TG3_CPMU_STATUS_LINK_MASK);
11074
11075                         if (link_up != tp->link_up)
11076                                 tg3_setup_phy(tp, false);
11077                 }
11078
11079                 tp->timer_counter = tp->timer_multiplier;
11080         }
11081
11082         /* Heartbeat is only sent once every 2 seconds.
11083          *
11084          * The heartbeat is to tell the ASF firmware that the host
11085          * driver is still alive.  In the event that the OS crashes,
11086          * ASF needs to reset the hardware to free up the FIFO space
11087          * that may be filled with rx packets destined for the host.
11088          * If the FIFO is full, ASF will no longer function properly.
11089          *
11090          * Unintended resets have been reported on real time kernels
11091          * where the timer doesn't run on time.  Netpoll will also have
11092          * same problem.
11093          *
11094          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11095          * to check the ring condition when the heartbeat is expiring
11096          * before doing the reset.  This will prevent most unintended
11097          * resets.
11098          */
11099         if (!--tp->asf_counter) {
11100                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11101                         tg3_wait_for_event_ack(tp);
11102
11103                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11104                                       FWCMD_NICDRV_ALIVE3);
11105                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11106                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11107                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11108
11109                         tg3_generate_fw_event(tp);
11110                 }
11111                 tp->asf_counter = tp->asf_multiplier;
11112         }
11113
11114         /* Update the APE heartbeat every 5 seconds.*/
11115         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11116
11117         spin_unlock(&tp->lock);
11118
11119 restart_timer:
11120         tp->timer.expires = jiffies + tp->timer_offset;
11121         add_timer(&tp->timer);
11122 }
11123
11124 static void tg3_timer_init(struct tg3 *tp)
11125 {
11126         if (tg3_flag(tp, TAGGED_STATUS) &&
11127             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11128             !tg3_flag(tp, 57765_CLASS))
11129                 tp->timer_offset = HZ;
11130         else
11131                 tp->timer_offset = HZ / 10;
11132
11133         BUG_ON(tp->timer_offset > HZ);
11134
11135         tp->timer_multiplier = (HZ / tp->timer_offset);
11136         tp->asf_multiplier = (HZ / tp->timer_offset) *
11137                              TG3_FW_UPDATE_FREQ_SEC;
11138
11139         timer_setup(&tp->timer, tg3_timer, 0);
11140 }
11141
11142 static void tg3_timer_start(struct tg3 *tp)
11143 {
11144         tp->asf_counter   = tp->asf_multiplier;
11145         tp->timer_counter = tp->timer_multiplier;
11146
11147         tp->timer.expires = jiffies + tp->timer_offset;
11148         add_timer(&tp->timer);
11149 }
11150
/* Deactivate the driver timer and wait for a concurrently running
 * handler to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
        del_timer_sync(&tp->timer);
}
11155
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device cannot be left half-initialized: the chip is
 * halted and the netdev is closed.  dev_close() must run without
 * tp->lock held, hence the unlock/relock annotated below.
 *
 * Returns 0 on success or the error from tg3_init_hw().
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Put the chip in a known-quiesced state before tearing
		 * the interface down.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop tp->lock for dev_close(); re-enable NAPI first so
		 * the close path finds the device in a consistent state.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
11179
/* Workqueue handler that fully resets and re-initializes the chip,
 * e.g. after a TX timeout.  Runs under the RTNL so it cannot race with
 * dev_close()/reconfiguration, and clears RESET_TASK_PENDING on exit.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device went down before the work ran; nothing to do. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	/* Drop tp->lock before the stop calls below — they presumably
	 * may sleep (NOTE(review): confirm against tg3_netif_stop()).
	 */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Switch the TX mailbox to the plain write method and
		 * the RX mailbox to the flushing write, and record
		 * MBOX_WRITE_REORDER, before retrying initialization.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware came back cleanly. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
	rtnl_unlock();
}
11226
11227 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11228 {
11229         irq_handler_t fn;
11230         unsigned long flags;
11231         char *name;
11232         struct tg3_napi *tnapi = &tp->napi[irq_num];
11233
11234         if (tp->irq_cnt == 1)
11235                 name = tp->dev->name;
11236         else {
11237                 name = &tnapi->irq_lbl[0];
11238                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11239                         snprintf(name, IFNAMSIZ,
11240                                  "%s-txrx-%d", tp->dev->name, irq_num);
11241                 else if (tnapi->tx_buffers)
11242                         snprintf(name, IFNAMSIZ,
11243                                  "%s-tx-%d", tp->dev->name, irq_num);
11244                 else if (tnapi->rx_rcb)
11245                         snprintf(name, IFNAMSIZ,
11246                                  "%s-rx-%d", tp->dev->name, irq_num);
11247                 else
11248                         snprintf(name, IFNAMSIZ,
11249                                  "%s-%d", tp->dev->name, irq_num);
11250                 name[IFNAMSIZ-1] = 0;
11251         }
11252
11253         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11254                 fn = tg3_msi;
11255                 if (tg3_flag(tp, 1SHOT_MSI))
11256                         fn = tg3_msi_1shot;
11257                 flags = 0;
11258         } else {
11259                 fn = tg3_interrupt;
11260                 if (tg3_flag(tp, TAGGED_STATUS))
11261                         fn = tg3_interrupt_tagged;
11262                 flags = IRQF_SHARED;
11263         }
11264
11265         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11266 }
11267
/* Verify that the chip can actually deliver an interrupt on vector 0.
 *
 * The normal handler is temporarily replaced by tg3_test_isr, an
 * interrupt is provoked via the host coalescing engine, and delivery
 * is confirmed by polling the interrupt mailbox / MISC_HOST_CTRL.
 * The original handler is always restored before returning.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is down, or an errno from request_irq()/tg3_request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
        u32 val;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Release vector 0 so the test ISR can be installed on it. */
        free_irq(tnapi->irq_vec, tnapi);

        /*
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }

        err = request_irq(tnapi->irq_vec, tg3_test_isr,
                          IRQF_SHARED, dev->name, tnapi);
        if (err)
                return err;

        tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Kick the coalescing engine to force an immediate status
         * block update and, with it, an interrupt.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               tnapi->coal_now);

        /* Poll up to ~50ms for evidence of delivery: either the
         * interrupt mailbox changed or PCI interrupts were masked by
         * the ISR.
         */
        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                /* Acknowledge the last tag so a pending update is not
                 * left blocking further status writes (57765+).
                 */
                if (tg3_flag(tp, 57765_PLUS) &&
                    tnapi->hw_status->status_tag != tnapi->last_tag)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Swap the test ISR back out for the real handler. */
        free_irq(tnapi->irq_vec, tnapi);

        err = tg3_request_irq(tp, 0);

        if (err)
                return err;

        if (intr_ok) {
                /* Reenable MSI one shot mode. */
                if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
                return 0;
        }

        return -EIO;
}
11341
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Other errors (including a failure to restore INTx) are returned as
 * negative errnos.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (SERR included). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        /* Tear down the MSI vector before switching interrupt modes. */
        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, true);

        tg3_full_unlock(tp);

        /* If re-init failed, don't leave a handler installed for a
         * dead device.
         */
        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
11402
11403 static int tg3_request_firmware(struct tg3 *tp)
11404 {
11405         const struct tg3_firmware_hdr *fw_hdr;
11406
11407         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11408                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11409                            tp->fw_needed);
11410                 return -ENOENT;
11411         }
11412
11413         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11414
11415         /* Firmware blob starts with version numbers, followed by
11416          * start address and _full_ length including BSS sections
11417          * (which must be longer than the actual data, of course
11418          */
11419
11420         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11421         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11422                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11423                            tp->fw_len, tp->fw_needed);
11424                 release_firmware(tp->fw);
11425                 tp->fw = NULL;
11426                 return -EINVAL;
11427         }
11428
11429         /* We no longer need firmware; we have it. */
11430         tp->fw_needed = NULL;
11431         return 0;
11432 }
11433
11434 static u32 tg3_irq_count(struct tg3 *tp)
11435 {
11436         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11437
11438         if (irq_cnt > 1) {
11439                 /* We want as many rx rings enabled as there are cpus.
11440                  * In multiqueue MSI-X mode, the first MSI-X vector
11441                  * only deals with link interrupts, etc, so we add
11442                  * one to the number of vectors we are requesting.
11443                  */
11444                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11445         }
11446
11447         return irq_cnt;
11448 }
11449
/* Try to switch the device to MSI-X with one vector per queue (plus
 * one slow-path vector when multiqueue).  Adjusts the queue counts if
 * the PCI core grants fewer vectors than requested.
 *
 * Returns true if MSI-X is enabled (possibly single-vector), false if
 * MSI-X could not be enabled and the caller should fall back.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc;
        struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

        tp->txq_cnt = tp->txq_req;
        tp->rxq_cnt = tp->rxq_req;
        if (!tp->rxq_cnt)
                tp->rxq_cnt = netif_get_num_default_rss_queues();
        if (tp->rxq_cnt > tp->rxq_max)
                tp->rxq_cnt = tp->rxq_max;

        /* Disable multiple TX rings by default.  Simple round-robin hardware
         * scheduling of the TX rings can cause starvation of rings with
         * small packets when other rings have TSO or jumbo packets.
         */
        if (!tp->txq_req)
                tp->txq_cnt = 1;

        tp->irq_cnt = tg3_irq_count(tp);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        /* Accept anywhere from 1 to irq_cnt vectors. */
        rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc < tp->irq_cnt) {
                /* Fewer vectors than asked for: shrink the queue
                 * counts to fit (vector 0 stays slow-path only).
                 */
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
                tp->rxq_cnt = max(rc - 1, 1);
                if (tp->txq_cnt)
                        tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        /* Single vector: MSI-X is on but no RSS/TSS. */
        if (tp->irq_cnt == 1)
                return true;

        tg3_flag_set(tp, ENABLE_RSS);

        if (tp->txq_cnt > 1)
                tg3_flag_set(tp, ENABLE_TSS);

        netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

        return true;
}
11508
/* Choose and configure the interrupt mode: MSI-X if possible, then
 * MSI, then legacy INTx.  Also programs MSGINT_MODE and normalizes
 * the vector/queue bookkeeping for the single-vector case.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                /* Enable message signalled interrupts in the chip,
                 * with multivector and one-shot settings matching the
                 * mode that was negotiated above.
                 */
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                if (!tg3_flag(tp, 1SHOT_MSI))
                        msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        /* Anything other than MSI-X uses exactly one vector: the
         * device's legacy/MSI irq.
         */
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
        }

        if (tp->irq_cnt == 1) {
                tp->txq_cnt = 1;
                tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
11547
11548 static void tg3_ints_fini(struct tg3 *tp)
11549 {
11550         if (tg3_flag(tp, USING_MSIX))
11551                 pci_disable_msix(tp->pdev);
11552         else if (tg3_flag(tp, USING_MSI))
11553                 pci_disable_msi(tp->pdev);
11554         tg3_flag_clear(tp, USING_MSI);
11555         tg3_flag_clear(tp, USING_MSIX);
11556         tg3_flag_clear(tp, ENABLE_RSS);
11557         tg3_flag_clear(tp, ENABLE_TSS);
11558 }
11559
/* Bring the device fully up: allocate interrupt vectors, DMA memory
 * and NAPI contexts, request IRQs, initialize the hardware, start the
 * timer and enable TX queues.  Counterpart of tg3_stop().
 *
 * @reset_phy: pass-through to tg3_init_hw()
 * @test_irq:  verify MSI delivery (tg3_test_msi) after init
 * @init:      notify APE firmware of RESET_KIND_INIT
 *
 * Returns 0 on success; on error all acquired resources are unwound
 * via the out_* labels and the error code is returned.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                     bool init)
{
        struct net_device *dev = tp->dev;
        int i, err;

        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
         */
        tg3_ints_init(tp);

        tg3_rss_check_indir_tbl(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                goto out_ints_fini;

        tg3_napi_init(tp);

        tg3_napi_enable(tp);

        /* Request one IRQ per vector; unwind already-requested ones
         * on failure.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                err = tg3_request_irq(tp, i);
                if (err) {
                        for (i--; i >= 0; i--) {
                                struct tg3_napi *tnapi = &tp->napi[i];

                                free_irq(tnapi->irq_vec, tnapi);
                        }
                        goto out_napi_fini;
                }
        }

        tg3_full_lock(tp, 0);

        if (init)
                tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        }

        tg3_full_unlock(tp);

        if (err)
                goto out_free_irq;

        if (test_irq && tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);

                if (err) {
                        /* MSI test failed and INTx could not be
                         * restored either — tear everything down.
                         */
                        tg3_full_lock(tp, 0);
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);

                        goto out_napi_fini;
                }

                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);

                        tw32(PCIE_TRANSACTION_CFG,
                             val | PCIE_TRANS_CFG_1SHOT_MSI);
                }
        }

        tg3_phy_start(tp);

        tg3_hwmon_open(tp);

        tg3_full_lock(tp, 0);

        tg3_timer_start(tp);
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);

        tg3_ptp_resume(tp);

        tg3_full_unlock(tp);

        netif_tx_start_all_queues(dev);

        /*
         * Reset loopback feature if it was turned on while the device was down
         * make sure that it's installed properly now.
         */
        if (dev->features & NETIF_F_LOOPBACK)
                tg3_set_loopback(dev, dev->features);

        return 0;

out_free_irq:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

out_napi_fini:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);

out_ints_fini:
        tg3_ints_fini(tp);

        return err;
}
11674
/* Bring the device fully down: cancel pending reset work, stop the
 * timer and NAPI, halt the chip, free rings, and release all IRQ
 * vectors and DMA memory.  Counterpart of tg3_start().
 */
static void tg3_stop(struct tg3 *tp)
{
        int i;

        /* Make sure no reset work runs concurrently with teardown. */
        tg3_reset_task_cancel(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_hwmon_close(tp);

        tg3_phy_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        /* Free per-vector IRQs in reverse order of request. */
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);
}
11709
/* ndo_open hook: load any needed firmware, power the chip up and run
 * the full bring-up via tg3_start().
 *
 * Returns 0 on success, -EAGAIN while PCI error recovery is in
 * progress, or an errno from firmware loading / power-up / start.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->pcierr_recovery) {
                netdev_err(dev, "Failed to open device. PCI error recovery "
                           "in progress\n");
                return -EAGAIN;
        }

        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                /* How a firmware load failure is handled depends on
                 * the chip: 57766 degrades EEE, 5701 A0 hard-fails,
                 * everything else degrades TSO.
                 */
                if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                        if (err) {
                                netdev_warn(tp->dev, "EEE capability disabled\n");
                                tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
                        } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                                netdev_warn(tp->dev, "EEE capability restored\n");
                                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
                        }
                } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
                        if (err)
                                return err;
                } else if (err) {
                        netdev_warn(tp->dev, "TSO capability disabled\n");
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
                        tg3_flag_set(tp, TSO_CAPABLE);
                }
        }

        tg3_carrier_off(tp);

        err = tg3_power_up(tp);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        err = tg3_start(tp,
                        !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
                        true, true);
        if (err) {
                /* Bring-up failed: drop auxiliary power and put the
                 * device back to sleep.
                 */
                tg3_frob_aux_power(tp, false);
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }

        return err;
}
11766
11767 static int tg3_close(struct net_device *dev)
11768 {
11769         struct tg3 *tp = netdev_priv(dev);
11770
11771         if (tp->pcierr_recovery) {
11772                 netdev_err(dev, "Failed to close device. PCI error recovery "
11773                            "in progress\n");
11774                 return -EAGAIN;
11775         }
11776
11777         tg3_stop(tp);
11778
11779         if (pci_device_is_present(tp->pdev)) {
11780                 tg3_power_down_prepare(tp);
11781
11782                 tg3_carrier_off(tp);
11783         }
11784         return 0;
11785 }
11786
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11788 {
11789        return ((u64)val->high << 32) | ((u64)val->low);
11790 }
11791
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 with a copper PHY, the count is read from the PHY's
 * receive error counter register and accumulated in software
 * (tp->phy_crc_errors); every other configuration reads the MAC's
 * rx_fcs_errors statistic directly.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
             tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 val;

                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Re-arm the PHY CRC counter, then read and
                         * clear-on-read the error count.
                         */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
11815
/* Fold one hardware counter into the ethtool stats snapshot:
 * current hardware reading plus the total saved in old_estats.
 * Relies on the locals estats, old_estats and hw_stats declared in
 * the calling function (tg3_get_estats).
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
11819
/* Fill @estats for ethtool: each field is the current hardware counter
 * from the chip's statistics block added to the value saved in
 * tp->estats_prev (totals accumulated before the counters were last
 * reset — NOTE(review): presumably at chip reset; confirm against the
 * code that snapshots estats_prev).
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* Receive-side counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* DMA / receive-list-placement counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing / interrupt counters. */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11903
/* Fill the standard rtnl_link_stats64 counters from the chip's
 * statistics block, adding the totals saved in tp->net_stats_prev.
 * Several netdev counters are sums of multiple hardware counters.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* Packets: unicast + multicast + broadcast. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        /* TX errors aggregate MAC, carrier-sense and discard counts. */
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        /* Length errors cover both oversize and undersize frames. */
        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on 5700/5701 copper. */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                tg3_calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software-maintained drop counters, kept by the driver. */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
}
11957
11958 static int tg3_get_regs_len(struct net_device *dev)
11959 {
11960         return TG3_REG_BLK_SIZE;
11961 }
11962
/* ethtool get_regs: dump the legacy register block into @_p.
 *
 * The buffer is zeroed first so a partial dump (or the low-power
 * early-out below) still returns defined contents.  Register reads
 * are done under the full lock to keep the snapshot consistent.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Don't touch the chip while it is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11981
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11983 {
11984         struct tg3 *tp = netdev_priv(dev);
11985
11986         return tp->nvram_size;
11987 }
11988
/* ethtool get_eeprom: read an arbitrary byte range out of NVRAM.
 *
 * NVRAM is only word (4-byte) addressable, so an unaligned request is
 * split into a leading partial word, a run of whole words, and a
 * trailing partial word.  eeprom->len is advanced as bytes land in
 * @data so a partial result is reported correctly on error.
 *
 * On CPMU chips, link-aware/link-idle power modes are temporarily
 * disabled (and the core clock overridden) so NVRAM access works
 * regardless of link state; both are restored at eeprom_done.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole containing word, copy out the tail. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* NOTE(review): i bytes have already been copied
			 * here, so backing off one word under-reports the
			 * partial length by 4 -- presumably deliberate
			 * conservatism; confirm before changing.
			 */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* NVRAM reads can take a while; yield periodically and
		 * honor pending signals so large dumps stay interruptible.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12079
/* ethtool set_eeprom: write an arbitrary byte range into NVRAM.
 *
 * NVRAM writes must be word (4-byte) aligned at both ends, so any
 * ragged edge is handled read-modify-write: the word containing the
 * unaligned start and/or end is read first, the user bytes are merged
 * into a temporary bounce buffer, and the whole padded range is
 * written in one tg3_nvram_write_block() call.  The caller's magic
 * must match TG3_EEPROM_MAGIC as a safety interlock.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		/* Widen the range down to the word boundary. */
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Bounce buffer: preserved edge words first, then the
		 * caller's payload overlaid at its real offset.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* Only free the bounce buffer, never the caller's data. */
	if (buf != data)
		kfree(buf);

	return ret;
}
12135
/* ethtool get_link_ksettings handler.
 *
 * When the device is driven through phylib (USE_PHYLIB) the query is
 * delegated to the attached PHY.  Otherwise the settings are
 * synthesized from tg3's own link_config state: supported modes from
 * the PHY capability flags, advertised modes including the legacy
 * Pause/Asym_Pause encoding of the flow-control config, and live
 * speed/duplex only while the link is actually up.
 */
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	/* Copper PHYs report TP modes; SerDes report fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Legacy pause advertisement encoding:
		 * RX+TX -> Pause; RX only -> Pause|Asym; TX only -> Asym.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	/* Live parameters only make sense with an established link. */
	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
12210
/* ethtool set_link_ksettings handler.
 *
 * Phylib-driven devices delegate entirely to the PHY layer.  For the
 * legacy path, the request is validated (autoneg mode, duplex, and --
 * for autoneg -- that only modes this PHY can actually do are
 * advertised), then applied to tp->link_config under the full lock and
 * pushed to the hardware via tg3_setup_phy() if the interface is up.
 */
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode needs an explicit, valid duplex. */
	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this PHY may legally advertise. */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject anything outside the PHY's abilities. */
		if (advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits in link_config;
		 * Autoneg is re-added below, pause bits live in flowctrl.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes can only be forced to 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	/* Remember the user overrode the defaults. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12306
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12308 {
12309         struct tg3 *tp = netdev_priv(dev);
12310
12311         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12313         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12314         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12315 }
12316
12317 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12318 {
12319         struct tg3 *tp = netdev_priv(dev);
12320
12321         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12322                 wol->supported = WAKE_MAGIC;
12323         else
12324                 wol->supported = 0;
12325         wol->wolopts = 0;
12326         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12327                 wol->wolopts = WAKE_MAGIC;
12328         memset(&wol->sopass, 0, sizeof(wol->sopass));
12329 }
12330
12331 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12332 {
12333         struct tg3 *tp = netdev_priv(dev);
12334         struct device *dp = &tp->pdev->dev;
12335
12336         if (wol->wolopts & ~WAKE_MAGIC)
12337                 return -EINVAL;
12338         if ((wol->wolopts & WAKE_MAGIC) &&
12339             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12340                 return -EINVAL;
12341
12342         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12343
12344         if (device_may_wakeup(dp))
12345                 tg3_flag_set(tp, WOL_ENABLE);
12346         else
12347                 tg3_flag_clear(tp, WOL_ENABLE);
12348
12349         return 0;
12350 }
12351
12352 static u32 tg3_get_msglevel(struct net_device *dev)
12353 {
12354         struct tg3 *tp = netdev_priv(dev);
12355         return tp->msg_enable;
12356 }
12357
12358 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12359 {
12360         struct tg3 *tp = netdev_priv(dev);
12361         tp->msg_enable = value;
12362 }
12363
/* ethtool nway_reset: restart autonegotiation.
 *
 * Only valid on a running interface with a non-SerDes PHY.  Phylib
 * devices delegate to phy_start_aneg(); otherwise BMCR is poked
 * directly under tp->lock.  Restart is refused unless autoneg is
 * already enabled or the driver is in parallel-detect mode.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and only the second
		 * read is checked -- presumably a dummy read to flush
		 * stale data; confirm against PHY errata before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12399
12400 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12401 {
12402         struct tg3 *tp = netdev_priv(dev);
12403
12404         ering->rx_max_pending = tp->rx_std_ring_mask;
12405         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12406                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12407         else
12408                 ering->rx_jumbo_max_pending = 0;
12409
12410         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12411
12412         ering->rx_pending = tp->rx_pending;
12413         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12414                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12415         else
12416                 ering->rx_jumbo_pending = 0;
12417
12418         ering->tx_pending = tp->napi[0].tx_pending;
12419 }
12420
/* ethtool set_ringparam: resize the RX/TX rings.
 *
 * The TX ring must leave room for a maximally-fragmented skb (and
 * three times that on TSO_BUG chips, which may need to linearize).
 * If the interface is up, the device is quiesced, the new sizes are
 * recorded, and the hardware is halted and re-initialized so the
 * rings are rebuilt at the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* One TX size applies to every queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the restart worked. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12468
12469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12470 {
12471         struct tg3 *tp = netdev_priv(dev);
12472
12473         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12474
12475         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12476                 epause->rx_pause = 1;
12477         else
12478                 epause->rx_pause = 0;
12479
12480         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12481                 epause->tx_pause = 1;
12482         else
12483                 epause->tx_pause = 0;
12484 }
12485
/* ethtool set_pauseparam: configure RX/TX flow control.
 *
 * Phylib path: the request is validated against the PHY's pause
 * abilities and pushed through phy_set_asym_pause(); if the PHY is
 * autonegotiating, the final flow-control resolution is deferred to
 * tg3_adjust_link().  Legacy path: the settings are recorded under
 * the full lock and the chip is halted/restarted so they take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			/* Forced link: apply forced-off flow control now. */
			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Re-init the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12571
12572 static int tg3_get_sset_count(struct net_device *dev, int sset)
12573 {
12574         switch (sset) {
12575         case ETH_SS_TEST:
12576                 return TG3_NUM_TEST;
12577         case ETH_SS_STATS:
12578                 return TG3_NUM_STATS;
12579         default:
12580                 return -EOPNOTSUPP;
12581         }
12582 }
12583
12584 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12585                          u32 *rules __always_unused)
12586 {
12587         struct tg3 *tp = netdev_priv(dev);
12588
12589         if (!tg3_flag(tp, SUPPORT_MSIX))
12590                 return -EOPNOTSUPP;
12591
12592         switch (info->cmd) {
12593         case ETHTOOL_GRXRINGS:
12594                 if (netif_running(tp->dev))
12595                         info->data = tp->rxq_cnt;
12596                 else {
12597                         info->data = num_online_cpus();
12598                         if (info->data > TG3_RSS_MAX_NUM_QS)
12599                                 info->data = TG3_RSS_MAX_NUM_QS;
12600                 }
12601
12602                 return 0;
12603
12604         default:
12605                 return -EOPNOTSUPP;
12606         }
12607 }
12608
12609 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12610 {
12611         u32 size = 0;
12612         struct tg3 *tp = netdev_priv(dev);
12613
12614         if (tg3_flag(tp, SUPPORT_MSIX))
12615                 size = TG3_RSS_INDIR_TBL_SIZE;
12616
12617         return size;
12618 }
12619
12620 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12621 {
12622         struct tg3 *tp = netdev_priv(dev);
12623         int i;
12624
12625         if (hfunc)
12626                 *hfunc = ETH_RSS_HASH_TOP;
12627         if (!indir)
12628                 return 0;
12629
12630         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12631                 indir[i] = tp->rss_ind_tbl[i];
12632
12633         return 0;
12634 }
12635
12636 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12637                         const u8 hfunc)
12638 {
12639         struct tg3 *tp = netdev_priv(dev);
12640         size_t i;
12641
12642         /* We require at least one supported parameter to be changed and no
12643          * change in any of the unsupported parameters
12644          */
12645         if (key ||
12646             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12647                 return -EOPNOTSUPP;
12648
12649         if (!indir)
12650                 return 0;
12651
12652         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12653                 tp->rss_ind_tbl[i] = indir[i];
12654
12655         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12656                 return 0;
12657
12658         /* It is legal to write the indirection
12659          * table while the device is running.
12660          */
12661         tg3_full_lock(tp, 0);
12662         tg3_rss_write_indir_tbl(tp);
12663         tg3_full_unlock(tp);
12664
12665         return 0;
12666 }
12667
12668 static void tg3_get_channels(struct net_device *dev,
12669                              struct ethtool_channels *channel)
12670 {
12671         struct tg3 *tp = netdev_priv(dev);
12672         u32 deflt_qs = netif_get_num_default_rss_queues();
12673
12674         channel->max_rx = tp->rxq_max;
12675         channel->max_tx = tp->txq_max;
12676
12677         if (netif_running(dev)) {
12678                 channel->rx_count = tp->rxq_cnt;
12679                 channel->tx_count = tp->txq_cnt;
12680         } else {
12681                 if (tp->rxq_req)
12682                         channel->rx_count = tp->rxq_req;
12683                 else
12684                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12685
12686                 if (tp->txq_req)
12687                         channel->tx_count = tp->txq_req;
12688                 else
12689                         channel->tx_count = min(deflt_qs, tp->txq_max);
12690         }
12691 }
12692
/* ethtool set_channels: change the requested RX/TX queue counts.
 *
 * The new counts are only recorded in rxq_req/txq_req; if the
 * interface is running, the whole device is stopped and restarted so
 * the queue/IRQ layout is rebuilt with the new counts.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Multiple queues require MSI-X. */
	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	/* Takes effect at the next open if the device is down. */
	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
12719
12720 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12721 {
12722         switch (stringset) {
12723         case ETH_SS_STATS:
12724                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12725                 break;
12726         case ETH_SS_TEST:
12727                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12728                 break;
12729         default:
12730                 WARN_ON(1);     /* we need a WARN() */
12731                 break;
12732         }
12733 }
12734
/* ethtool set_phys_id: blink the port LEDs so the user can locate
 * the adapter.
 *
 * ACTIVE returns 1 to ask the ethtool core to cycle ON/OFF once per
 * second; ON/OFF force the LEDs via the override bits in MAC_LED_CTRL,
 * and INACTIVE restores the normal LED configuration from tp->led_ctrl.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override link/traffic indication and force all LEDs on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Keep the overrides active but turn every LED off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the LED mode chosen at init time. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
12769
12770 static void tg3_get_ethtool_stats(struct net_device *dev,
12771                                    struct ethtool_stats *estats, u64 *tmp_stats)
12772 {
12773         struct tg3 *tp = netdev_priv(dev);
12774
12775         if (tp->hw_stats)
12776                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12777         else
12778                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12779 }
12780
/* Locate and read the device's VPD (Vital Product Data) block.
 *
 * For EEPROM-style NVRAM the NVRAM directory is scanned for an
 * extended-VPD entry; if none is found (or the NVRAM magic differs)
 * the default VPD offset/length are used.  Non-EEPROM parts are read
 * through PCI config space instead.
 *
 * Returns a kmalloc'ed buffer the caller must kfree, with the byte
 * length stored in *vpdlen, or NULL on any read/allocation failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry
		 * that overrides the default VPD location.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length field is in 4-byte words;
			 * the data offset follows the entry header.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry: fall back to the fixed area. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* Non-EEPROM NVRAM: read the VPD through PCI config
		 * space, retrying up to three times on timeout or
		 * interruption (partial reads advance pos/ptr).
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12856
/* NVRAM image sizes (in bytes) covered by the checksum test, one per
 * image format/revision detected from the NVRAM magic value.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool NVRAM self-test.
 *
 * Reads the NVRAM image and validates it according to its format:
 *  - firmware selfboot images: the 8-bit sum of the image must be
 *    zero (revision 2 excludes the 4-byte MBA field from the sum);
 *  - hardware selfboot images: stored parity bits must match the
 *    data bytes;
 *  - legacy EEPROM images: CRCs over the bootstrap and manufacturing
 *    sections, plus the VPD read-only area checksum, must all match.
 *
 * Returns 0 on success, -EIO on a read/checksum failure, -ENOMEM on
 * allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Devices without NVRAM trivially pass. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes the checksum covers from the magic
	 * value and (for selfboot format 1) the image revision.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;	/* unrecognized format: treat as pass */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole image into memory; a failed word breaks the
	 * loop early, which the i < size test below detects.
	 */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero modulo 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.
		 * Bytes 0 and 8 each hold 7 parity bits; byte 16
		 * holds 6 and the byte after it another 8.  The extra
		 * i++ skips each parity byte so that only data bytes
		 * land in data[].
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each stored parity bit must complement the low bit
		 * of its data byte's popcount (odd overall parity).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Lastly, validate the checksum keyword in the VPD read-only
	 * section (if present): the bytes from the start of the VPD
	 * block through the checksum byte must sum to zero mod 256.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
13045
13046 #define TG3_SERDES_TIMEOUT_SEC  2
13047 #define TG3_COPPER_TIMEOUT_SEC  6
13048
13049 static int tg3_test_link(struct tg3 *tp)
13050 {
13051         int i, max;
13052
13053         if (!netif_running(tp->dev))
13054                 return -ENODEV;
13055
13056         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13057                 max = TG3_SERDES_TIMEOUT_SEC;
13058         else
13059                 max = TG3_COPPER_TIMEOUT_SEC;
13060
13061         for (i = 0; i < max; i++) {
13062                 if (tp->link_up)
13063                         return 0;
13064
13065                 if (msleep_interruptible(1000))
13066                         break;
13067         }
13068
13069         return -EIO;
13070 }
13071
/* ethtool register self-test.
 *
 * Only the commonly used registers are tested.  Each reg_tbl entry
 * gives a register offset, TG3_FL_* applicability flags, a mask of
 * read-only bits, and a mask of read/write bits.  Every applicable
 * register is written with zeros and then with all RW bits set; after
 * each write the read-only bits must be unchanged and the read/write
 * bits must read back exactly as written.  The original register
 * value is restored afterwards.  Returns 0 on pass, -EIO on the
 * first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so table entries can be filtered below. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Restore the register before reporting the failure. */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
13292
13293 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13294 {
13295         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13296         int i;
13297         u32 j;
13298
13299         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13300                 for (j = 0; j < len; j += 4) {
13301                         u32 val;
13302
13303                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13304                         tg3_read_mem(tp, offset + j, &val);
13305                         if (val != test_pattern[i])
13306                                 return -EIO;
13307                 }
13308         }
13309         return 0;
13310 }
13311
/* ethtool memory self-test.
 *
 * Selects the { offset, len } internal-memory map matching the chip
 * family (each table is terminated by offset 0xffffffff) and
 * pattern-tests every range via tg3_do_mem_test().  Returns 0 on
 * success or the first failing range's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-family memory maps: { offset, len } pairs, 0xffffffff
	 * terminated.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the memory map matching this chip family; order matters
	 * since the flags are cumulative (most specific first).
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
13381
/* Parameters of the canned TSO loopback test packet. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned header for the TSO loopback self-test: an ethertype field
 * followed by a 20-byte IPv4 header and a 20-byte TCP header with
 * 12 bytes of options (matching the TG3_TSO_*_LEN constants above).
 * tg3_run_loopback() copies this after the MAC addresses, then
 * patches the IP total-length field (and, for HW TSO, zeroes the
 * TCP checksum) before transmitting.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: ver/ihl, tos, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, flags/frag_off */
0x40, 0x06, 0x00, 0x00,		/* IP: ttl, protocol (6 = TCP), csum */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: dest 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source/dest ports */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset/flags, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,		/* timestamp value (dummy) */
0x11, 0x11, 0x11, 0x11,		/* timestamp echo (dummy) */
};
13404
13405 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13406 {
13407         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13408         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13409         u32 budget;
13410         struct sk_buff *skb;
13411         u8 *tx_data, *rx_data;
13412         dma_addr_t map;
13413         int num_pkts, tx_len, rx_len, i, err;
13414         struct tg3_rx_buffer_desc *desc;
13415         struct tg3_napi *tnapi, *rnapi;
13416         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13417
13418         tnapi = &tp->napi[0];
13419         rnapi = &tp->napi[0];
13420         if (tp->irq_cnt > 1) {
13421                 if (tg3_flag(tp, ENABLE_RSS))
13422                         rnapi = &tp->napi[1];
13423                 if (tg3_flag(tp, ENABLE_TSS))
13424                         tnapi = &tp->napi[1];
13425         }
13426         coal_now = tnapi->coal_now | rnapi->coal_now;
13427
13428         err = -EIO;
13429
13430         tx_len = pktsz;
13431         skb = netdev_alloc_skb(tp->dev, tx_len);
13432         if (!skb)
13433                 return -ENOMEM;
13434
13435         tx_data = skb_put(skb, tx_len);
13436         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13437         memset(tx_data + ETH_ALEN, 0x0, 8);
13438
13439         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13440
13441         if (tso_loopback) {
13442                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13443
13444                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13445                               TG3_TSO_TCP_OPT_LEN;
13446
13447                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13448                        sizeof(tg3_tso_header));
13449                 mss = TG3_TSO_MSS;
13450
13451                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13452                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13453
13454                 /* Set the total length field in the IP header */
13455                 iph->tot_len = htons((u16)(mss + hdr_len));
13456
13457                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13458                               TXD_FLAG_CPU_POST_DMA);
13459
13460                 if (tg3_flag(tp, HW_TSO_1) ||
13461                     tg3_flag(tp, HW_TSO_2) ||
13462                     tg3_flag(tp, HW_TSO_3)) {
13463                         struct tcphdr *th;
13464                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13465                         th = (struct tcphdr *)&tx_data[val];
13466                         th->check = 0;
13467                 } else
13468                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13469
13470                 if (tg3_flag(tp, HW_TSO_3)) {
13471                         mss |= (hdr_len & 0xc) << 12;
13472                         if (hdr_len & 0x10)
13473                                 base_flags |= 0x00000010;
13474                         base_flags |= (hdr_len & 0x3e0) << 5;
13475                 } else if (tg3_flag(tp, HW_TSO_2))
13476                         mss |= hdr_len << 9;
13477                 else if (tg3_flag(tp, HW_TSO_1) ||
13478                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13479                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13480                 } else {
13481                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13482                 }
13483
13484                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13485         } else {
13486                 num_pkts = 1;
13487                 data_off = ETH_HLEN;
13488
13489                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13490                     tx_len > VLAN_ETH_FRAME_LEN)
13491                         base_flags |= TXD_FLAG_JMB_PKT;
13492         }
13493
13494         for (i = data_off; i < tx_len; i++)
13495                 tx_data[i] = (u8) (i & 0xff);
13496
13497         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13498         if (pci_dma_mapping_error(tp->pdev, map)) {
13499                 dev_kfree_skb(skb);
13500                 return -EIO;
13501         }
13502
13503         val = tnapi->tx_prod;
13504         tnapi->tx_buffers[val].skb = skb;
13505         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13506
13507         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13508                rnapi->coal_now);
13509
13510         udelay(10);
13511
13512         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13513
13514         budget = tg3_tx_avail(tnapi);
13515         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13516                             base_flags | TXD_FLAG_END, mss, 0)) {
13517                 tnapi->tx_buffers[val].skb = NULL;
13518                 dev_kfree_skb(skb);
13519                 return -EIO;
13520         }
13521
13522         tnapi->tx_prod++;
13523
13524         /* Sync BD data before updating mailbox */
13525         wmb();
13526
13527         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13528         tr32_mailbox(tnapi->prodmbox);
13529
13530         udelay(10);
13531
13532         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13533         for (i = 0; i < 35; i++) {
13534                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13535                        coal_now);
13536
13537                 udelay(10);
13538
13539                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13540                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13541                 if ((tx_idx == tnapi->tx_prod) &&
13542                     (rx_idx == (rx_start_idx + num_pkts)))
13543                         break;
13544         }
13545
13546         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13547         dev_kfree_skb(skb);
13548
13549         if (tx_idx != tnapi->tx_prod)
13550                 goto out;
13551
13552         if (rx_idx != rx_start_idx + num_pkts)
13553                 goto out;
13554
13555         val = data_off;
13556         while (rx_idx != rx_start_idx) {
13557                 desc = &rnapi->rx_rcb[rx_start_idx++];
13558                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13559                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13560
13561                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13562                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13563                         goto out;
13564
13565                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13566                          - ETH_FCS_LEN;
13567
13568                 if (!tso_loopback) {
13569                         if (rx_len != tx_len)
13570                                 goto out;
13571
13572                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13573                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13574                                         goto out;
13575                         } else {
13576                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13577                                         goto out;
13578                         }
13579                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13580                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13581                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13582                         goto out;
13583                 }
13584
13585                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13586                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13587                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13588                                              mapping);
13589                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13590                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13591                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13592                                              mapping);
13593                 } else
13594                         goto out;
13595
13596                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13597                                             PCI_DMA_FROMDEVICE);
13598
13599                 rx_data += TG3_RX_OFFSET(tp);
13600                 for (i = data_off; i < rx_len; i++, val++) {
13601                         if (*(rx_data + i) != (u8) (val & 0xff))
13602                                 goto out;
13603                 }
13604         }
13605
13606         err = 0;
13607
13608         /* tg3_free_rings will unmap and free the rx_data */
13609 out:
13610         return err;
13611 }
13612
/* Per-size failure bits ORed into the data[] slots by tg3_test_loopback():
 * standard-MTU frame, jumbo frame and TSO loopback respectively.
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
13620
/* Run the MAC, PHY and (optionally) external loopback self-tests.
 *
 * Failed sub-tests OR TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits into
 * data[TG3_MAC_LOOPB_TEST], data[TG3_PHY_LOOPB_TEST] and, when
 * @do_extlpbk is set, data[TG3_EXT_LOOPB_TEST].  Returns 0 when every
 * executed loopback passed, -EIO otherwise (including when the device
 * is not running or the hardware reset fails).  Called from
 * tg3_self_test() under tg3_full_lock().
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;
        u32 jmb_pkt_sz = 9000;

        /* Jumbo test packet size, capped by any device DMA limit. */
        if (tp->dma_limit)
                jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

        /* Disable EEE for the duration of the tests; the saved
         * capability bit is restored at "done".
         */
        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, true);
        if (err) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        /* PHY (and external) loopback is only run when the driver, not
         * phylib, controls the PHY and the link is copper.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
               data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}
13735
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Always runs the NVRAM test and, unless external loopback was
 * requested, the link test.  When ETH_TEST_FL_OFFLINE is set the
 * device is halted and the register, memory, loopback and interrupt
 * tests run as well, after which the hardware is restarted.  Per-test
 * results land in data[] and ETH_TEST_FL_FAILED is set in
 * etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        /* A sleeping device must be powered up before it can be tested;
         * if that fails, mark every test failed and bail out.
         */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                if (tg3_power_up(tp)) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                        return;
                }
                tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_NVRAM_TEST] = 1;
        }
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_LINK_TEST] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                /* Quiesce the device before the destructive tests. */
                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_REGISTER_TEST] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_MEMORY_TEST] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, data, doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                /* The interrupt test re-enables interrupts and so must run
                 * outside tg3_full_lock().
                 */
                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_INTERRUPT_TEST] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Restore normal operation. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, true);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down_prepare(tp);

}
13824
/* SIOCSHWTSTAMP handler: configure hardware packet timestamping.
 *
 * Copies a struct hwtstamp_config from user space, validates it,
 * translates the requested rx_filter into TG3_RX_PTP_CTL register bits
 * and tx_type into the TX_TSTAMP_EN flag, then echoes the accepted
 * config back to user space.  Returns -EOPNOTSUPP on non-PTP-capable
 * hardware, -EFAULT on bad user pointers, -EINVAL for unsupported
 * flags and -ERANGE for unsupported tx_type/rx_filter values.
 */
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct tg3 *tp = netdev_priv(dev);
        struct hwtstamp_config stmpconf;

        if (!tg3_flag(tp, PTP_CAPABLE))
                return -EOPNOTSUPP;

        if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
                return -EFAULT;

        /* No hwtstamp_config flags are supported. */
        if (stmpconf.flags)
                return -EINVAL;

        if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
            stmpconf.tx_type != HWTSTAMP_TX_OFF)
                return -ERANGE;

        /* Map each supported rx_filter onto its RX PTP control bits. */
        switch (stmpconf.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tp->rxptpctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_ALL_V1_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        default:
                return -ERANGE;
        }

        /* Program the RX filter only while the device is up. */
        if (netif_running(dev) && tp->rxptpctl)
                tw32(TG3_RX_PTP_CTL,
                     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

        if (stmpconf.tx_type == HWTSTAMP_TX_ON)
                tg3_flag_set(tp, TX_TSTAMP_EN);
        else
                tg3_flag_clear(tp, TX_TSTAMP_EN);

        return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
                -EFAULT : 0;
}
13911
/* SIOCGHWTSTAMP handler: report the current timestamping configuration.
 *
 * Performs the inverse mapping of tg3_hwtstamp_set(): translates the
 * cached tp->rxptpctl register bits back into an rx_filter value and
 * the TX_TSTAMP_EN flag into tx_type, then copies the resulting
 * struct hwtstamp_config to user space.  Returns -EOPNOTSUPP on
 * non-PTP-capable hardware and -EFAULT on bad user pointers; -ERANGE
 * (with a one-time warning) indicates rxptpctl held a combination
 * this function does not recognize.
 */
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
        struct tg3 *tp = netdev_priv(dev);
        struct hwtstamp_config stmpconf;

        if (!tg3_flag(tp, PTP_CAPABLE))
                return -EOPNOTSUPP;

        stmpconf.flags = 0;
        stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
                            HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

        switch (tp->rxptpctl) {
        case 0:
                stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                break;
        default:
                WARN_ON_ONCE(1);
                return -ERANGE;
        }

        return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
                -EFAULT : 0;
}
13972
/* ndo_do_ioctl handler: MII register access and hardware timestamping.
 *
 * When phylib manages the PHY, all MII requests are forwarded to it.
 * Otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are serviced directly
 * via __tg3_readphy()/__tg3_writephy() under tp->lock; SERDES devices
 * have no MII PHY and fall through to -EOPNOTSUPP.  SIOCSHWTSTAMP and
 * SIOCGHWTSTAMP delegate to the tg3_hwtstamp_* helpers.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
                return phy_mii_ioctl(phydev, ifr, cmd);
        }

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = tp->phy_addr;

                /* fall through */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = __tg3_readphy(tp, data->phy_id & 0x1f,
                                    data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = __tg3_writephy(tp, data->phy_id & 0x1f,
                                     data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        case SIOCSHWTSTAMP:
                return tg3_hwtstamp_set(dev, ifr);

        case SIOCGHWTSTAMP:
                return tg3_hwtstamp_get(dev, ifr);

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
14037
14038 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14039 {
14040         struct tg3 *tp = netdev_priv(dev);
14041
14042         memcpy(ec, &tp->coal, sizeof(*ec));
14043         return 0;
14044 }
14045
14046 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14047 {
14048         struct tg3 *tp = netdev_priv(dev);
14049         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14050         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14051
14052         if (!tg3_flag(tp, 5705_PLUS)) {
14053                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14054                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14055                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14056                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14057         }
14058
14059         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14060             (!ec->rx_coalesce_usecs) ||
14061             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14062             (!ec->tx_coalesce_usecs) ||
14063             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14064             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14065             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14066             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14067             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14068             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14069             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14070             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14071                 return -EINVAL;
14072
14073         /* Only copy relevant parameters, ignore all others. */
14074         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14075         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14076         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14077         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14078         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14079         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14080         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14081         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14082         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14083
14084         if (netif_running(dev)) {
14085                 tg3_full_lock(tp, 0);
14086                 __tg3_set_coalesce(tp, &tp->coal);
14087                 tg3_full_unlock(tp);
14088         }
14089         return 0;
14090 }
14091
/* ethtool set_eee handler: accept a new Energy Efficient Ethernet
 * configuration.  Direct changes to the advertisement mask are
 * rejected; the Tx LPI timer is bounded by TG3_CPMU_DBTMR1_LNKIDLE_MAX.
 * On an EEE-incapable board returns -EOPNOTSUPP; invalid requests
 * return -EINVAL.  When the device is up the new settings are applied
 * via tg3_setup_eee() followed by a PHY reset.
 */
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                netdev_warn(tp->dev, "Board does not support EEE!\n");
                return -EOPNOTSUPP;
        }

        if (edata->advertised != tp->eee.advertised) {
                netdev_warn(tp->dev,
                            "Direct manipulation of EEE advertisement is not supported\n");
                return -EINVAL;
        }

        if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
                netdev_warn(tp->dev,
                            "Maximal Tx Lpi timer supported is %#x(u)\n",
                            TG3_CPMU_DBTMR1_LNKIDLE_MAX);
                return -EINVAL;
        }

        tp->eee = *edata;

        /* Manual EEE configuration overrides any defaults and may flap
         * the management link; warn about the latter.
         */
        tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
        tg3_warn_mgmt_link_flap(tp);

        if (netif_running(tp->dev)) {
                tg3_full_lock(tp, 0);
                tg3_setup_eee(tp);
                tg3_phy_reset(tp);
                tg3_full_unlock(tp);
        }

        return 0;
}
14128
14129 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14130 {
14131         struct tg3 *tp = netdev_priv(dev);
14132
14133         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14134                 netdev_warn(tp->dev,
14135                             "Board does not support EEE!\n");
14136                 return -EOPNOTSUPP;
14137         }
14138
14139         *edata = tp->eee;
14140         return 0;
14141 }
14142
/* ethtool entry points for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
        .get_rxnfc              = tg3_get_rxnfc,
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh               = tg3_get_rxfh,
        .set_rxfh               = tg3_set_rxfh,
        .get_channels           = tg3_get_channels,
        .set_channels           = tg3_set_channels,
        .get_ts_info            = tg3_get_ts_info,
        .get_eee                = tg3_get_eee,
        .set_eee                = tg3_set_eee,
        .get_link_ksettings     = tg3_get_link_ksettings,
        .set_link_ksettings     = tg3_set_link_ksettings,
};
14179
14180 static void tg3_get_stats64(struct net_device *dev,
14181                             struct rtnl_link_stats64 *stats)
14182 {
14183         struct tg3 *tp = netdev_priv(dev);
14184
14185         spin_lock_bh(&tp->lock);
14186         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14187                 *stats = tp->net_stats_prev;
14188                 spin_unlock_bh(&tp->lock);
14189                 return;
14190         }
14191
14192         tg3_get_nstats(tp, stats);
14193         spin_unlock_bh(&tp->lock);
14194 }
14195
/* ndo_set_rx_mode handler: reprogram the RX filters under the full
 * device lock.  A no-op while the interface is down; the mode is
 * applied when the device is brought up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
14207
14208 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14209                                int new_mtu)
14210 {
14211         dev->mtu = new_mtu;
14212
14213         if (new_mtu > ETH_DATA_LEN) {
14214                 if (tg3_flag(tp, 5780_CLASS)) {
14215                         netdev_update_features(dev);
14216                         tg3_flag_clear(tp, TSO_CAPABLE);
14217                 } else {
14218                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14219                 }
14220         } else {
14221                 if (tg3_flag(tp, 5780_CLASS)) {
14222                         tg3_flag_set(tp, TSO_CAPABLE);
14223                         netdev_update_features(dev);
14224                 }
14225                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14226         }
14227 }
14228
/* ndo_change_mtu handler.
 *
 * If the device is down, only the MTU and related flags are recorded
 * (tg3_set_mtu()); the hardware picks them up at the next open.  If it
 * is up, traffic is stopped, the chip is halted and restarted with the
 * new MTU, and traffic is resumed.  Returns 0 on success or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;
        bool reset_phy = false;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_set_mtu(dev, tp, new_mtu);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
            tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720)
                reset_phy = true;

        err = tg3_restart_hw(tp, reset_phy);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* Restart the PHY state machine only after a clean hw restart. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
14274
/* Network stack entry points for tg3 devices. */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
14292
14293 static void tg3_get_eeprom_size(struct tg3 *tp)
14294 {
14295         u32 cursize, val, magic;
14296
14297         tp->nvram_size = EEPROM_CHIP_SIZE;
14298
14299         if (tg3_nvram_read(tp, 0, &magic) != 0)
14300                 return;
14301
14302         if ((magic != TG3_EEPROM_MAGIC) &&
14303             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14304             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14305                 return;
14306
14307         /*
14308          * Size the chip by reading offsets at increasing powers of two.
14309          * When we encounter our validation signature, we know the addressing
14310          * has wrapped around, and thus have our chip size.
14311          */
14312         cursize = 0x10;
14313
14314         while (cursize < tp->nvram_size) {
14315                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14316                         return;
14317
14318                 if (val == magic)
14319                         break;
14320
14321                 cursize <<= 1;
14322         }
14323
14324         tp->nvram_size = cursize;
14325 }
14326
14327 static void tg3_get_nvram_size(struct tg3 *tp)
14328 {
14329         u32 val;
14330
14331         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14332                 return;
14333
14334         /* Selfboot format */
14335         if (val != TG3_EEPROM_MAGIC) {
14336                 tg3_get_eeprom_size(tp);
14337                 return;
14338         }
14339
14340         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14341                 if (val != 0) {
14342                         /* This is confusing.  We want to operate on the
14343                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14344                          * call will read from NVRAM and byteswap the data
14345                          * according to the byteswapping settings for all
14346                          * other register accesses.  This ensures the data we
14347                          * want will always reside in the lower 16-bits.
14348                          * However, the data in NVRAM is in LE format, which
14349                          * means the data from the NVRAM read will always be
14350                          * opposite the endianness of the CPU.  The 16-bit
14351                          * byteswap then brings the data to CPU endianness.
14352                          */
14353                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14354                         return;
14355                 }
14356         }
14357         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14358 }
14359
/* Legacy NVRAM probe, used as the fallback when no chip-specific
 * handler applies (see tg3_nvram_init).  Reads NVRAM_CFG1 to learn
 * whether a flash interface is strapped in and, on 5750 or 5780-class
 * parts, decodes the vendor field into a JEDEC id and page size.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface strapped: clear the
		 * compatibility-bypass bit in NVRAM_CFG1.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Decode the strapped flash vendor into JEDEC id,
		 * page size and buffering.
		 */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* All other chips: assume a buffered Atmel AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14410
14411 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14412 {
14413         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14414         case FLASH_5752PAGE_SIZE_256:
14415                 tp->nvram_pagesize = 256;
14416                 break;
14417         case FLASH_5752PAGE_SIZE_512:
14418                 tp->nvram_pagesize = 512;
14419                 break;
14420         case FLASH_5752PAGE_SIZE_1K:
14421                 tp->nvram_pagesize = 1024;
14422                 break;
14423         case FLASH_5752PAGE_SIZE_2K:
14424                 tp->nvram_pagesize = 2048;
14425                 break;
14426         case FLASH_5752PAGE_SIZE_4K:
14427                 tp->nvram_pagesize = 4096;
14428                 break;
14429         case FLASH_5752PAGE_SIZE_264:
14430                 tp->nvram_pagesize = 264;
14431                 break;
14432         case FLASH_5752PAGE_SIZE_528:
14433                 tp->nvram_pagesize = 528;
14434                 break;
14435         }
14436 }
14437
/* NVRAM probe for the 5752: decode the NVRAM_CFG1 vendor straps into a
 * JEDEC id and buffered/flash flags, then derive the page size.  Bit 27
 * of NVRAM_CFG1 marks the part as TPM-protected.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		/* Page size comes from the strapped flash geometry. */
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14478
/* NVRAM probe for the 5755: decode the NVRAM_CFG1 vendor straps into a
 * JEDEC id, page size and total size.  When the TPM protection bit
 * (27) is set, a reduced usable size is reported for each part.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size depends on the exact strap; protected parts report
		 * the truncated, accessible size.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14534
/* NVRAM probe for 5787/5784/5785-class chips (see tg3_nvram_init):
 * map the NVRAM_CFG1 vendor straps to a JEDEC id, buffered/flash
 * flags and page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM part: page size is the whole chip, and the
		 * compatibility-bypass bit is cleared.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14572
/* NVRAM probe for the 5761.  The vendor straps select Atmel or ST
 * flash parts; the total size comes from the NVRAM_ADDR_LOCKOUT
 * register when the TPM protection bit (27) is set, otherwise from
 * the strapped device id.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected part: the hardware lockout register reports
		 * the accessible size.
		 */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows directly from the device id. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14647
14648 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14649 {
14650         tp->nvram_jedecnum = JEDEC_ATMEL;
14651         tg3_flag_set(tp, NVRAM_BUFFERED);
14652         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14653 }
14654
/* NVRAM probe for 57780 and 57765-class chips (see tg3_nvram_init).
 * EEPROM straps return early after clearing the compatibility bypass;
 * flash straps fall through to the common page-size decode at the
 * bottom.  Unknown straps mark the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the exact Atmel device strap. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14726
14727
/* NVRAM probe for 5717/5719 (see tg3_nvram_init): decode the
 * NVRAM_CFG1 vendor straps into a JEDEC id, flags and size.  Straps
 * whose size is left unset here are sized later by
 * tg3_get_nvram_size(), which tg3_nvram_init() calls when
 * tp->nvram_size is still zero.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Size is detected later by tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Size is detected later by tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14805
/* NVRAM probe for 5720 and 5762 (see tg3_nvram_init).  On the 5762,
 * Macronix parts are handled directly (the size comes from the
 * autosense status register) and the remaining pinstraps are remapped
 * to their 5720 equivalents before the common decode below.  At the
 * end, a 5762 whose NVRAM lacks a valid signature is marked NO_NVRAM.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			/* Size from the autosense device-id field.  Note
			 * ">>" binds tighter than "&", so the devid field
			 * is shifted down and then masked; the outer shift
			 * presumably scales megabytes to bytes (see
			 * AUTOSENSE_SIZE_IN_MB) — confirm against the
			 * register definition.
			 */
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD strap implies the larger AT24C512-sized EEPROM. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 the size is left for later detection. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 the size is left for later detection. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		/* Verify the NVRAM actually holds a valid image; if the
		 * signature is wrong, treat the device as having none.
		 */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14969
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM state machine and program the default serial
	 * clock period before attempting any accesses.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with other agents; take the hardware
		 * arbitration lock before touching it.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-family-specific routine that decodes
		 * the NVRAM configuration (device type, page size, size).
		 */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Probe the size if the per-chip routine did not set it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701 have no NVRAM interface; use the serial
		 * EEPROM instead.
		 */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15044
/* Maps a PCI subsystem vendor/device pair to the PHY ID used on that
 * board.  Consulted by tg3_phy_probe() as a fallback when neither the
 * MII registers nor the EEPROM yield a usable PHY ID.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* TG3_PHY_ID_*; 0 marks boards with a serdes PHY
			 * (see the !tp->phy_id test in tg3_phy_probe())
			 */
};
15049
/* Hardcoded subsystem-ID -> PHY-ID table for early boards whose
 * EEPROM carries no valid signature; grouped by board vendor.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15113
15114 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15115 {
15116         int i;
15117
15118         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15119                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15120                      tp->pdev->subsystem_vendor) &&
15121                     (subsys_id_to_phy_id[i].subsys_devid ==
15122                      tp->pdev->subsystem_device))
15123                         return &subsys_id_to_phy_id[i];
15124         }
15125         return NULL;
15126 }
15127
/* Pull the board configuration out of the NIC SRAM shadow of the
 * EEPROM/NVRAM (PHY ID, LED mode, WOL/ASF/APE enables, serdes flags)
 * and translate it into tp->phy_id, tp->led_ctrl, tp->phy_flags and
 * the corresponding tg3 feature flags.  Finally syncs the PCI core's
 * wakeup capability/enable state with the WOL flags.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 keeps its config in VCPU shadow registers rather than
	 * NIC SRAM; handle it separately and skip the SRAM parsing.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* The SRAM config area is only trusted when the bootcode has
	 * stamped it with the magic signature.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane (nonzero,
		 * < 0x100) bootcode data version.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM halves into the
		 * driver's internal TG3_PHY_ID_* layout (same packing as
		 * tg3_phy_probe() uses for MII_PHYSID1/2).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ chips carry extended (Shasta) LED modes in CFG_2. */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the SRAM setting. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Two specific Arima boards set WP but still need
			 * writes enabled.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards are only WOL capable when the config
		 * explicitly says so.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Propagate the final WOL state to the device model. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15344
/* Read one 32-bit word from the APE OTP region into *val.
 * @offset is a word index; it is scaled by 8 to form the controller
 * address (units per the APE OTP controller addressing — hardware
 * specific).  Returns 0 on success, -EBUSY on command timeout, or the
 * error from tg3_nvram_lock().  *val is untouched on failure.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	/* The OTP controller is shared with firmware; serialize access
	 * through the NVRAM arbitration lock.
	 */
	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back CTRL before polling (presumably to flush the posted
	 * write — confirm against APE OTP controller docs).
	 */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion: up to 100 x 10us = ~1ms. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Clear the control register regardless of the outcome. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15377
15378 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15379 {
15380         int i;
15381         u32 val;
15382
15383         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15384         tw32(OTP_CTRL, cmd);
15385
15386         /* Wait for up to 1 ms for command to execute. */
15387         for (i = 0; i < 100; i++) {
15388                 val = tr32(OTP_STATUS);
15389                 if (val & OTP_STATUS_CMD_DONE)
15390                         break;
15391                 udelay(10);
15392         }
15393
15394         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15395 }
15396
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 on any OTP command failure; callers must treat 0 as
 * "no valid OTP config".
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First half: read the word at the first magic address. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second half: read the word at the second magic address. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15426
15427 static void tg3_phy_init_link_config(struct tg3 *tp)
15428 {
15429         u32 adv = ADVERTISED_Autoneg;
15430
15431         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15432                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15433                         adv |= ADVERTISED_1000baseT_Half;
15434                 adv |= ADVERTISED_1000baseT_Full;
15435         }
15436
15437         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15438                 adv |= ADVERTISED_100baseT_Half |
15439                        ADVERTISED_100baseT_Full |
15440                        ADVERTISED_10baseT_Half |
15441                        ADVERTISED_10baseT_Full |
15442                        ADVERTISED_TP;
15443         else
15444                 adv |= ADVERTISED_FIBRE;
15445
15446         tp->link_config.advertising = adv;
15447         tp->link_config.speed = SPEED_UNKNOWN;
15448         tp->link_config.duplex = DUPLEX_UNKNOWN;
15449         tp->link_config.autoneg = AUTONEG_ENABLE;
15450         tp->link_config.active_speed = SPEED_UNKNOWN;
15451         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15452
15453         tp->old_link = -1;
15454 }
15455
/* Identify the PHY and set up initial PHY/link state.
 *
 * PHY ID resolution order: live MII_PHYSID1/2 registers (skipped when
 * ASF/APE firmware owns the PHY), then the ID already parsed from the
 * EEPROM in tg3_get_eeprom_hw_cfg(), then the hardcoded subsystem-ID
 * table, else fail with -ENODEV (unless on an SSB core).  Also
 * configures EEE capability, default link config, and optionally
 * resets/autonegotiates the PHY.  Returns 0 or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function gets its own APE PHY arbitration lock. */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Power-down link-keeping flags only make sense with management
	 * firmware present; clear them otherwise.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/2 into the driver's TG3_PHY_ID_* layout
		 * (same packing as tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			/* A zero table entry (or BCM8002) means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper chips and seed the default EEE state. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* With no management firmware and a copper PHY, reset the PHY
	 * and (re)start autonegotiation unless link is already up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice; link status is latched-low, so the
		 * first read clears a stale latch.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	/* 5401 PHYs need their DSP loaded; done twice deliberately,
	 * with the first error reported if the initial load fails.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15610
/* Parse the PCI VPD block: extract the board part number into
 * tp->board_part_number and, on Dell boards (MFR_ID "1028"), a vendor
 * firmware version prefix into tp->fw_ver.  If no usable VPD is found,
 * fall back to a part-number string derived from the PCI device ID.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* tg3_vpd_readblock() allocates; freed at out_not_found. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell-specific: if MFR_ID is "1028" (Dell's PCI vendor ID as
	 * ASCII), pull the V0 keyword into fw_ver.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp to the buffer; " bc " suffix marks a bootcode
		 * version appended elsewhere.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Extract the board part number (PN keyword). */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15734
15735 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15736 {
15737         u32 val;
15738
15739         if (tg3_nvram_read(tp, offset, &val) ||
15740             (val & 0xfc000000) != 0x0c000000 ||
15741             tg3_nvram_read(tp, offset + 4, &val) ||
15742             val != 0)
15743                 return 0;
15744
15745         return 1;
15746 }
15747
/* Read the bootcode firmware version from NVRAM and append it to
 * tp->fw_ver.  Two on-flash layouts exist: a "new" format whose image
 * header points at a 16-byte version string, and a legacy format where
 * major/minor are packed into a pointer-table word.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset, word 0x4 the image
	 * start address used to rebase the version pointer below.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-format images start with a 0x0c000000 signature word
	 * followed by a zero word (same check as tg3_fw_img_is_valid).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever version text (e.g. from VPD) is
	 * already in the buffer.
	 */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the fixed 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy layout: major/minor live in the bootcode
		 * version word of the pointer table.
		 */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15799
15800 static void tg3_read_hwsb_ver(struct tg3 *tp)
15801 {
15802         u32 val, major, minor;
15803
15804         /* Use native endian representation */
15805         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15806                 return;
15807
15808         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15809                 TG3_NVM_HWSB_CFG1_MAJSFT;
15810         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15811                 TG3_NVM_HWSB_CFG1_MINSFT;
15812
15813         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15814 }
15815
/* Decode a self-boot NVRAM image version and append it to tp->fw_ver
 * as "sb vM.mm" plus an optional build-letter suffix.  @val is NVRAM
 * word 0, which encodes the image format and revision.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format-1 images carry a version word we can decode. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each image revision stores its edh (version) word at a
	 * different offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision - leave just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor prints as two digits, build maps to
	 * a single letter 'a'..'z' below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds 1..26 are shown as a trailing letter 'a'..'z'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15870
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its version bytes to tp->fw_ver as ", <ver>".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for an ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address; later parts store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* Follow the entry to the image, validate its header, then read
	 * the word that locates the version string.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no bounds check before these two writes; this
	 * appears to rely on earlier writers leaving spare room in
	 * tp->fw_ver - confirm against the TG3_VER_SIZE bounds used by
	 * the other version readers.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, truncating at the buffer end. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy when fewer than sizeof(v) bytes remain. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15922
15923 static void tg3_probe_ncsi(struct tg3 *tp)
15924 {
15925         u32 apedata;
15926
15927         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15928         if (apedata != APE_SEG_SIG_MAGIC)
15929                 return;
15930
15931         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15932         if (!(apedata & APE_FW_STATUS_READY))
15933                 return;
15934
15935         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15936                 tg3_flag_set(tp, APE_HAS_NCSI);
15937 }
15938
15939 static void tg3_read_dash_ver(struct tg3 *tp)
15940 {
15941         int vlen;
15942         u32 apedata;
15943         char *fwtype;
15944
15945         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15946
15947         if (tg3_flag(tp, APE_HAS_NCSI))
15948                 fwtype = "NCSI";
15949         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15950                 fwtype = "SMASH";
15951         else
15952                 fwtype = "DASH";
15953
15954         vlen = strlen(tp->fw_ver);
15955
15956         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15957                  fwtype,
15958                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15959                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15960                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15961                  (apedata & APE_FW_VERSION_BLDMSK));
15962 }
15963
15964 static void tg3_read_otp_ver(struct tg3 *tp)
15965 {
15966         u32 val, val2;
15967
15968         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15969                 return;
15970
15971         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15972             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15973             TG3_OTP_MAGIC0_VALID(val)) {
15974                 u64 val64 = (u64) val << 32 | val2;
15975                 u32 ver = 0;
15976                 int i, vlen;
15977
15978                 for (i = 0; i < 7; i++) {
15979                         if ((val64 & 0xff) == 0)
15980                                 break;
15981                         ver = val64 & 0xff;
15982                         val64 >>= 8;
15983                 }
15984                 vlen = strlen(tp->fw_ver);
15985                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15986         }
15987 }
15988
15989 static void tg3_read_fw_ver(struct tg3 *tp)
15990 {
15991         u32 val;
15992         bool vpd_vers = false;
15993
15994         if (tp->fw_ver[0] != 0)
15995                 vpd_vers = true;
15996
15997         if (tg3_flag(tp, NO_NVRAM)) {
15998                 strcat(tp->fw_ver, "sb");
15999                 tg3_read_otp_ver(tp);
16000                 return;
16001         }
16002
16003         if (tg3_nvram_read(tp, 0, &val))
16004                 return;
16005
16006         if (val == TG3_EEPROM_MAGIC)
16007                 tg3_read_bc_ver(tp);
16008         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16009                 tg3_read_sb_ver(tp, val);
16010         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16011                 tg3_read_hwsb_ver(tp);
16012
16013         if (tg3_flag(tp, ENABLE_ASF)) {
16014                 if (tg3_flag(tp, ENABLE_APE)) {
16015                         tg3_probe_ncsi(tp);
16016                         if (!vpd_vers)
16017                                 tg3_read_dash_ver(tp);
16018                 } else if (!vpd_vers) {
16019                         tg3_read_mgmtfw_ver(tp);
16020                 }
16021         }
16022
16023         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16024 }
16025
16026 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16027 {
16028         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16029                 return TG3_RX_RET_MAX_SIZE_5717;
16030         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16031                 return TG3_RX_RET_MAX_SIZE_5700;
16032         else
16033                 return TG3_RX_RET_MAX_SIZE_5705;
16034 }
16035
/* Host bridge chipsets known to reorder posted PCI writes (AMD 762 /
 * AMD 8131 / VIA K8T800 family).  Presumably matched against system
 * bridges to enable a mailbox write-flush workaround - confirm at the
 * lookup site in tg3_get_invariants().
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
16042
/* Find the other PCI function of a dual-port (5704/5714-class) device
 * sharing our slot.  Returns the peer's pci_dev, or tp->pdev itself
 * when no distinct peer function exists.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Walk all 8 functions of our own device number looking for a
	 * function that is not us.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
16070
/* Determine the chip revision ID and derive the chip-family capability
 * flags (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 57765_CLASS,
 * 5717_PLUS, 57765_PLUS) that gate feature handling elsewhere.
 * @misc_ctrl_reg is the TG3PCI_MISC_HOST_CTRL config-space word.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the product-ID register that carries the real
		 * ASIC revision for this device generation.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 reports itself as 5720 A0. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	/* The remainder derives inclusive family flags; note that later
	 * checks build on flags set by earlier ones, so order matters.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
16158
16159 static bool tg3_10_100_only_device(struct tg3 *tp,
16160                                    const struct pci_device_id *ent)
16161 {
16162         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16163
16164         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16165              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16166             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16167                 return true;
16168
16169         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16170                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16171                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16172                                 return true;
16173                 } else {
16174                         return true;
16175                 }
16176         }
16177
16178         return false;
16179 }
16180
16181 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16182 {
16183         u32 misc_ctrl_reg;
16184         u32 pci_state_reg, grc_misc_cfg;
16185         u32 val;
16186         u16 pci_cmd;
16187         int err;
16188
16189         /* Force memory write invalidate off.  If we leave it on,
16190          * then on 5700_BX chips we have to enable a workaround.
16191          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16192          * to match the cacheline size.  The Broadcom driver have this
16193          * workaround but turns MWI off all the times so never uses
16194          * it.  This seems to suggest that the workaround is insufficient.
16195          */
16196         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16197         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16198         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16199
16200         /* Important! -- Make sure register accesses are byteswapped
16201          * correctly.  Also, for those chips that require it, make
16202          * sure that indirect register accesses are enabled before
16203          * the first operation.
16204          */
16205         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16206                               &misc_ctrl_reg);
16207         tp->misc_host_ctrl |= (misc_ctrl_reg &
16208                                MISC_HOST_CTRL_CHIPREV);
16209         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16210                                tp->misc_host_ctrl);
16211
16212         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16213
16214         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16215          * we need to disable memory and use config. cycles
16216          * only to access all registers. The 5702/03 chips
16217          * can mistakenly decode the special cycles from the
16218          * ICH chipsets as memory write cycles, causing corruption
16219          * of register and memory space. Only certain ICH bridges
16220          * will drive special cycles with non-zero data during the
16221          * address phase which can fall within the 5703's address
16222          * range. This is not an ICH bug as the PCI spec allows
16223          * non-zero address during special cycles. However, only
16224          * these ICH bridges are known to drive non-zero addresses
16225          * during special cycles.
16226          *
16227          * Since special cycles do not cross PCI bridges, we only
16228          * enable this workaround if the 5703 is on the secondary
16229          * bus of these ICH bridges.
16230          */
16231         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16232             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16233                 static struct tg3_dev_id {
16234                         u32     vendor;
16235                         u32     device;
16236                         u32     rev;
16237                 } ich_chipsets[] = {
16238                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16239                           PCI_ANY_ID },
16240                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16241                           PCI_ANY_ID },
16242                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16243                           0xa },
16244                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16245                           PCI_ANY_ID },
16246                         { },
16247                 };
16248                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16249                 struct pci_dev *bridge = NULL;
16250
16251                 while (pci_id->vendor != 0) {
16252                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16253                                                 bridge);
16254                         if (!bridge) {
16255                                 pci_id++;
16256                                 continue;
16257                         }
16258                         if (pci_id->rev != PCI_ANY_ID) {
16259                                 if (bridge->revision > pci_id->rev)
16260                                         continue;
16261                         }
16262                         if (bridge->subordinate &&
16263                             (bridge->subordinate->number ==
16264                              tp->pdev->bus->number)) {
16265                                 tg3_flag_set(tp, ICH_WORKAROUND);
16266                                 pci_dev_put(bridge);
16267                                 break;
16268                         }
16269                 }
16270         }
16271
16272         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16273                 static struct tg3_dev_id {
16274                         u32     vendor;
16275                         u32     device;
16276                 } bridge_chipsets[] = {
16277                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16278                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16279                         { },
16280                 };
16281                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16282                 struct pci_dev *bridge = NULL;
16283
16284                 while (pci_id->vendor != 0) {
16285                         bridge = pci_get_device(pci_id->vendor,
16286                                                 pci_id->device,
16287                                                 bridge);
16288                         if (!bridge) {
16289                                 pci_id++;
16290                                 continue;
16291                         }
16292                         if (bridge->subordinate &&
16293                             (bridge->subordinate->number <=
16294                              tp->pdev->bus->number) &&
16295                             (bridge->subordinate->busn_res.end >=
16296                              tp->pdev->bus->number)) {
16297                                 tg3_flag_set(tp, 5701_DMA_BUG);
16298                                 pci_dev_put(bridge);
16299                                 break;
16300                         }
16301                 }
16302         }
16303
16304         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16305          * DMA addresses > 40-bit. This bridge may have other additional
16306          * 57xx devices behind it in some 4-port NIC designs for example.
16307          * Any tg3 device found behind the bridge will also need the 40-bit
16308          * DMA workaround.
16309          */
16310         if (tg3_flag(tp, 5780_CLASS)) {
16311                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16312                 tp->msi_cap = tp->pdev->msi_cap;
16313         } else {
16314                 struct pci_dev *bridge = NULL;
16315
16316                 do {
16317                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16318                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16319                                                 bridge);
16320                         if (bridge && bridge->subordinate &&
16321                             (bridge->subordinate->number <=
16322                              tp->pdev->bus->number) &&
16323                             (bridge->subordinate->busn_res.end >=
16324                              tp->pdev->bus->number)) {
16325                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16326                                 pci_dev_put(bridge);
16327                                 break;
16328                         }
16329                 } while (bridge);
16330         }
16331
16332         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16333             tg3_asic_rev(tp) == ASIC_REV_5714)
16334                 tp->pdev_peer = tg3_find_peer(tp);
16335
16336         /* Determine TSO capabilities */
16337         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16338                 ; /* Do nothing. HW bug. */
16339         else if (tg3_flag(tp, 57765_PLUS))
16340                 tg3_flag_set(tp, HW_TSO_3);
16341         else if (tg3_flag(tp, 5755_PLUS) ||
16342                  tg3_asic_rev(tp) == ASIC_REV_5906)
16343                 tg3_flag_set(tp, HW_TSO_2);
16344         else if (tg3_flag(tp, 5750_PLUS)) {
16345                 tg3_flag_set(tp, HW_TSO_1);
16346                 tg3_flag_set(tp, TSO_BUG);
16347                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16348                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16349                         tg3_flag_clear(tp, TSO_BUG);
16350         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16351                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16352                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16353                 tg3_flag_set(tp, FW_TSO);
16354                 tg3_flag_set(tp, TSO_BUG);
16355                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16356                         tp->fw_needed = FIRMWARE_TG3TSO5;
16357                 else
16358                         tp->fw_needed = FIRMWARE_TG3TSO;
16359         }
16360
16361         /* Selectively allow TSO based on operating conditions */
16362         if (tg3_flag(tp, HW_TSO_1) ||
16363             tg3_flag(tp, HW_TSO_2) ||
16364             tg3_flag(tp, HW_TSO_3) ||
16365             tg3_flag(tp, FW_TSO)) {
16366                 /* For firmware TSO, assume ASF is disabled.
16367                  * We'll disable TSO later if we discover ASF
16368                  * is enabled in tg3_get_eeprom_hw_cfg().
16369                  */
16370                 tg3_flag_set(tp, TSO_CAPABLE);
16371         } else {
16372                 tg3_flag_clear(tp, TSO_CAPABLE);
16373                 tg3_flag_clear(tp, TSO_BUG);
16374                 tp->fw_needed = NULL;
16375         }
16376
16377         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16378                 tp->fw_needed = FIRMWARE_TG3;
16379
16380         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16381                 tp->fw_needed = FIRMWARE_TG357766;
16382
16383         tp->irq_max = 1;
16384
16385         if (tg3_flag(tp, 5750_PLUS)) {
16386                 tg3_flag_set(tp, SUPPORT_MSI);
16387                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16388                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16389                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16390                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16391                      tp->pdev_peer == tp->pdev))
16392                         tg3_flag_clear(tp, SUPPORT_MSI);
16393
16394                 if (tg3_flag(tp, 5755_PLUS) ||
16395                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16396                         tg3_flag_set(tp, 1SHOT_MSI);
16397                 }
16398
16399                 if (tg3_flag(tp, 57765_PLUS)) {
16400                         tg3_flag_set(tp, SUPPORT_MSIX);
16401                         tp->irq_max = TG3_IRQ_MAX_VECS;
16402                 }
16403         }
16404
16405         tp->txq_max = 1;
16406         tp->rxq_max = 1;
16407         if (tp->irq_max > 1) {
16408                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16409                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16410
16411                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16412                     tg3_asic_rev(tp) == ASIC_REV_5720)
16413                         tp->txq_max = tp->irq_max - 1;
16414         }
16415
16416         if (tg3_flag(tp, 5755_PLUS) ||
16417             tg3_asic_rev(tp) == ASIC_REV_5906)
16418                 tg3_flag_set(tp, SHORT_DMA_BUG);
16419
16420         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16421                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16422
16423         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16424             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16425             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16426             tg3_asic_rev(tp) == ASIC_REV_5762)
16427                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16428
16429         if (tg3_flag(tp, 57765_PLUS) &&
16430             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16431                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16432
16433         if (!tg3_flag(tp, 5705_PLUS) ||
16434             tg3_flag(tp, 5780_CLASS) ||
16435             tg3_flag(tp, USE_JUMBO_BDFLAG))
16436                 tg3_flag_set(tp, JUMBO_CAPABLE);
16437
16438         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16439                               &pci_state_reg);
16440
16441         if (pci_is_pcie(tp->pdev)) {
16442                 u16 lnkctl;
16443
16444                 tg3_flag_set(tp, PCI_EXPRESS);
16445
16446                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16447                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16448                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16449                                 tg3_flag_clear(tp, HW_TSO_2);
16450                                 tg3_flag_clear(tp, TSO_CAPABLE);
16451                         }
16452                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16453                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16454                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16455                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16456                                 tg3_flag_set(tp, CLKREQ_BUG);
16457                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16458                         tg3_flag_set(tp, L1PLLPD_EN);
16459                 }
16460         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16461                 /* BCM5785 devices are effectively PCIe devices, and should
16462                  * follow PCIe codepaths, but do not have a PCIe capabilities
16463                  * section.
16464                  */
16465                 tg3_flag_set(tp, PCI_EXPRESS);
16466         } else if (!tg3_flag(tp, 5705_PLUS) ||
16467                    tg3_flag(tp, 5780_CLASS)) {
16468                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16469                 if (!tp->pcix_cap) {
16470                         dev_err(&tp->pdev->dev,
16471                                 "Cannot find PCI-X capability, aborting\n");
16472                         return -EIO;
16473                 }
16474
16475                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16476                         tg3_flag_set(tp, PCIX_MODE);
16477         }
16478
16479         /* If we have an AMD 762 or VIA K8T800 chipset, write
16480          * reordering to the mailbox registers done by the host
16481          * controller can cause major troubles.  We read back from
16482          * every mailbox register write to force the writes to be
16483          * posted to the chip in order.
16484          */
16485         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16486             !tg3_flag(tp, PCI_EXPRESS))
16487                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16488
16489         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16490                              &tp->pci_cacheline_sz);
16491         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16492                              &tp->pci_lat_timer);
16493         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16494             tp->pci_lat_timer < 64) {
16495                 tp->pci_lat_timer = 64;
16496                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16497                                       tp->pci_lat_timer);
16498         }
16499
16500         /* Important! -- It is critical that the PCI-X hw workaround
16501          * situation is decided before the first MMIO register access.
16502          */
16503         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16504                 /* 5700 BX chips need to have their TX producer index
16505                  * mailboxes written twice to workaround a bug.
16506                  */
16507                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16508
16509                 /* If we are in PCI-X mode, enable register write workaround.
16510                  *
16511                  * The workaround is to use indirect register accesses
16512                  * for all chip writes not to mailbox registers.
16513                  */
16514                 if (tg3_flag(tp, PCIX_MODE)) {
16515                         u32 pm_reg;
16516
16517                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16518
16519                         /* The chip can have it's power management PCI config
16520                          * space registers clobbered due to this bug.
16521                          * So explicitly force the chip into D0 here.
16522                          */
16523                         pci_read_config_dword(tp->pdev,
16524                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16525                                               &pm_reg);
16526                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16527                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16528                         pci_write_config_dword(tp->pdev,
16529                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16530                                                pm_reg);
16531
16532                         /* Also, force SERR#/PERR# in PCI command. */
16533                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16534                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16535                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16536                 }
16537         }
16538
16539         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16540                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16541         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16542                 tg3_flag_set(tp, PCI_32BIT);
16543
16544         /* Chip-specific fixup from Broadcom driver */
16545         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16546             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16547                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16548                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16549         }
16550
16551         /* Default fast path register access methods */
16552         tp->read32 = tg3_read32;
16553         tp->write32 = tg3_write32;
16554         tp->read32_mbox = tg3_read32;
16555         tp->write32_mbox = tg3_write32;
16556         tp->write32_tx_mbox = tg3_write32;
16557         tp->write32_rx_mbox = tg3_write32;
16558
16559         /* Various workaround register access methods */
16560         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16561                 tp->write32 = tg3_write_indirect_reg32;
16562         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16563                  (tg3_flag(tp, PCI_EXPRESS) &&
16564                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16565                 /*
16566                  * Back to back register writes can cause problems on these
16567                  * chips, the workaround is to read back all reg writes
16568                  * except those to mailbox regs.
16569                  *
16570                  * See tg3_write_indirect_reg32().
16571                  */
16572                 tp->write32 = tg3_write_flush_reg32;
16573         }
16574
16575         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16576                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16577                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16578                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16579         }
16580
16581         if (tg3_flag(tp, ICH_WORKAROUND)) {
16582                 tp->read32 = tg3_read_indirect_reg32;
16583                 tp->write32 = tg3_write_indirect_reg32;
16584                 tp->read32_mbox = tg3_read_indirect_mbox;
16585                 tp->write32_mbox = tg3_write_indirect_mbox;
16586                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16587                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16588
16589                 iounmap(tp->regs);
16590                 tp->regs = NULL;
16591
16592                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16593                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16594                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16595         }
16596         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16597                 tp->read32_mbox = tg3_read32_mbox_5906;
16598                 tp->write32_mbox = tg3_write32_mbox_5906;
16599                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16600                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16601         }
16602
16603         if (tp->write32 == tg3_write_indirect_reg32 ||
16604             (tg3_flag(tp, PCIX_MODE) &&
16605              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16606               tg3_asic_rev(tp) == ASIC_REV_5701)))
16607                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16608
16609         /* The memory arbiter has to be enabled in order for SRAM accesses
16610          * to succeed.  Normally on powerup the tg3 chip firmware will make
16611          * sure it is enabled, but other entities such as system netboot
16612          * code might disable it.
16613          */
16614         val = tr32(MEMARB_MODE);
16615         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16616
16617         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16618         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16619             tg3_flag(tp, 5780_CLASS)) {
16620                 if (tg3_flag(tp, PCIX_MODE)) {
16621                         pci_read_config_dword(tp->pdev,
16622                                               tp->pcix_cap + PCI_X_STATUS,
16623                                               &val);
16624                         tp->pci_fn = val & 0x7;
16625                 }
16626         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16627                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16628                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16629                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16630                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16631                         val = tr32(TG3_CPMU_STATUS);
16632
16633                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16634                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16635                 else
16636                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16637                                      TG3_CPMU_STATUS_FSHFT_5719;
16638         }
16639
16640         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16641                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16642                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16643         }
16644
16645         /* Get eeprom hw config before calling tg3_set_power_state().
16646          * In particular, the TG3_FLAG_IS_NIC flag must be
16647          * determined before calling tg3_set_power_state() so that
16648          * we know whether or not to switch out of Vaux power.
16649          * When the flag is set, it means that GPIO1 is used for eeprom
16650          * write protect and also implies that it is a LOM where GPIOs
16651          * are not used to switch power.
16652          */
16653         tg3_get_eeprom_hw_cfg(tp);
16654
16655         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16656                 tg3_flag_clear(tp, TSO_CAPABLE);
16657                 tg3_flag_clear(tp, TSO_BUG);
16658                 tp->fw_needed = NULL;
16659         }
16660
16661         if (tg3_flag(tp, ENABLE_APE)) {
16662                 /* Allow reads and writes to the
16663                  * APE register and memory space.
16664                  */
16665                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16666                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16667                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16668                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16669                                        pci_state_reg);
16670
16671                 tg3_ape_lock_init(tp);
16672                 tp->ape_hb_interval =
16673                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16674         }
16675
16676         /* Set up tp->grc_local_ctrl before calling
16677          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16678          * will bring 5700's external PHY out of reset.
16679          * It is also used as eeprom write protect on LOMs.
16680          */
16681         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16682         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16683             tg3_flag(tp, EEPROM_WRITE_PROT))
16684                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16685                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16686         /* Unused GPIO3 must be driven as output on 5752 because there
16687          * are no pull-up resistors on unused GPIO pins.
16688          */
16689         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16690                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16691
16692         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16693             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16694             tg3_flag(tp, 57765_CLASS))
16695                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16696
16697         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16698             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16699                 /* Turn off the debug UART. */
16700                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16701                 if (tg3_flag(tp, IS_NIC))
16702                         /* Keep VMain power. */
16703                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16704                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16705         }
16706
16707         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16708                 tp->grc_local_ctrl |=
16709                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16710
16711         /* Switch out of Vaux if it is a NIC */
16712         tg3_pwrsrc_switch_to_vmain(tp);
16713
16714         /* Derive initial jumbo mode from MTU assigned in
16715          * ether_setup() via the alloc_etherdev() call
16716          */
16717         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16718                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16719
16720         /* Determine WakeOnLan speed to use. */
16721         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16722             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16723             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16724             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16725                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16726         } else {
16727                 tg3_flag_set(tp, WOL_SPEED_100MB);
16728         }
16729
16730         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16731                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16732
16733         /* A few boards don't want Ethernet@WireSpeed phy feature */
16734         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16735             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16736              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16737              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16738             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16739             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16740                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16741
16742         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16743             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16744                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16745         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16746                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16747
16748         if (tg3_flag(tp, 5705_PLUS) &&
16749             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16750             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16751             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16752             !tg3_flag(tp, 57765_PLUS)) {
16753                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16754                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16755                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16756                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16757                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16758                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16759                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16760                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16761                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16762                 } else
16763                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16764         }
16765
16766         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16767             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16768                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16769                 if (tp->phy_otp == 0)
16770                         tp->phy_otp = TG3_OTP_DEFAULT;
16771         }
16772
16773         if (tg3_flag(tp, CPMU_PRESENT))
16774                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16775         else
16776                 tp->mi_mode = MAC_MI_MODE_BASE;
16777
16778         tp->coalesce_mode = 0;
16779         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16780             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16781                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16782
16783         /* Set these bits to enable statistics workaround. */
16784         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16785             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16786             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16787             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16788                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16789                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16790         }
16791
16792         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16793             tg3_asic_rev(tp) == ASIC_REV_57780)
16794                 tg3_flag_set(tp, USE_PHYLIB);
16795
16796         err = tg3_mdio_init(tp);
16797         if (err)
16798                 return err;
16799
16800         /* Initialize data/descriptor byte/word swapping. */
16801         val = tr32(GRC_MODE);
16802         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16803             tg3_asic_rev(tp) == ASIC_REV_5762)
16804                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16805                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16806                         GRC_MODE_B2HRX_ENABLE |
16807                         GRC_MODE_HTX2B_ENABLE |
16808                         GRC_MODE_HOST_STACKUP);
16809         else
16810                 val &= GRC_MODE_HOST_STACKUP;
16811
16812         tw32(GRC_MODE, val | tp->grc_mode);
16813
16814         tg3_switch_clocks(tp);
16815
16816         /* Clear this out for sanity. */
16817         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16818
16819         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16820         tw32(TG3PCI_REG_BASE_ADDR, 0);
16821
16822         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16823                               &pci_state_reg);
16824         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16825             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16826                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16827                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16828                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16829                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16830                         void __iomem *sram_base;
16831
16832                         /* Write some dummy words into the SRAM status block
16833                          * area, see if it reads back correctly.  If the return
16834                          * value is bad, force enable the PCIX workaround.
16835                          */
16836                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16837
16838                         writel(0x00000000, sram_base);
16839                         writel(0x00000000, sram_base + 4);
16840                         writel(0xffffffff, sram_base + 4);
16841                         if (readl(sram_base) != 0x00000000)
16842                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16843                 }
16844         }
16845
16846         udelay(50);
16847         tg3_nvram_init(tp);
16848
16849         /* If the device has an NVRAM, no need to load patch firmware */
16850         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16851             !tg3_flag(tp, NO_NVRAM))
16852                 tp->fw_needed = NULL;
16853
16854         grc_misc_cfg = tr32(GRC_MISC_CFG);
16855         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16856
16857         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16858             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16859              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16860                 tg3_flag_set(tp, IS_5788);
16861
16862         if (!tg3_flag(tp, IS_5788) &&
16863             tg3_asic_rev(tp) != ASIC_REV_5700)
16864                 tg3_flag_set(tp, TAGGED_STATUS);
16865         if (tg3_flag(tp, TAGGED_STATUS)) {
16866                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16867                                       HOSTCC_MODE_CLRTICK_TXBD);
16868
16869                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16870                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16871                                        tp->misc_host_ctrl);
16872         }
16873
16874         /* Preserve the APE MAC_MODE bits */
16875         if (tg3_flag(tp, ENABLE_APE))
16876                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16877         else
16878                 tp->mac_mode = 0;
16879
16880         if (tg3_10_100_only_device(tp, ent))
16881                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16882
16883         err = tg3_phy_probe(tp);
16884         if (err) {
16885                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16886                 /* ... but do not return immediately ... */
16887                 tg3_mdio_fini(tp);
16888         }
16889
16890         tg3_read_vpd(tp);
16891         tg3_read_fw_ver(tp);
16892
16893         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16894                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16895         } else {
16896                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16897                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16898                 else
16899                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16900         }
16901
16902         /* 5700 {AX,BX} chips have a broken status block link
16903          * change bit implementation, so we must use the
16904          * status register in those cases.
16905          */
16906         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16907                 tg3_flag_set(tp, USE_LINKCHG_REG);
16908         else
16909                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16910
16911         /* The led_ctrl is set during tg3_phy_probe, here we might
16912          * have to force the link status polling mechanism based
16913          * upon subsystem IDs.
16914          */
16915         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16916             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16917             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16918                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16919                 tg3_flag_set(tp, USE_LINKCHG_REG);
16920         }
16921
16922         /* For all SERDES we poll the MAC status register. */
16923         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16924                 tg3_flag_set(tp, POLL_SERDES);
16925         else
16926                 tg3_flag_clear(tp, POLL_SERDES);
16927
16928         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16929                 tg3_flag_set(tp, POLL_CPMU_LINK);
16930
16931         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16932         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16933         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16934             tg3_flag(tp, PCIX_MODE)) {
16935                 tp->rx_offset = NET_SKB_PAD;
16936 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16937                 tp->rx_copy_thresh = ~(u16)0;
16938 #endif
16939         }
16940
16941         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16942         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16943         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16944
16945         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16946
16947         /* Increment the rx prod index on the rx std ring by at most
16948          * 8 for these chips to workaround hw errata.
16949          */
16950         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16951             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16952             tg3_asic_rev(tp) == ASIC_REV_5755)
16953                 tp->rx_std_max_post = 8;
16954
16955         if (tg3_flag(tp, ASPM_WORKAROUND))
16956                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16957                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16958
16959         return err;
16960 }
16961
16962 #ifdef CONFIG_SPARC
16963 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16964 {
16965         struct net_device *dev = tp->dev;
16966         struct pci_dev *pdev = tp->pdev;
16967         struct device_node *dp = pci_device_to_OF_node(pdev);
16968         const unsigned char *addr;
16969         int len;
16970
16971         addr = of_get_property(dp, "local-mac-address", &len);
16972         if (addr && len == ETH_ALEN) {
16973                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16974                 return 0;
16975         }
16976         return -ENODEV;
16977 }
16978
16979 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16980 {
16981         struct net_device *dev = tp->dev;
16982
16983         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16984         return 0;
16985 }
16986 #endif
16987
/* Populate dev->dev_addr with the adapter's MAC address.
 *
 * Sources are tried in priority order:
 *   1. OpenFirmware "local-mac-address" property (SPARC builds only)
 *   2. SSB core registers (when the chip is an SSB GigE core)
 *   3. The MAC address mailbox in NIC SRAM (written by bootcode)
 *   4. NVRAM, at a chip/function-specific offset
 *   5. The live MAC_ADDR_0_{HIGH,LOW} hardware registers
 *   6. The SPARC IDPROM system default (SPARC builds only)
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	/* An OF-provided address wins outright. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Select the NVRAM offset of the MAC address for this chip and
	 * PCI function; 0x7c is the single-port default.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts keep the second port's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Reset the NVRAM engine if the lock cannot be taken,
		 * otherwise immediately release the lock just acquired.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717-class: odd functions at 0xcc, and functions 2/3 a
		 * further 0x18c beyond their sibling's offset.
		 */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — presumably the bootcode's signature
	 * marking a valid mailbox entry; verify against bootcode docs.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  The reads yield big-endian words, so
		 * the last 2 bytes of hi plus all 4 bytes of lo form the
		 * 6-byte MAC in wire order.
		 */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Last resort on SPARC: the system-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
17069
/* DMA burst-boundary policy used by tg3_calc_dma_bndry(): constrain DMA
 * bursts to a single cache line, or permit multi-cache-line bursts.
 * (0 means no boundary preference.)
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
17072
17073 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17074 {
17075         int cacheline_size;
17076         u8 byte;
17077         int goal;
17078
17079         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17080         if (byte == 0)
17081                 cacheline_size = 1024;
17082         else
17083                 cacheline_size = (int) byte * 4;
17084
17085         /* On 5703 and later chips, the boundary bits have no
17086          * effect.
17087          */
17088         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17089             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17090             !tg3_flag(tp, PCI_EXPRESS))
17091                 goto out;
17092
17093 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17094         goal = BOUNDARY_MULTI_CACHELINE;
17095 #else
17096 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17097         goal = BOUNDARY_SINGLE_CACHELINE;
17098 #else
17099         goal = 0;
17100 #endif
17101 #endif
17102
17103         if (tg3_flag(tp, 57765_PLUS)) {
17104                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17105                 goto out;
17106         }
17107
17108         if (!goal)
17109                 goto out;
17110
17111         /* PCI controllers on most RISC systems tend to disconnect
17112          * when a device tries to burst across a cache-line boundary.
17113          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17114          *
17115          * Unfortunately, for PCI-E there are only limited
17116          * write-side controls for this, and thus for reads
17117          * we will still get the disconnects.  We'll also waste
17118          * these PCI cycles for both read and write for chips
17119          * other than 5700 and 5701 which do not implement the
17120          * boundary bits.
17121          */
17122         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17123                 switch (cacheline_size) {
17124                 case 16:
17125                 case 32:
17126                 case 64:
17127                 case 128:
17128                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17129                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17130                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17131                         } else {
17132                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17133                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17134                         }
17135                         break;
17136
17137                 case 256:
17138                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17139                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17140                         break;
17141
17142                 default:
17143                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17144                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17145                         break;
17146                 }
17147         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17148                 switch (cacheline_size) {
17149                 case 16:
17150                 case 32:
17151                 case 64:
17152                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17153                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17154                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17155                                 break;
17156                         }
17157                         /* fallthrough */
17158                 case 128:
17159                 default:
17160                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17161                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17162                         break;
17163                 }
17164         } else {
17165                 switch (cacheline_size) {
17166                 case 16:
17167                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17168                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17169                                         DMA_RWCTRL_WRITE_BNDRY_16);
17170                                 break;
17171                         }
17172                         /* fallthrough */
17173                 case 32:
17174                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17175                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17176                                         DMA_RWCTRL_WRITE_BNDRY_32);
17177                                 break;
17178                         }
17179                         /* fallthrough */
17180                 case 64:
17181                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17182                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17183                                         DMA_RWCTRL_WRITE_BNDRY_64);
17184                                 break;
17185                         }
17186                         /* fallthrough */
17187                 case 128:
17188                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17189                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17190                                         DMA_RWCTRL_WRITE_BNDRY_128);
17191                                 break;
17192                         }
17193                         /* fallthrough */
17194                 case 256:
17195                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17196                                 DMA_RWCTRL_WRITE_BNDRY_256);
17197                         break;
17198                 case 512:
17199                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17200                                 DMA_RWCTRL_WRITE_BNDRY_512);
17201                         break;
17202                 case 1024:
17203                 default:
17204                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17205                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17206                         break;
17207                 }
17208         }
17209
17210 out:
17211         return val;
17212 }
17213
/* Run one host-memory <-> NIC DMA transfer of @size bytes by hand-building
 * an internal buffer descriptor in NIC SRAM and kicking the appropriate
 * DMA engine.
 *
 * @buf/@buf_dma: coherent test buffer (virtual address / DMA address).
 * @to_device: true  -> "read DMA": the NIC reads @buf (RDMAC engine);
 *             false -> "write DMA": the NIC writes @buf (WDMAC engine).
 *
 * Returns 0 when the completion FIFO reports the descriptor done, or
 * -ENODEV if it does not complete within the ~4 ms polling window.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status before the run so
	 * the poll below only sees this descriptor's completion.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor points at the host buffer; nic_mbuf is the NIC-internal
	 * mbuf address (magic value from Broadcom — do not derive).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid pair selecting the RDMAC completion/submission
		 * queues (values from Broadcom — TODO confirm meaning).
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* WDMAC queue pair (values from Broadcom). */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time via
	 * the indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Close the window so later indirect accesses start from a clean base. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the SRAM descriptor address to start the transfer. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO: up to 40 * 100us = 4 ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		/* Low 16 bits echo the completed descriptor's SRAM address. */
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17294
17295 #define TEST_BUFFER_SIZE        0x2000
17296
/* Host bridges known to expose the 5700/5701 write-side DMA bug even when
 * the loopback test in tg3_test_dma() passes.  When one of these is
 * present, the 16-byte write boundary workaround is forced anyway.
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
17301
/* Pick DMA read/write control register settings (tp->dma_rwctrl) for this
 * chip/bus combination and, on 5700/5701 only, run a write-then-read DMA
 * loopback test to choose a write boundary that avoids the 5700/5701
 * write-DMA corruption bug.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if the loopback test fails even with the 16-byte boundary
 * workaround applied.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for write/read DMA cycles. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ chips take only the boundary bits computed above. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* The magic constants below are watermark/engine tuning values
	 * supplied by Broadcom per chip family — do not derive or change.
	 */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* 5703/5704: clear the low nibble (reassigned bits on these chips). */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);


	/* Only 5700/5701 need the loopback test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write the pattern to the chip, read it back, verify.
	 * On corruption, retry once with the 16-byte write boundary; if
	 * corruption persists even then, give up with -ENODEV.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17475
17476 static void tg3_init_bufmgr_config(struct tg3 *tp)
17477 {
17478         if (tg3_flag(tp, 57765_PLUS)) {
17479                 tp->bufmgr_config.mbuf_read_dma_low_water =
17480                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17481                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17482                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17483                 tp->bufmgr_config.mbuf_high_water =
17484                         DEFAULT_MB_HIGH_WATER_57765;
17485
17486                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17487                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17488                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17489                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17490                 tp->bufmgr_config.mbuf_high_water_jumbo =
17491                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17492         } else if (tg3_flag(tp, 5705_PLUS)) {
17493                 tp->bufmgr_config.mbuf_read_dma_low_water =
17494                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17495                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17496                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17497                 tp->bufmgr_config.mbuf_high_water =
17498                         DEFAULT_MB_HIGH_WATER_5705;
17499                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17500                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17501                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17502                         tp->bufmgr_config.mbuf_high_water =
17503                                 DEFAULT_MB_HIGH_WATER_5906;
17504                 }
17505
17506                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17507                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17508                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17509                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17510                 tp->bufmgr_config.mbuf_high_water_jumbo =
17511                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17512         } else {
17513                 tp->bufmgr_config.mbuf_read_dma_low_water =
17514                         DEFAULT_MB_RDMA_LOW_WATER;
17515                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17516                         DEFAULT_MB_MACRX_LOW_WATER;
17517                 tp->bufmgr_config.mbuf_high_water =
17518                         DEFAULT_MB_HIGH_WATER;
17519
17520                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17521                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17522                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17523                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17524                 tp->bufmgr_config.mbuf_high_water_jumbo =
17525                         DEFAULT_MB_HIGH_WATER_JUMBO;
17526         }
17527
17528         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17529         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17530 }
17531
17532 static char *tg3_phy_string(struct tg3 *tp)
17533 {
17534         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17535         case TG3_PHY_ID_BCM5400:        return "5400";
17536         case TG3_PHY_ID_BCM5401:        return "5401";
17537         case TG3_PHY_ID_BCM5411:        return "5411";
17538         case TG3_PHY_ID_BCM5701:        return "5701";
17539         case TG3_PHY_ID_BCM5703:        return "5703";
17540         case TG3_PHY_ID_BCM5704:        return "5704";
17541         case TG3_PHY_ID_BCM5705:        return "5705";
17542         case TG3_PHY_ID_BCM5750:        return "5750";
17543         case TG3_PHY_ID_BCM5752:        return "5752";
17544         case TG3_PHY_ID_BCM5714:        return "5714";
17545         case TG3_PHY_ID_BCM5780:        return "5780";
17546         case TG3_PHY_ID_BCM5755:        return "5755";
17547         case TG3_PHY_ID_BCM5787:        return "5787";
17548         case TG3_PHY_ID_BCM5784:        return "5784";
17549         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17550         case TG3_PHY_ID_BCM5906:        return "5906";
17551         case TG3_PHY_ID_BCM5761:        return "5761";
17552         case TG3_PHY_ID_BCM5718C:       return "5718C";
17553         case TG3_PHY_ID_BCM5718S:       return "5718S";
17554         case TG3_PHY_ID_BCM57765:       return "57765";
17555         case TG3_PHY_ID_BCM5719C:       return "5719C";
17556         case TG3_PHY_ID_BCM5720C:       return "5720C";
17557         case TG3_PHY_ID_BCM5762:        return "5762C";
17558         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17559         case 0:                 return "serdes";
17560         default:                return "unknown";
17561         }
17562 }
17563
17564 static char *tg3_bus_string(struct tg3 *tp, char *str)
17565 {
17566         if (tg3_flag(tp, PCI_EXPRESS)) {
17567                 strcpy(str, "PCI Express");
17568                 return str;
17569         } else if (tg3_flag(tp, PCIX_MODE)) {
17570                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17571
17572                 strcpy(str, "PCIX:");
17573
17574                 if ((clock_ctrl == 7) ||
17575                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17576                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17577                         strcat(str, "133MHz");
17578                 else if (clock_ctrl == 0)
17579                         strcat(str, "33MHz");
17580                 else if (clock_ctrl == 2)
17581                         strcat(str, "50MHz");
17582                 else if (clock_ctrl == 4)
17583                         strcat(str, "66MHz");
17584                 else if (clock_ctrl == 6)
17585                         strcat(str, "100MHz");
17586         } else {
17587                 strcpy(str, "PCI:");
17588                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17589                         strcat(str, "66MHz");
17590                 else
17591                         strcat(str, "33MHz");
17592         }
17593         if (tg3_flag(tp, PCI_32BIT))
17594                 strcat(str, ":32-bit");
17595         else
17596                 strcat(str, ":64-bit");
17597         return str;
17598 }
17599
17600 static void tg3_init_coal(struct tg3 *tp)
17601 {
17602         struct ethtool_coalesce *ec = &tp->coal;
17603
17604         memset(ec, 0, sizeof(*ec));
17605         ec->cmd = ETHTOOL_GCOALESCE;
17606         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17607         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17608         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17609         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17610         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17611         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17612         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17613         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17614         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17615
17616         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17617                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17618                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17619                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17620                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17621                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17622         }
17623
17624         if (tg3_flag(tp, 5705_PLUS)) {
17625                 ec->rx_coalesce_usecs_irq = 0;
17626                 ec->tx_coalesce_usecs_irq = 0;
17627                 ec->stats_block_coalesce_usecs = 0;
17628         }
17629 }
17630
17631 static int tg3_init_one(struct pci_dev *pdev,
17632                                   const struct pci_device_id *ent)
17633 {
17634         struct net_device *dev;
17635         struct tg3 *tp;
17636         int i, err;
17637         u32 sndmbx, rcvmbx, intmbx;
17638         char str[40];
17639         u64 dma_mask, persist_dma_mask;
17640         netdev_features_t features = 0;
17641
17642         printk_once(KERN_INFO "%s\n", version);
17643
17644         err = pci_enable_device(pdev);
17645         if (err) {
17646                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17647                 return err;
17648         }
17649
17650         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17651         if (err) {
17652                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17653                 goto err_out_disable_pdev;
17654         }
17655
17656         pci_set_master(pdev);
17657
17658         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17659         if (!dev) {
17660                 err = -ENOMEM;
17661                 goto err_out_free_res;
17662         }
17663
17664         SET_NETDEV_DEV(dev, &pdev->dev);
17665
17666         tp = netdev_priv(dev);
17667         tp->pdev = pdev;
17668         tp->dev = dev;
17669         tp->rx_mode = TG3_DEF_RX_MODE;
17670         tp->tx_mode = TG3_DEF_TX_MODE;
17671         tp->irq_sync = 1;
17672         tp->pcierr_recovery = false;
17673
17674         if (tg3_debug > 0)
17675                 tp->msg_enable = tg3_debug;
17676         else
17677                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17678
17679         if (pdev_is_ssb_gige_core(pdev)) {
17680                 tg3_flag_set(tp, IS_SSB_CORE);
17681                 if (ssb_gige_must_flush_posted_writes(pdev))
17682                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17683                 if (ssb_gige_one_dma_at_once(pdev))
17684                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17685                 if (ssb_gige_have_roboswitch(pdev)) {
17686                         tg3_flag_set(tp, USE_PHYLIB);
17687                         tg3_flag_set(tp, ROBOSWITCH);
17688                 }
17689                 if (ssb_gige_is_rgmii(pdev))
17690                         tg3_flag_set(tp, RGMII_MODE);
17691         }
17692
17693         /* The word/byte swap controls here control register access byte
17694          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17695          * setting below.
17696          */
17697         tp->misc_host_ctrl =
17698                 MISC_HOST_CTRL_MASK_PCI_INT |
17699                 MISC_HOST_CTRL_WORD_SWAP |
17700                 MISC_HOST_CTRL_INDIR_ACCESS |
17701                 MISC_HOST_CTRL_PCISTATE_RW;
17702
17703         /* The NONFRM (non-frame) byte/word swap controls take effect
17704          * on descriptor entries, anything which isn't packet data.
17705          *
17706          * The StrongARM chips on the board (one for tx, one for rx)
17707          * are running in big-endian mode.
17708          */
17709         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17710                         GRC_MODE_WSWAP_NONFRM_DATA);
17711 #ifdef __BIG_ENDIAN
17712         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17713 #endif
17714         spin_lock_init(&tp->lock);
17715         spin_lock_init(&tp->indirect_lock);
17716         INIT_WORK(&tp->reset_task, tg3_reset_task);
17717
17718         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17719         if (!tp->regs) {
17720                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17721                 err = -ENOMEM;
17722                 goto err_out_free_dev;
17723         }
17724
17725         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17726             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17730             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17731             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17732             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17733             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17734             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17735             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17736             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17737             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17738             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17739             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17740                 tg3_flag_set(tp, ENABLE_APE);
17741                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17742                 if (!tp->aperegs) {
17743                         dev_err(&pdev->dev,
17744                                 "Cannot map APE registers, aborting\n");
17745                         err = -ENOMEM;
17746                         goto err_out_iounmap;
17747                 }
17748         }
17749
17750         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17751         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17752
17753         dev->ethtool_ops = &tg3_ethtool_ops;
17754         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17755         dev->netdev_ops = &tg3_netdev_ops;
17756         dev->irq = pdev->irq;
17757
17758         err = tg3_get_invariants(tp, ent);
17759         if (err) {
17760                 dev_err(&pdev->dev,
17761                         "Problem fetching invariants of chip, aborting\n");
17762                 goto err_out_apeunmap;
17763         }
17764
17765         /* The EPB bridge inside 5714, 5715, and 5780 and any
17766          * device behind the EPB cannot support DMA addresses > 40-bit.
17767          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17768          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17769          * do DMA address check in tg3_start_xmit().
17770          */
17771         if (tg3_flag(tp, IS_5788))
17772                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17773         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17774                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17775 #ifdef CONFIG_HIGHMEM
17776                 dma_mask = DMA_BIT_MASK(64);
17777 #endif
17778         } else
17779                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17780
17781         /* Configure DMA attributes. */
17782         if (dma_mask > DMA_BIT_MASK(32)) {
17783                 err = pci_set_dma_mask(pdev, dma_mask);
17784                 if (!err) {
17785                         features |= NETIF_F_HIGHDMA;
17786                         err = pci_set_consistent_dma_mask(pdev,
17787                                                           persist_dma_mask);
17788                         if (err < 0) {
17789                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17790                                         "DMA for consistent allocations\n");
17791                                 goto err_out_apeunmap;
17792                         }
17793                 }
17794         }
17795         if (err || dma_mask == DMA_BIT_MASK(32)) {
17796                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17797                 if (err) {
17798                         dev_err(&pdev->dev,
17799                                 "No usable DMA configuration, aborting\n");
17800                         goto err_out_apeunmap;
17801                 }
17802         }
17803
17804         tg3_init_bufmgr_config(tp);
17805
17806         /* 5700 B0 chips do not support checksumming correctly due
17807          * to hardware bugs.
17808          */
17809         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17810                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17811
17812                 if (tg3_flag(tp, 5755_PLUS))
17813                         features |= NETIF_F_IPV6_CSUM;
17814         }
17815
17816         /* TSO is on by default on chips that support hardware TSO.
17817          * Firmware TSO on older chips gives lower performance, so it
17818          * is off by default, but can be enabled using ethtool.
17819          */
17820         if ((tg3_flag(tp, HW_TSO_1) ||
17821              tg3_flag(tp, HW_TSO_2) ||
17822              tg3_flag(tp, HW_TSO_3)) &&
17823             (features & NETIF_F_IP_CSUM))
17824                 features |= NETIF_F_TSO;
17825         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17826                 if (features & NETIF_F_IPV6_CSUM)
17827                         features |= NETIF_F_TSO6;
17828                 if (tg3_flag(tp, HW_TSO_3) ||
17829                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17830                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17831                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17832                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17833                     tg3_asic_rev(tp) == ASIC_REV_57780)
17834                         features |= NETIF_F_TSO_ECN;
17835         }
17836
17837         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17838                          NETIF_F_HW_VLAN_CTAG_RX;
17839         dev->vlan_features |= features;
17840
17841         /*
17842          * Add loopback capability only for a subset of devices that support
17843          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17844          * loopback for the remaining devices.
17845          */
17846         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17847             !tg3_flag(tp, CPMU_PRESENT))
17848                 /* Add the loopback capability */
17849                 features |= NETIF_F_LOOPBACK;
17850
17851         dev->hw_features |= features;
17852         dev->priv_flags |= IFF_UNICAST_FLT;
17853
17854         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17855         dev->min_mtu = TG3_MIN_MTU;
17856         dev->max_mtu = TG3_MAX_MTU(tp);
17857
17858         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17859             !tg3_flag(tp, TSO_CAPABLE) &&
17860             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17861                 tg3_flag_set(tp, MAX_RXPEND_64);
17862                 tp->rx_pending = 63;
17863         }
17864
17865         err = tg3_get_device_address(tp);
17866         if (err) {
17867                 dev_err(&pdev->dev,
17868                         "Could not obtain valid ethernet address, aborting\n");
17869                 goto err_out_apeunmap;
17870         }
17871
17872         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17873         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17874         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17875         for (i = 0; i < tp->irq_max; i++) {
17876                 struct tg3_napi *tnapi = &tp->napi[i];
17877
17878                 tnapi->tp = tp;
17879                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17880
17881                 tnapi->int_mbox = intmbx;
17882                 if (i <= 4)
17883                         intmbx += 0x8;
17884                 else
17885                         intmbx += 0x4;
17886
17887                 tnapi->consmbox = rcvmbx;
17888                 tnapi->prodmbox = sndmbx;
17889
17890                 if (i)
17891                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17892                 else
17893                         tnapi->coal_now = HOSTCC_MODE_NOW;
17894
17895                 if (!tg3_flag(tp, SUPPORT_MSIX))
17896                         break;
17897
17898                 /*
17899                  * If we support MSIX, we'll be using RSS.  If we're using
17900                  * RSS, the first vector only handles link interrupts and the
17901                  * remaining vectors handle rx and tx interrupts.  Reuse the
17902                  * mailbox values for the next iteration.  The values we setup
17903                  * above are still useful for the single vectored mode.
17904                  */
17905                 if (!i)
17906                         continue;
17907
17908                 rcvmbx += 0x8;
17909
17910                 if (sndmbx & 0x4)
17911                         sndmbx -= 0x4;
17912                 else
17913                         sndmbx += 0xc;
17914         }
17915
17916         /*
17917          * Reset chip in case UNDI or EFI driver did not shutdown
17918          * DMA self test will enable WDMAC and we'll see (spurious)
17919          * pending DMA on the PCI bus at that point.
17920          */
17921         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17922             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17923                 tg3_full_lock(tp, 0);
17924                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17925                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17926                 tg3_full_unlock(tp);
17927         }
17928
17929         err = tg3_test_dma(tp);
17930         if (err) {
17931                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17932                 goto err_out_apeunmap;
17933         }
17934
17935         tg3_init_coal(tp);
17936
17937         pci_set_drvdata(pdev, dev);
17938
17939         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17940             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17941             tg3_asic_rev(tp) == ASIC_REV_5762)
17942                 tg3_flag_set(tp, PTP_CAPABLE);
17943
17944         tg3_timer_init(tp);
17945
17946         tg3_carrier_off(tp);
17947
17948         err = register_netdev(dev);
17949         if (err) {
17950                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17951                 goto err_out_apeunmap;
17952         }
17953
17954         if (tg3_flag(tp, PTP_CAPABLE)) {
17955                 tg3_ptp_init(tp);
17956                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17957                                                    &tp->pdev->dev);
17958                 if (IS_ERR(tp->ptp_clock))
17959                         tp->ptp_clock = NULL;
17960         }
17961
17962         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17963                     tp->board_part_number,
17964                     tg3_chip_rev_id(tp),
17965                     tg3_bus_string(tp, str),
17966                     dev->dev_addr);
17967
17968         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17969                 char *ethtype;
17970
17971                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17972                         ethtype = "10/100Base-TX";
17973                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17974                         ethtype = "1000Base-SX";
17975                 else
17976                         ethtype = "10/100/1000Base-T";
17977
17978                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17979                             "(WireSpeed[%d], EEE[%d])\n",
17980                             tg3_phy_string(tp), ethtype,
17981                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17982                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17983         }
17984
17985         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17986                     (dev->features & NETIF_F_RXCSUM) != 0,
17987                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17988                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17989                     tg3_flag(tp, ENABLE_ASF) != 0,
17990                     tg3_flag(tp, TSO_CAPABLE) != 0);
17991         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17992                     tp->dma_rwctrl,
17993                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17994                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17995
17996         pci_save_state(pdev);
17997
17998         return 0;
17999
18000 err_out_apeunmap:
18001         if (tp->aperegs) {
18002                 iounmap(tp->aperegs);
18003                 tp->aperegs = NULL;
18004         }
18005
18006 err_out_iounmap:
18007         if (tp->regs) {
18008                 iounmap(tp->regs);
18009                 tp->regs = NULL;
18010         }
18011
18012 err_out_free_dev:
18013         free_netdev(dev);
18014
18015 err_out_free_res:
18016         pci_release_regions(pdev);
18017
18018 err_out_disable_pdev:
18019         if (pci_is_enabled(pdev))
18020                 pci_disable_device(pdev);
18021         return err;
18022 }
18023
18024 static void tg3_remove_one(struct pci_dev *pdev)
18025 {
18026         struct net_device *dev = pci_get_drvdata(pdev);
18027
18028         if (dev) {
18029                 struct tg3 *tp = netdev_priv(dev);
18030
18031                 tg3_ptp_fini(tp);
18032
18033                 release_firmware(tp->fw);
18034
18035                 tg3_reset_task_cancel(tp);
18036
18037                 if (tg3_flag(tp, USE_PHYLIB)) {
18038                         tg3_phy_fini(tp);
18039                         tg3_mdio_fini(tp);
18040                 }
18041
18042                 unregister_netdev(dev);
18043                 if (tp->aperegs) {
18044                         iounmap(tp->aperegs);
18045                         tp->aperegs = NULL;
18046                 }
18047                 if (tp->regs) {
18048                         iounmap(tp->regs);
18049                         tp->regs = NULL;
18050                 }
18051                 free_netdev(dev);
18052                 pci_release_regions(pdev);
18053                 pci_disable_device(pdev);
18054         }
18055 }
18056
18057 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend hook: quiesce the NIC and prepare it for low
 * power.  If power-down preparation fails, the hardware is restarted
 * so the interface remains usable, and the error is propagated to the
 * PM core.  Returns 0 on success or a negative errno.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		goto unlock;

	/* Stop any deferred reset work before tearing things down. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	/* Keep the network stack from submitting new work while asleep. */
	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Suspend failed: bring the hardware back up so the
		 * device is left in a working state.  err (the original
		 * failure) is still returned to the caller.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
18114
18115 static int tg3_resume(struct device *device)
18116 {
18117         struct pci_dev *pdev = to_pci_dev(device);
18118         struct net_device *dev = pci_get_drvdata(pdev);
18119         struct tg3 *tp = netdev_priv(dev);
18120         int err = 0;
18121
18122         rtnl_lock();
18123
18124         if (!netif_running(dev))
18125                 goto unlock;
18126
18127         netif_device_attach(dev);
18128
18129         tg3_full_lock(tp, 0);
18130
18131         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18132
18133         tg3_flag_set(tp, INIT_COMPLETE);
18134         err = tg3_restart_hw(tp,
18135                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18136         if (err)
18137                 goto out;
18138
18139         tg3_timer_start(tp);
18140
18141         tg3_netif_start(tp);
18142
18143 out:
18144         tg3_full_unlock(tp);
18145
18146         if (!err)
18147                 tg3_phy_start(tp);
18148
18149 unlock:
18150         rtnl_unlock();
18151         return err;
18152 }
18153 #endif /* CONFIG_PM_SLEEP */
18154
/* Bind suspend/resume into a dev_pm_ops; the callbacks are only
 * referenced when CONFIG_PM_SLEEP is enabled (matching the #ifdef
 * around their definitions above).
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18156
18157 static void tg3_shutdown(struct pci_dev *pdev)
18158 {
18159         struct net_device *dev = pci_get_drvdata(pdev);
18160         struct tg3 *tp = netdev_priv(dev);
18161
18162         rtnl_lock();
18163         netif_device_detach(dev);
18164
18165         if (netif_running(dev))
18166                 dev_close(dev);
18167
18168         if (system_state == SYSTEM_POWER_OFF)
18169                 tg3_power_down(tp);
18170
18171         rtnl_unlock();
18172 }
18173
18174 /**
18175  * tg3_io_error_detected - called when PCI error is detected
18176  * @pdev: Pointer to PCI device
18177  * @state: The current pci connection state
18178  *
18179  * This function is called after a PCI bus error affecting
18180  * this device has been detected.
18181  */
18182 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18183                                               pci_channel_state_t state)
18184 {
18185         struct net_device *netdev = pci_get_drvdata(pdev);
18186         struct tg3 *tp = netdev_priv(netdev);
18187         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18188
18189         netdev_info(netdev, "PCI I/O error detected\n");
18190
18191         rtnl_lock();
18192
18193         /* We probably don't have netdev yet */
18194         if (!netdev || !netif_running(netdev))
18195                 goto done;
18196
18197         /* We needn't recover from permanent error */
18198         if (state == pci_channel_io_frozen)
18199                 tp->pcierr_recovery = true;
18200
18201         tg3_phy_stop(tp);
18202
18203         tg3_netif_stop(tp);
18204
18205         tg3_timer_stop(tp);
18206
18207         /* Want to make sure that the reset task doesn't run */
18208         tg3_reset_task_cancel(tp);
18209
18210         netif_device_detach(netdev);
18211
18212         /* Clean up software state, even if MMIO is blocked */
18213         tg3_full_lock(tp, 0);
18214         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18215         tg3_full_unlock(tp);
18216
18217 done:
18218         if (state == pci_channel_io_perm_failure) {
18219                 if (netdev) {
18220                         tg3_napi_enable(tp);
18221                         dev_close(netdev);
18222                 }
18223                 err = PCI_ERS_RESULT_DISCONNECT;
18224         } else {
18225                 pci_disable_device(pdev);
18226         }
18227
18228         rtnl_unlock();
18229
18230         return err;
18231 }
18232
18233 /**
18234  * tg3_io_slot_reset - called after the pci bus has been reset.
18235  * @pdev: Pointer to PCI device
18236  *
18237  * Restart the card from scratch, as if from a cold-boot.
18238  * At this point, the card has exprienced a hard reset,
18239  * followed by fixups by BIOS, and has its config space
18240  * set up identically to what it was at cold boot.
18241  */
18242 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18243 {
18244         struct net_device *netdev = pci_get_drvdata(pdev);
18245         struct tg3 *tp = netdev_priv(netdev);
18246         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18247         int err;
18248
18249         rtnl_lock();
18250
18251         if (pci_enable_device(pdev)) {
18252                 dev_err(&pdev->dev,
18253                         "Cannot re-enable PCI device after reset.\n");
18254                 goto done;
18255         }
18256
18257         pci_set_master(pdev);
18258         pci_restore_state(pdev);
18259         pci_save_state(pdev);
18260
18261         if (!netdev || !netif_running(netdev)) {
18262                 rc = PCI_ERS_RESULT_RECOVERED;
18263                 goto done;
18264         }
18265
18266         err = tg3_power_up(tp);
18267         if (err)
18268                 goto done;
18269
18270         rc = PCI_ERS_RESULT_RECOVERED;
18271
18272 done:
18273         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18274                 tg3_napi_enable(tp);
18275                 dev_close(netdev);
18276         }
18277         rtnl_unlock();
18278
18279         return rc;
18280 }
18281
18282 /**
18283  * tg3_io_resume - called when traffic can start flowing again.
18284  * @pdev: Pointer to PCI device
18285  *
18286  * This callback is called when the error recovery driver tells
18287  * us that its OK to resume normal operation.
18288  */
18289 static void tg3_io_resume(struct pci_dev *pdev)
18290 {
18291         struct net_device *netdev = pci_get_drvdata(pdev);
18292         struct tg3 *tp = netdev_priv(netdev);
18293         int err;
18294
18295         rtnl_lock();
18296
18297         if (!netdev || !netif_running(netdev))
18298                 goto done;
18299
18300         tg3_full_lock(tp, 0);
18301         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18302         tg3_flag_set(tp, INIT_COMPLETE);
18303         err = tg3_restart_hw(tp, true);
18304         if (err) {
18305                 tg3_full_unlock(tp);
18306                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18307                 goto done;
18308         }
18309
18310         netif_device_attach(netdev);
18311
18312         tg3_timer_start(tp);
18313
18314         tg3_netif_start(tp);
18315
18316         tg3_full_unlock(tp);
18317
18318         tg3_phy_start(tp);
18319
18320 done:
18321         tp->pcierr_recovery = false;
18322         rtnl_unlock();
18323 }
18324
/* PCI error (AER/EEH) recovery callbacks wired into the driver below. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18330
/* PCI driver glue: probe/remove, power management, error recovery and
 * shutdown entry points for all supported Tigon3 devices.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

/* Expands to module init/exit that register/unregister tg3_driver. */
module_pci_driver(tg3_driver);