/*
 * [TG3]: Add management FW version to ethtool report
 * drivers/net/tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
/* Driver identification strings, reported via ethtool and the probe banner. */
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.84"
#define DRV_MODULE_RELDATE      "October 12, 2007"

/* Default MAC/RX/TX mode register values; 0 == hardware reset defaults. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif message-level bitmap (used when tg3_debug == -1). */
#define TG3_DEF_MSG_ENABLE        \
	(NETIF_MSG_DRV          | \
	 NETIF_MSG_PROBE        | \
	 NETIF_MSG_LINK         | \
	 NETIF_MSG_TIMER        | \
	 NETIF_MSG_IFDOWN       | \
	 NETIF_MSG_IFUP         | \
	 NETIF_MSG_RX_ERR       | \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from entry counts above. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Advance a TX ring index with wraparound (ring size is a power of two). */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame + receive offset + slack for alignment. */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* number of entries in ethtool_test_keys[] / ETHTOOL_TEST results */
#define TG3_NUM_TEST            6
134
/* Banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* NETIF_MSG_* bitmap controlling driver verbosity (module parameter). */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ethtool statistics names (ETHTOOL_GSTRINGS).  The order and count here
 * must match struct tg3_ethtool_stats exactly, since TG3_NUM_STATS is
 * derived from that struct and values are reported positionally.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* Self-test names (ETHTOOL_TEST).  Order must match the result array the
 * self-test code fills in; count must equal TG3_NUM_TEST.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&tp->indirect_lock, flags);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337         spin_unlock_irqrestore(&tp->indirect_lock, flags);
338 }
339
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         writel(val, tp->regs + off);
343         readl(tp->regs + off);
344 }
345
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
347 {
348         unsigned long flags;
349         u32 val;
350
351         spin_lock_irqsave(&tp->indirect_lock, flags);
352         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354         spin_unlock_irqrestore(&tp->indirect_lock, flags);
355         return val;
356 }
357
/* Write a mailbox register via PCI config space for chips that cannot use
 * direct memory-mapped mailbox writes.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* These two mailboxes have dedicated PCI config registers; use them
	 * directly instead of going through the indirect window.
	 */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* All other mailboxes: indirect window, mailbox space at +0x5600. */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389 {
390         unsigned long flags;
391         u32 val;
392
393         spin_lock_irqsave(&tp->indirect_lock, flags);
394         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396         spin_unlock_irqrestore(&tp->indirect_lock, flags);
397         return val;
398 }
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	/* On chips with the PCI-X target or ICH workarounds, tp->write32
	 * points at a non-posted (indirect) method, so no flush is needed.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay, then read back to flush the write. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Mailbox write followed by a read-back to flush the posted write, except
 * on configurations where the read-back is unnecessary or unsafe
 * (no mailbox write reordering, or the ICH workaround is active).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* TX mailbox write.  Some chips require the value to be written twice
 * (TXD_MBOX_HWBUG), and some require a read-back to order the posted
 * write (MBOX_WRITE_REORDER).  The statement order here is deliberate.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* hardware bug workaround: write twice */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush the posted write */
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Register/mailbox access shorthands.  These implicitly use the local
 * variable 'tp'.  All macro arguments are parenthesized so expressions
 * such as tw32(reg_base + off, a | b) expand correctly (several of these
 * previously passed arguments unparenthesized).
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, (reg), (val))
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, (reg), (val))
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, (reg), (val))
#define tr32_mailbox(reg)	tp->read32_mbox(tp, (reg))

#define tw32(reg, val)		tp->write32(tp, (reg), (val))
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, (reg))
463
/* Write a 32-bit word into NIC on-board SRAM at offset @off, through the
 * memory window (either via PCI config space or MMIO, depending on
 * TG3_FLAG_SRAM_USE_CONFIG).  Serialized by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906 has no statistics block in SRAM; silently drop such writes. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a 32-bit word from NIC on-board SRAM at offset @off into *val,
 * through the memory window; counterpart of tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906 has no statistics block in SRAM; return 0 for that range. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
/* Acquire APE hardware lock @locknum (only TG3_APE_LOCK_MEM is supported).
 * Returns 0 on success (or when APE support is disabled), -EINVAL for an
 * unknown lock, -EBUSY if the lock could not be granted within ~1 ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	/* Each lock has a 4-byte REQ/GRANT register pair. */
	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox to disable interrupt generation.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if status work is pending (non-tagged mode); otherwise
 * kick the coalescing engine so any pending work generates one.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: unmask in MISC_HOST_CTRL, ack up to last_tag
 * in the interrupt mailbox, then force an interrupt if work is pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before interrupts re-enable */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI mode needs the mailbox written a second time. */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: disable NAPI polling and the TX queue.
 * trans_start is bumped first so the watchdog does not fire while
 * the queue is intentionally stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	/* Force a status-block check on the next interrupt enable. */
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the chip core clock to its normal source/rate.  The intermediate
 * writes (with ALTCLK set) are required by the hardware before the final
 * clock value can be written; each write waits 40 usec.  Skipped entirely
 * on CPMU-equipped and 5780-class chips, which manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down from 44MHz core via ALTCLK before the final write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707 #define PHY_BUSY_LOOPS  5000
708
/* Read PHY register @reg over the MI (MDIO) interface.
 * On success returns 0 and stores the 16-bit register value in *val;
 * returns -EBUSY if the MI interface never completed the frame.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling owns the MI port; pause it for the manual access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address + register + READ command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears, then re-read to latch the data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Returns 0 on success, -EBUSY if the MI interface never completed the
 * frame.  On 5906, writes to MII_TG3_CTRL / MII_TG3_AUX_CTRL are silently
 * skipped (those registers do not apply to its internal ePHY).
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling owns the MI port; pause it for the manual access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address + register + data + WRITE command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Enable or disable automatic MDI crossover on the PHY.  Only applies to
 * 5705+ copper PHYs.  The 5906 ePHY keeps the bit behind a shadow register
 * (EPHY_TEST must expose it first); other PHYs use the AUX_CTRL misc
 * shadow with the write-enable bit.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Open the ePHY shadow register window, flip the MDIX bit,
		 * then restore the original EPHY_TEST value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the AUX_CTRL misc shadow, read-modify-write the
		 * force-AMDIX bit, and set WREN so the write takes effect.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
/* Enable the PHY's ethernet wirespeed feature via a read-modify-write
 * of the AUX_CTRL shadow register, unless the board is flagged as not
 * supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* 0x7007 selects the shadow register to read back; bits 15
	 * and 4 are then set on the write-back.
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is functioning.
 *
 * On a macro timeout, *resetp is set so the caller
 * (tg3_phy_reset_5703_4_5) performs another BMCR reset before
 * retrying; on a pattern mismatch *resetp is left alone.  Returns
 * -EBUSY on any failure, 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP address block and enter
		 * pattern-write mode (reg 0x16 = 0x0002).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for completion. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words read back as low/high word pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: put the DSP back into a
				 * known state before failing.  Note
				 * *resetp is deliberately not set, so
				 * the caller retries without another
				 * BMCR reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
/* PHY reset worker for 5703/5704/5705: runs the DSP test-pattern
 * check (retrying up to 10 times, re-running a BMCR reset when the
 * check requests it), clears the channel patterns, and restores the
 * PHY to its normal operating configuration.
 *
 * Returns 0 on success or a negative errno from one of the steps.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every iteration above bailed out via
	 * "continue", phy9_orig is never captured yet is written back
	 * below (use of an uninitialized value) -- confirm whether
	 * this error path can occur in practice and initialize it if
	 * so.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and leave the DSP in a clean
	 * state.
	 */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Reset the tigon3 PHY and apply the chip/PHY-specific workarounds
 * (ADC bug, 5704 A0 bug, BER/jitter bugs, jumbo-frame configuration,
 * 5906 output voltage), then re-enable auto-MDIX and wirespeed.
 *
 * Returns 0 on success or -EBUSY / a negative errno on MII failures.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the EPHY out of IDDQ (power-down) first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: its status bits are latched, so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The link goes away across a PHY reset; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1177
/* Configure the GPIO-driven auxiliary (Vaux) power switching.
 *
 * On dual-port devices (5704/5714) the GPIOs are shared with the
 * peer function, so the peer's WOL/ASF needs are taken into account
 * and the sequencing is skipped when the peer has already completed
 * initialization.  Boards not flagged as NICs are left alone.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either port needs it for WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer that completed init owns the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			/* Three-step sequence: drive outputs high,
			 * assert GPIO0, then (where usable) release
			 * GPIO2.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch aux power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1273
1274 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1275 {
1276         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1277                 return 1;
1278         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1279                 if (speed != SPEED_10)
1280                         return 1;
1281         } else if (speed == SPEED_10)
1282                 return 1;
1283
1284         return 0;
1285 }
1286
1287 static int tg3_setup_phy(struct tg3 *, int);
1288
1289 #define RESET_KIND_SHUTDOWN     0
1290 #define RESET_KIND_INIT         1
1291 #define RESET_KIND_SUSPEND      2
1292
1293 static void tg3_write_sig_post_reset(struct tg3 *, int);
1294 static int tg3_halt_cpu(struct tg3 *, u32);
1295 static int tg3_nvram_lock(struct tg3 *);
1296 static void tg3_nvram_unlock(struct tg3 *);
1297
/* Put the PHY into its lowest-power state appropriate for the chip.
 *
 * SerDes parts are handled via the SG_DIG/SERDES config registers
 * (5704 only); the 5906 EPHY is powered down through the IDDQ bit in
 * GRC_MISC_CFG.  Chips with known power-down errata skip the final
 * BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Reset the EPHY, then drop it into IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force LEDs off and select the low-power AUX_CTRL
		 * setting before powering down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1337
1338 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1339 {
1340         u32 misc_host_ctrl;
1341         u16 power_control, power_caps;
1342         int pm = tp->pm_cap;
1343
1344         /* Make sure register accesses (indirect or otherwise)
1345          * will function correctly.
1346          */
1347         pci_write_config_dword(tp->pdev,
1348                                TG3PCI_MISC_HOST_CTRL,
1349                                tp->misc_host_ctrl);
1350
1351         pci_read_config_word(tp->pdev,
1352                              pm + PCI_PM_CTRL,
1353                              &power_control);
1354         power_control |= PCI_PM_CTRL_PME_STATUS;
1355         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1356         switch (state) {
1357         case PCI_D0:
1358                 power_control |= 0;
1359                 pci_write_config_word(tp->pdev,
1360                                       pm + PCI_PM_CTRL,
1361                                       power_control);
1362                 udelay(100);    /* Delay after power state change */
1363
1364                 /* Switch out of Vaux if it is a NIC */
1365                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1366                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1367
1368                 return 0;
1369
1370         case PCI_D1:
1371                 power_control |= 1;
1372                 break;
1373
1374         case PCI_D2:
1375                 power_control |= 2;
1376                 break;
1377
1378         case PCI_D3hot:
1379                 power_control |= 3;
1380                 break;
1381
1382         default:
1383                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1384                        "requested.\n",
1385                        tp->dev->name, state);
1386                 return -EINVAL;
1387         };
1388
1389         power_control |= PCI_PM_CTRL_PME_ENABLE;
1390
1391         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1392         tw32(TG3PCI_MISC_HOST_CTRL,
1393              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1394
1395         if (tp->link_config.phy_is_low_power == 0) {
1396                 tp->link_config.phy_is_low_power = 1;
1397                 tp->link_config.orig_speed = tp->link_config.speed;
1398                 tp->link_config.orig_duplex = tp->link_config.duplex;
1399                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1400         }
1401
1402         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1403                 tp->link_config.speed = SPEED_10;
1404                 tp->link_config.duplex = DUPLEX_HALF;
1405                 tp->link_config.autoneg = AUTONEG_ENABLE;
1406                 tg3_setup_phy(tp, 0);
1407         }
1408
1409         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1410                 u32 val;
1411
1412                 val = tr32(GRC_VCPU_EXT_CTRL);
1413                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1414         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1415                 int i;
1416                 u32 val;
1417
1418                 for (i = 0; i < 200; i++) {
1419                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1420                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1421                                 break;
1422                         msleep(1);
1423                 }
1424         }
1425         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1426                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1427                                                      WOL_DRV_STATE_SHUTDOWN |
1428                                                      WOL_DRV_WOL |
1429                                                      WOL_SET_MAGIC_PKT);
1430
1431         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1432
1433         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1434                 u32 mac_mode;
1435
1436                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1437                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1438                         udelay(40);
1439
1440                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1441                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1442                         else
1443                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1444
1445                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1446                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1447                             ASIC_REV_5700) {
1448                                 u32 speed = (tp->tg3_flags &
1449                                              TG3_FLAG_WOL_SPEED_100MB) ?
1450                                              SPEED_100 : SPEED_10;
1451                                 if (tg3_5700_link_polarity(tp, speed))
1452                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1453                                 else
1454                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1455                         }
1456                 } else {
1457                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1458                 }
1459
1460                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1461                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1462
1463                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1464                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1465                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1466
1467                 tw32_f(MAC_MODE, mac_mode);
1468                 udelay(100);
1469
1470                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1471                 udelay(10);
1472         }
1473
1474         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1475             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1476              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1477                 u32 base_val;
1478
1479                 base_val = tp->pci_clock_ctrl;
1480                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1481                              CLOCK_CTRL_TXCLK_DISABLE);
1482
1483                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1484                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1485         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1486                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1487                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1488                 /* do nothing */
1489         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1490                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1491                 u32 newbits1, newbits2;
1492
1493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1494                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1495                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1496                                     CLOCK_CTRL_TXCLK_DISABLE |
1497                                     CLOCK_CTRL_ALTCLK);
1498                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1499                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1500                         newbits1 = CLOCK_CTRL_625_CORE;
1501                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1502                 } else {
1503                         newbits1 = CLOCK_CTRL_ALTCLK;
1504                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1505                 }
1506
1507                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1508                             40);
1509
1510                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1511                             40);
1512
1513                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1514                         u32 newbits3;
1515
1516                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1517                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1518                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1519                                             CLOCK_CTRL_TXCLK_DISABLE |
1520                                             CLOCK_CTRL_44MHZ_CORE);
1521                         } else {
1522                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1523                         }
1524
1525                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1526                                     tp->pci_clock_ctrl | newbits3, 40);
1527                 }
1528         }
1529
1530         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1531             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1532             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1533                 tg3_power_down_phy(tp);
1534
1535         tg3_frob_aux_power(tp);
1536
1537         /* Workaround for unstable PLL clock */
1538         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1539             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1540                 u32 val = tr32(0x7d00);
1541
1542                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1543                 tw32(0x7d00, val);
1544                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1545                         int err;
1546
1547                         err = tg3_nvram_lock(tp);
1548                         tg3_halt_cpu(tp, RX_CPU_BASE);
1549                         if (!err)
1550                                 tg3_nvram_unlock(tp);
1551                 }
1552         }
1553
1554         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1555
1556         /* Finally, set the new power state. */
1557         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1558         udelay(100);    /* Delay after power state change */
1559
1560         return 0;
1561 }
1562
1563 static void tg3_link_report(struct tg3 *tp)
1564 {
1565         if (!netif_carrier_ok(tp->dev)) {
1566                 if (netif_msg_link(tp))
1567                         printk(KERN_INFO PFX "%s: Link is down.\n",
1568                                tp->dev->name);
1569         } else if (netif_msg_link(tp)) {
1570                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1571                        tp->dev->name,
1572                        (tp->link_config.active_speed == SPEED_1000 ?
1573                         1000 :
1574                         (tp->link_config.active_speed == SPEED_100 ?
1575                          100 : 10)),
1576                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1577                         "full" : "half"));
1578
1579                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1580                        "%s for RX.\n",
1581                        tp->dev->name,
1582                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1583                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1584         }
1585 }
1586
/* Resolve 802.3x flow control from the local and remote autoneg
 * advertisement words and program the MAC pause enables to match.
 *
 * @tp:         device state
 * @local_adv:  our advertisement, MII_ADVERTISE-style bitmask
 * @remote_adv: link partner ability, MII_LPA-style bitmask
 *
 * When TG3_FLAG_PAUSE_AUTONEG is clear, the previously configured
 * TG3_FLAG_RX_PAUSE/TG3_FLAG_TX_PAUSE bits are applied unchanged.
 * The MAC_RX_MODE/MAC_TX_MODE registers are only rewritten when the
 * resolved mode actually differs from the current one.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u32 new_tg3_flags = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

                /* Convert 1000BaseX flow control bits to 1000BaseT
                 * bits before resolving flow control.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        local_adv &= ~(ADVERTISE_PAUSE_CAP |
                                       ADVERTISE_PAUSE_ASYM);
                        remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                        if (local_adv & ADVERTISE_1000XPAUSE)
                                local_adv |= ADVERTISE_PAUSE_CAP;
                        if (local_adv & ADVERTISE_1000XPSE_ASYM)
                                local_adv |= ADVERTISE_PAUSE_ASYM;
                        if (remote_adv & LPA_1000XPAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (remote_adv & LPA_1000XPAUSE_ASYM)
                                remote_adv |= LPA_PAUSE_ASYM;
                }

                /* Pause resolution: combine our CAP/ASYM bits with the
                 * link partner's (IEEE 802.3 Annex 28B style truth table).
                 */
                if (local_adv & ADVERTISE_PAUSE_CAP) {
                        if (local_adv & ADVERTISE_PAUSE_ASYM) {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                                else if (remote_adv & LPA_PAUSE_ASYM)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE);
                        } else {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                        }
                } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if ((remote_adv & LPA_PAUSE_CAP) &&
                        (remote_adv & LPA_PAUSE_ASYM))
                                new_tg3_flags |= TG3_FLAG_TX_PAUSE;
                }

                tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
                tp->tg3_flags |= new_tg3_flags;
        } else {
                /* Pause autoneg disabled: keep the flags already set. */
                new_tg3_flags = tp->tg3_flags;
        }

        /* Apply the resolved RX pause setting; skip the (flushed) MMIO
         * write when nothing changed.
         */
        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Same for the TX pause setting. */
        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode) {
                tw32_f(MAC_TX_MODE, tp->tx_mode);
        }
}
1658
1659 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1660 {
1661         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1662         case MII_TG3_AUX_STAT_10HALF:
1663                 *speed = SPEED_10;
1664                 *duplex = DUPLEX_HALF;
1665                 break;
1666
1667         case MII_TG3_AUX_STAT_10FULL:
1668                 *speed = SPEED_10;
1669                 *duplex = DUPLEX_FULL;
1670                 break;
1671
1672         case MII_TG3_AUX_STAT_100HALF:
1673                 *speed = SPEED_100;
1674                 *duplex = DUPLEX_HALF;
1675                 break;
1676
1677         case MII_TG3_AUX_STAT_100FULL:
1678                 *speed = SPEED_100;
1679                 *duplex = DUPLEX_FULL;
1680                 break;
1681
1682         case MII_TG3_AUX_STAT_1000HALF:
1683                 *speed = SPEED_1000;
1684                 *duplex = DUPLEX_HALF;
1685                 break;
1686
1687         case MII_TG3_AUX_STAT_1000FULL:
1688                 *speed = SPEED_1000;
1689                 *duplex = DUPLEX_FULL;
1690                 break;
1691
1692         default:
1693                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1695                                  SPEED_10;
1696                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1697                                   DUPLEX_HALF;
1698                         break;
1699                 }
1700                 *speed = SPEED_INVALID;
1701                 *duplex = DUPLEX_INVALID;
1702                 break;
1703         };
1704 }
1705
1706 static void tg3_phy_copper_begin(struct tg3 *tp)
1707 {
1708         u32 new_adv;
1709         int i;
1710
1711         if (tp->link_config.phy_is_low_power) {
1712                 /* Entering low power mode.  Disable gigabit and
1713                  * 100baseT advertisements.
1714                  */
1715                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1716
1717                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1718                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1719                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1720                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1721
1722                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1723         } else if (tp->link_config.speed == SPEED_INVALID) {
1724                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1725                         tp->link_config.advertising &=
1726                                 ~(ADVERTISED_1000baseT_Half |
1727                                   ADVERTISED_1000baseT_Full);
1728
1729                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1730                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1731                         new_adv |= ADVERTISE_10HALF;
1732                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1733                         new_adv |= ADVERTISE_10FULL;
1734                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1735                         new_adv |= ADVERTISE_100HALF;
1736                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1737                         new_adv |= ADVERTISE_100FULL;
1738                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1739
1740                 if (tp->link_config.advertising &
1741                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1742                         new_adv = 0;
1743                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1744                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1745                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1746                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1747                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1748                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1749                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1750                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1751                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1752                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1753                 } else {
1754                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1755                 }
1756         } else {
1757                 /* Asking for a specific link mode. */
1758                 if (tp->link_config.speed == SPEED_1000) {
1759                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1760                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761
1762                         if (tp->link_config.duplex == DUPLEX_FULL)
1763                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1764                         else
1765                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1766                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1767                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1768                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1769                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1770                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1771                 } else {
1772                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1773
1774                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1775                         if (tp->link_config.speed == SPEED_100) {
1776                                 if (tp->link_config.duplex == DUPLEX_FULL)
1777                                         new_adv |= ADVERTISE_100FULL;
1778                                 else
1779                                         new_adv |= ADVERTISE_100HALF;
1780                         } else {
1781                                 if (tp->link_config.duplex == DUPLEX_FULL)
1782                                         new_adv |= ADVERTISE_10FULL;
1783                                 else
1784                                         new_adv |= ADVERTISE_10HALF;
1785                         }
1786                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1787                 }
1788         }
1789
1790         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1791             tp->link_config.speed != SPEED_INVALID) {
1792                 u32 bmcr, orig_bmcr;
1793
1794                 tp->link_config.active_speed = tp->link_config.speed;
1795                 tp->link_config.active_duplex = tp->link_config.duplex;
1796
1797                 bmcr = 0;
1798                 switch (tp->link_config.speed) {
1799                 default:
1800                 case SPEED_10:
1801                         break;
1802
1803                 case SPEED_100:
1804                         bmcr |= BMCR_SPEED100;
1805                         break;
1806
1807                 case SPEED_1000:
1808                         bmcr |= TG3_BMCR_SPEED1000;
1809                         break;
1810                 };
1811
1812                 if (tp->link_config.duplex == DUPLEX_FULL)
1813                         bmcr |= BMCR_FULLDPLX;
1814
1815                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1816                     (bmcr != orig_bmcr)) {
1817                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1818                         for (i = 0; i < 1500; i++) {
1819                                 u32 tmp;
1820
1821                                 udelay(10);
1822                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1823                                     tg3_readphy(tp, MII_BMSR, &tmp))
1824                                         continue;
1825                                 if (!(tmp & BMSR_LSTATUS)) {
1826                                         udelay(40);
1827                                         break;
1828                                 }
1829                         }
1830                         tg3_writephy(tp, MII_BMCR, bmcr);
1831                         udelay(40);
1832                 }
1833         } else {
1834                 tg3_writephy(tp, MII_BMCR,
1835                              BMCR_ANENABLE | BMCR_ANRESTART);
1836         }
1837 }
1838
/* Load the BCM5401 PHY DSP workaround coefficients.
 *
 * Writes a fixed, vendor-specified sequence of DSP register values
 * (each DSP access is an address write followed by a data write).
 * Returns 0 on success, or the OR of the individual tg3_writephy()
 * error codes on failure.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
        int err;

        /* Turn off tap power management. */
        /* Set Extended packet length bit */
        err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

        /* The address/value pairs below are opaque vendor magic for
         * the 5401 DSP workaround; do not reorder them.
         */
        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

        /* Brief settle delay before further MII traffic. */
        udelay(40);

        return err;
}
1866
1867 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1868 {
1869         u32 adv_reg, all_mask = 0;
1870
1871         if (mask & ADVERTISED_10baseT_Half)
1872                 all_mask |= ADVERTISE_10HALF;
1873         if (mask & ADVERTISED_10baseT_Full)
1874                 all_mask |= ADVERTISE_10FULL;
1875         if (mask & ADVERTISED_100baseT_Half)
1876                 all_mask |= ADVERTISE_100HALF;
1877         if (mask & ADVERTISED_100baseT_Full)
1878                 all_mask |= ADVERTISE_100FULL;
1879
1880         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1881                 return 0;
1882
1883         if ((adv_reg & all_mask) != all_mask)
1884                 return 0;
1885         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1886                 u32 tg3_ctrl;
1887
1888                 all_mask = 0;
1889                 if (mask & ADVERTISED_1000baseT_Half)
1890                         all_mask |= ADVERTISE_1000HALF;
1891                 if (mask & ADVERTISED_1000baseT_Full)
1892                         all_mask |= ADVERTISE_1000FULL;
1893
1894                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1895                         return 0;
1896
1897                 if ((tg3_ctrl & all_mask) != all_mask)
1898                         return 0;
1899         }
1900         return 1;
1901 }
1902
/* Bring up / re-evaluate the link on a copper PHY and program the MAC
 * to match (port mode, duplex, polarity, flow control).
 *
 * @tp:          device state
 * @force_reset: nonzero to reset the PHY before probing
 *
 * Reads PHY status, applies chip-specific workarounds (5401 DSP reload,
 * 5701 A0/B0 CRC fix), resolves speed/duplex — and pause settings when
 * autonegotiating — then reprograms MAC_MODE and updates the netdev
 * carrier state.  Returns 0, or a tg3_init_5401phy_dsp()/tg3_phy_reset()
 * error from the 5401 workaround path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, dummy;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        /* Acknowledge any latched MAC status bits before probing. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        tp->mi_mode = MAC_MI_MODE_BASE;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched; read twice for current state. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;

                /* 5401 with no link: reload the DSP workaround and wait
                 * up to ~10ms for the link to return.
                 */
                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 at gigabit may need a full reset and
                         * a second DSP reload if the link stayed down.
                         */
                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, 0x1c, 0x8c68);
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
                u32 val;

                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
                /* If bit 10 was clear, set it and leave the link reported
                 * down for this pass; relink handles the restart.
                 */
                if (!(val & (1 << 10))) {
                        val |= (1 << 10);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
                        goto relink;
                }
        }

        /* Poll for link (double read clears the latched BMSR state). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for a nonzero aux status, then decode speed/duplex. */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Poll BMCR until it returns a stable, sane value. */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if (bmcr & BMCR_ANENABLE) {
                                current_link_up = 1;

                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
                                if (!tg3_copper_is_advertising_all(tp,
                                                tp->link_config.advertising))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
                        }
                } else {
                        /* Forced mode: link counts as up only when the PHY
                         * reports exactly the requested speed and duplex.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        } else {
                                current_link_up = 0;
                        }
                }

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;
        }

        if (current_link_up == 1 &&
            (tp->link_config.active_duplex == DUPLEX_FULL) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;

                if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
                        local_adv = 0;
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                if (tg3_readphy(tp, MII_LPA, &remote_adv))
                        remote_adv = 0;

                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                /* If we are not advertising full pause capability,
                 * something is wrong.  Bring the link down and reconfigure.
                 */
                if (local_adv != ADVERTISE_PAUSE_CAP) {
                        current_link_up = 0;
                } else {
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }
relink:
        /* Link down (or waking from low power): reprogram the PHY and
         * re-check whether the link came straight back.
         */
        if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
                u32 tmp;

                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &tmp);
                if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
                    (tmp & BMSR_LSTATUS))
                        current_link_up = 1;
        }

        /* Program the MAC port mode: MII for 10/100, GMII otherwise. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X or fast PCI: re-ack MAC status and
         * write a magic value to the firmware mailbox — presumably a
         * hardware erratum workaround; TODO confirm against errata docs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
             (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Propagate link transitions to the stack and log them. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
2179
/* Software state for the 1000BaseX fiber autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control/status bits (set by the state machine) */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters: cur_time advances once per state-machine call;
         * link_time records when the current phase began.
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last RX config word compared */
        int ability_match_count;        /* consecutive identical config words */

        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig; /* ANEG_CFG_* words sent/received on the wire */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks (cur_time units) allowed for the link to settle in a phase. */
#define ANEG_STATE_SETTLE_TIME  10000
2243
2244 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2245                                    struct tg3_fiber_aneginfo *ap)
2246 {
2247         unsigned long delta;
2248         u32 rx_cfg_reg;
2249         int ret;
2250
2251         if (ap->state == ANEG_STATE_UNKNOWN) {
2252                 ap->rxconfig = 0;
2253                 ap->link_time = 0;
2254                 ap->cur_time = 0;
2255                 ap->ability_match_cfg = 0;
2256                 ap->ability_match_count = 0;
2257                 ap->ability_match = 0;
2258                 ap->idle_match = 0;
2259                 ap->ack_match = 0;
2260         }
2261         ap->cur_time++;
2262
2263         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2264                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2265
2266                 if (rx_cfg_reg != ap->ability_match_cfg) {
2267                         ap->ability_match_cfg = rx_cfg_reg;
2268                         ap->ability_match = 0;
2269                         ap->ability_match_count = 0;
2270                 } else {
2271                         if (++ap->ability_match_count > 1) {
2272                                 ap->ability_match = 1;
2273                                 ap->ability_match_cfg = rx_cfg_reg;
2274                         }
2275                 }
2276                 if (rx_cfg_reg & ANEG_CFG_ACK)
2277                         ap->ack_match = 1;
2278                 else
2279                         ap->ack_match = 0;
2280
2281                 ap->idle_match = 0;
2282         } else {
2283                 ap->idle_match = 1;
2284                 ap->ability_match_cfg = 0;
2285                 ap->ability_match_count = 0;
2286                 ap->ability_match = 0;
2287                 ap->ack_match = 0;
2288
2289                 rx_cfg_reg = 0;
2290         }
2291
2292         ap->rxconfig = rx_cfg_reg;
2293         ret = ANEG_OK;
2294
2295         switch(ap->state) {
2296         case ANEG_STATE_UNKNOWN:
2297                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2298                         ap->state = ANEG_STATE_AN_ENABLE;
2299
2300                 /* fallthru */
2301         case ANEG_STATE_AN_ENABLE:
2302                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2303                 if (ap->flags & MR_AN_ENABLE) {
2304                         ap->link_time = 0;
2305                         ap->cur_time = 0;
2306                         ap->ability_match_cfg = 0;
2307                         ap->ability_match_count = 0;
2308                         ap->ability_match = 0;
2309                         ap->idle_match = 0;
2310                         ap->ack_match = 0;
2311
2312                         ap->state = ANEG_STATE_RESTART_INIT;
2313                 } else {
2314                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2315                 }
2316                 break;
2317
2318         case ANEG_STATE_RESTART_INIT:
2319                 ap->link_time = ap->cur_time;
2320                 ap->flags &= ~(MR_NP_LOADED);
2321                 ap->txconfig = 0;
2322                 tw32(MAC_TX_AUTO_NEG, 0);
2323                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2324                 tw32_f(MAC_MODE, tp->mac_mode);
2325                 udelay(40);
2326
2327                 ret = ANEG_TIMER_ENAB;
2328                 ap->state = ANEG_STATE_RESTART;
2329
2330                 /* fallthru */
2331         case ANEG_STATE_RESTART:
2332                 delta = ap->cur_time - ap->link_time;
2333                 if (delta > ANEG_STATE_SETTLE_TIME) {
2334                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2335                 } else {
2336                         ret = ANEG_TIMER_ENAB;
2337                 }
2338                 break;
2339
2340         case ANEG_STATE_DISABLE_LINK_OK:
2341                 ret = ANEG_DONE;
2342                 break;
2343
2344         case ANEG_STATE_ABILITY_DETECT_INIT:
2345                 ap->flags &= ~(MR_TOGGLE_TX);
2346                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2347                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2348                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2349                 tw32_f(MAC_MODE, tp->mac_mode);
2350                 udelay(40);
2351
2352                 ap->state = ANEG_STATE_ABILITY_DETECT;
2353                 break;
2354
2355         case ANEG_STATE_ABILITY_DETECT:
2356                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2357                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2358                 }
2359                 break;
2360
2361         case ANEG_STATE_ACK_DETECT_INIT:
2362                 ap->txconfig |= ANEG_CFG_ACK;
2363                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2364                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2365                 tw32_f(MAC_MODE, tp->mac_mode);
2366                 udelay(40);
2367
2368                 ap->state = ANEG_STATE_ACK_DETECT;
2369
2370                 /* fallthru */
2371         case ANEG_STATE_ACK_DETECT:
2372                 if (ap->ack_match != 0) {
2373                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2374                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2375                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2376                         } else {
2377                                 ap->state = ANEG_STATE_AN_ENABLE;
2378                         }
2379                 } else if (ap->ability_match != 0 &&
2380                            ap->rxconfig == 0) {
2381                         ap->state = ANEG_STATE_AN_ENABLE;
2382                 }
2383                 break;
2384
2385         case ANEG_STATE_COMPLETE_ACK_INIT:
2386                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2387                         ret = ANEG_FAILED;
2388                         break;
2389                 }
2390                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2391                                MR_LP_ADV_HALF_DUPLEX |
2392                                MR_LP_ADV_SYM_PAUSE |
2393                                MR_LP_ADV_ASYM_PAUSE |
2394                                MR_LP_ADV_REMOTE_FAULT1 |
2395                                MR_LP_ADV_REMOTE_FAULT2 |
2396                                MR_LP_ADV_NEXT_PAGE |
2397                                MR_TOGGLE_RX |
2398                                MR_NP_RX);
2399                 if (ap->rxconfig & ANEG_CFG_FD)
2400                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2401                 if (ap->rxconfig & ANEG_CFG_HD)
2402                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2403                 if (ap->rxconfig & ANEG_CFG_PS1)
2404                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2405                 if (ap->rxconfig & ANEG_CFG_PS2)
2406                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2407                 if (ap->rxconfig & ANEG_CFG_RF1)
2408                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2409                 if (ap->rxconfig & ANEG_CFG_RF2)
2410                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2411                 if (ap->rxconfig & ANEG_CFG_NP)
2412                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2413
2414                 ap->link_time = ap->cur_time;
2415
2416                 ap->flags ^= (MR_TOGGLE_TX);
2417                 if (ap->rxconfig & 0x0008)
2418                         ap->flags |= MR_TOGGLE_RX;
2419                 if (ap->rxconfig & ANEG_CFG_NP)
2420                         ap->flags |= MR_NP_RX;
2421                 ap->flags |= MR_PAGE_RX;
2422
2423                 ap->state = ANEG_STATE_COMPLETE_ACK;
2424                 ret = ANEG_TIMER_ENAB;
2425                 break;
2426
2427         case ANEG_STATE_COMPLETE_ACK:
2428                 if (ap->ability_match != 0 &&
2429                     ap->rxconfig == 0) {
2430                         ap->state = ANEG_STATE_AN_ENABLE;
2431                         break;
2432                 }
2433                 delta = ap->cur_time - ap->link_time;
2434                 if (delta > ANEG_STATE_SETTLE_TIME) {
2435                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2436                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2437                         } else {
2438                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2439                                     !(ap->flags & MR_NP_RX)) {
2440                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2441                                 } else {
2442                                         ret = ANEG_FAILED;
2443                                 }
2444                         }
2445                 }
2446                 break;
2447
2448         case ANEG_STATE_IDLE_DETECT_INIT:
2449                 ap->link_time = ap->cur_time;
2450                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2451                 tw32_f(MAC_MODE, tp->mac_mode);
2452                 udelay(40);
2453
2454                 ap->state = ANEG_STATE_IDLE_DETECT;
2455                 ret = ANEG_TIMER_ENAB;
2456                 break;
2457
2458         case ANEG_STATE_IDLE_DETECT:
2459                 if (ap->ability_match != 0 &&
2460                     ap->rxconfig == 0) {
2461                         ap->state = ANEG_STATE_AN_ENABLE;
2462                         break;
2463                 }
2464                 delta = ap->cur_time - ap->link_time;
2465                 if (delta > ANEG_STATE_SETTLE_TIME) {
2466                         /* XXX another gem from the Broadcom driver :( */
2467                         ap->state = ANEG_STATE_LINK_OK;
2468                 }
2469                 break;
2470
2471         case ANEG_STATE_LINK_OK:
2472                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2473                 ret = ANEG_DONE;
2474                 break;
2475
2476         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2477                 /* ??? unimplemented */
2478                 break;
2479
2480         case ANEG_STATE_NEXT_PAGE_WAIT:
2481                 /* ??? unimplemented */
2482                 break;
2483
2484         default:
2485                 ret = ANEG_FAILED;
2486                 break;
2487         };
2488
2489         return ret;
2490 }
2491
2492 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2493 {
2494         int res = 0;
2495         struct tg3_fiber_aneginfo aninfo;
2496         int status = ANEG_FAILED;
2497         unsigned int tick;
2498         u32 tmp;
2499
2500         tw32_f(MAC_TX_AUTO_NEG, 0);
2501
2502         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2503         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2504         udelay(40);
2505
2506         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2507         udelay(40);
2508
2509         memset(&aninfo, 0, sizeof(aninfo));
2510         aninfo.flags |= MR_AN_ENABLE;
2511         aninfo.state = ANEG_STATE_UNKNOWN;
2512         aninfo.cur_time = 0;
2513         tick = 0;
2514         while (++tick < 195000) {
2515                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2516                 if (status == ANEG_DONE || status == ANEG_FAILED)
2517                         break;
2518
2519                 udelay(1);
2520         }
2521
2522         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2523         tw32_f(MAC_MODE, tp->mac_mode);
2524         udelay(40);
2525
2526         *flags = aninfo.flags;
2527
2528         if (status == ANEG_DONE &&
2529             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2530                              MR_LP_ADV_FULL_DUPLEX)))
2531                 res = 1;
2532
2533         return res;
2534 }
2535
/* Bring the BCM8002 SerDes PHY out of reset and into a usable state.
 *
 * Performs a software reset followed by a fixed sequence of writes to
 * vendor-specific PHY registers (the per-write comments below describe
 * each step; NOTE(review): register meanings are taken from those
 * inline comments, not a public datasheet — confirm before changing).
 * Busy-waits with udelay() throughout, so the whole sequence costs on
 * the order of 150ms; callers must tolerate that.  Leaves the channel
 * register deselected so the PHY ID can be read afterwards.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2585
/* Link bring-up for SerDes ports whose autonegotiation is performed by
 * the on-chip SG_DIG hardware block rather than in software.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS register value sampled by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.  Side
 * effects: may program flow control, and maintains tp->serdes_counter
 * plus the TG3_FLG2_PARALLEL_DETECT flag across calls.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* The MAC_SERDES_CFG workaround applies to everything except
	 * 5704 revs A0/A1.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently on
		 * (bit 31 set), turn it off and restore the default
		 * control value.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While a parallel-detected link is still counting
		 * down, keep reporting link up rather than restarting
		 * autoneg immediately.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the restart bit (30), then run with the
		 * desired control value.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* NOTE(review): SG_DIG_STATUS bit 1 is treated as
		 * "autoneg complete" and bits 19/20 as the partner's
		 * pause bits — inferred from usage here; confirm
		 * against register documentation before changing.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg has not completed: burn down the
			 * timeout, then fall back to parallel
			 * detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither sync nor signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2722
2723 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2724 {
2725         int current_link_up = 0;
2726
2727         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2728                 goto out;
2729
2730         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2731                 u32 flags;
2732                 int i;
2733
2734                 if (fiber_autoneg(tp, &flags)) {
2735                         u32 local_adv, remote_adv;
2736
2737                         local_adv = ADVERTISE_PAUSE_CAP;
2738                         remote_adv = 0;
2739                         if (flags & MR_LP_ADV_SYM_PAUSE)
2740                                 remote_adv |= LPA_PAUSE_CAP;
2741                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2742                                 remote_adv |= LPA_PAUSE_ASYM;
2743
2744                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2745
2746                         current_link_up = 1;
2747                 }
2748                 for (i = 0; i < 30; i++) {
2749                         udelay(20);
2750                         tw32_f(MAC_STATUS,
2751                                (MAC_STATUS_SYNC_CHANGED |
2752                                 MAC_STATUS_CFG_CHANGED));
2753                         udelay(40);
2754                         if ((tr32(MAC_STATUS) &
2755                              (MAC_STATUS_SYNC_CHANGED |
2756                               MAC_STATUS_CFG_CHANGED)) == 0)
2757                                 break;
2758                 }
2759
2760                 mac_status = tr32(MAC_STATUS);
2761                 if (current_link_up == 0 &&
2762                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2763                     !(mac_status & MAC_STATUS_RCVD_CFG))
2764                         current_link_up = 1;
2765         } else {
2766                 /* Forcing 1000FD link up. */
2767                 current_link_up = 1;
2768
2769                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2770                 udelay(40);
2771
2772                 tw32_f(MAC_MODE, tp->mac_mode);
2773                 udelay(40);
2774         }
2775
2776 out:
2777         return current_link_up;
2778 }
2779
/* Top-level link setup for TBI (fiber) ports.
 *
 * Chooses between the hardware SG_DIG autoneg path and the software
 * state-machine path based on TG3_FLG2_HW_AUTONEG, then updates
 * carrier state, LED control and link_config.active_* accordingly,
 * emitting a link report whenever anything user-visible changed.
 *
 * @tp:          device state
 * @force_reset: unused in this function (kept for signature symmetry
 *               with the other tg3_setup_*_phy() routines)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can tell afterwards whether
	 * anything worth reporting changed.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path for software autoneg: if the link is already up
	 * and stable (synced, signal present, no pending changes, no
	 * config words arriving), just ack the change bits and keep
	 * the existing link.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block so stale
	 * events are not processed later.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stay clear
	 * (bounded retries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Lost sync with autoneg expired: pulse SEND_CONFIGS
		 * to provoke the partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only run 1000FD; reflect that in link_config
	 * and drive the link LED accordingly.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or (if carrier is unchanged)
	 * any change in pause/speed/duplex settings.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2891
/* Link setup for fiber ports driven through an MII-style PHY
 * interface.  Handles three cases: waiting out a parallel-detected
 * link, autonegotiation (programs the 1000X advertisement and restarts
 * AN when it changed), and forced speed/duplex (which first forces a
 * link-down so the partner renegotiates).
 *
 * @tp:          device state
 * @force_reset: when non-zero, reset the PHY before configuring it
 *
 * Returns the OR-accumulation of tg3_readphy() statuses collected
 * along the way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status-change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched, so read it twice to get the
	 * current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714 reports link through MAC_TX_STATUS, not BMSR. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement from the requested
		 * settings, always including symmetric pause.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* If the advertisement changed (or AN was off),
		 * restart autoneg and return early; the link will be
		 * re-evaluated on a later call.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: compute the target BMCR from the
		 * requested duplex.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart AN so
				 * the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched bit: double read (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	/* Resolve the final speed/duplex from BMSR/BMCR and, under
	 * autoneg, from the common local/remote ability bits.
	 */
	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate carrier transitions to the net stack and report. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3058
/* Serdes parallel-detection state machine, run periodically.
 *
 * If autonegotiation is enabled but the link is still down after the
 * grace period (tp->serdes_counter), and we see signal detect without
 * incoming config code words, force the link up at 1000/full and mark
 * it as parallel-detected.  Conversely, if a parallel-detected link
 * starts receiving config code words again, re-enable autonegotiation
 * and clear the parallel-detect flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; presumably the first read returns
			 * latched status — TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3116
/* Top-level PHY (re)configuration entry point.
 *
 * Dispatches to the fiber, fiber-MII, or copper setup routine depending
 * on the PHY type flags, then reprograms MAC registers that depend on the
 * negotiated link state: TX slot time/IPG (1000/half needs a larger slot
 * time), statistics coalescing ticks on pre-5705 chips, and the PCIe L1
 * power-management threshold when the ASPM workaround is active.
 *
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only collect statistics coalescing ticks while the
		 * link is up; zero disables them when carrier is off.
		 */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: restore the saved L1 threshold.
		 * Link up: max out the threshold field.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3162
3163 /* This is called whenever we suspect that the system chipset is re-
3164  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3165  * is bogus tx completions. We try to recover by setting the
3166  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3167  * in the workqueue.
3168  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Must not already be in write-reorder mode, and must not be
	 * using the indirect mailbox method (which is ordered anyway).
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual chip reset
	 * happens later in tg3_reset_task().
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3183
3184 static inline u32 tg3_tx_avail(struct tg3 *tp)
3185 {
3186         smp_mb();
3187         return (tp->tx_pending -
3188                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3189 }
3190
3191 /* Tigon3 never reports partial packet sends.  So we do not
3192  * need special logic to handle SKBs that have not had all
3193  * of their frags sent yet, like SunGEM does.
3194  */
/* TX completion: walk the ring from the software consumer index up to
 * the hardware consumer index, unmapping DMA and freeing each completed
 * skb.  On any inconsistency (NULL skb, or a frag slot that looks wrong)
 * we bail into tg3_tx_recover().  Finally, wake the queue if it was
 * stopped and enough descriptors are now free.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the chip and driver
		 * disagree about the ring state — trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment; only the head slot owns the skb,
		 * so frag slots must have ri->skb == NULL.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with the
	 * transmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3258
3259 /* Returns size of skb allocated or < 0 on error.
3260  *
3261  * We only need to fill in the address because the other members
3262  * of the RX descriptor are invariant, see tg3_init_rings.
3263  *
3264  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3265  * posting buffers we only dirty the first cache line of the RX
3266  * descriptor (containing the address).  Whereas for the RX status
3267  * buffers the cpu only reads the last cacheline of the RX descriptor
3268  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3269  */
3270 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3271                             int src_idx, u32 dest_idx_unmasked)
3272 {
3273         struct tg3_rx_buffer_desc *desc;
3274         struct ring_info *map, *src_map;
3275         struct sk_buff *skb;
3276         dma_addr_t mapping;
3277         int skb_size, dest_idx;
3278
3279         src_map = NULL;
3280         switch (opaque_key) {
3281         case RXD_OPAQUE_RING_STD:
3282                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3283                 desc = &tp->rx_std[dest_idx];
3284                 map = &tp->rx_std_buffers[dest_idx];
3285                 if (src_idx >= 0)
3286                         src_map = &tp->rx_std_buffers[src_idx];
3287                 skb_size = tp->rx_pkt_buf_sz;
3288                 break;
3289
3290         case RXD_OPAQUE_RING_JUMBO:
3291                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3292                 desc = &tp->rx_jumbo[dest_idx];
3293                 map = &tp->rx_jumbo_buffers[dest_idx];
3294                 if (src_idx >= 0)
3295                         src_map = &tp->rx_jumbo_buffers[src_idx];
3296                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3297                 break;
3298
3299         default:
3300                 return -EINVAL;
3301         };
3302
3303         /* Do not overwrite any of the map or rp information
3304          * until we are sure we can commit to a new buffer.
3305          *
3306          * Callers depend upon this behavior and assume that
3307          * we leave everything unchanged if we fail.
3308          */
3309         skb = netdev_alloc_skb(tp->dev, skb_size);
3310         if (skb == NULL)
3311                 return -ENOMEM;
3312
3313         skb_reserve(skb, tp->rx_offset);
3314
3315         mapping = pci_map_single(tp->pdev, skb->data,
3316                                  skb_size - tp->rx_offset,
3317                                  PCI_DMA_FROMDEVICE);
3318
3319         map->skb = skb;
3320         pci_unmap_addr_set(map, mapping, mapping);
3321
3322         if (src_map != NULL)
3323                 src_map->skb = NULL;
3324
3325         desc->addr_hi = ((u64)mapping >> 32);
3326         desc->addr_lo = ((u64)mapping & 0xffffffff);
3327
3328         return skb_size;
3329 }
3330
3331 /* We only need to move over in the address because the other
3332  * members of the RX descriptor are invariant.  See notes above
3333  * tg3_alloc_rx_skb for full details.
3334  */
3335 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3336                            int src_idx, u32 dest_idx_unmasked)
3337 {
3338         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3339         struct ring_info *src_map, *dest_map;
3340         int dest_idx;
3341
3342         switch (opaque_key) {
3343         case RXD_OPAQUE_RING_STD:
3344                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3345                 dest_desc = &tp->rx_std[dest_idx];
3346                 dest_map = &tp->rx_std_buffers[dest_idx];
3347                 src_desc = &tp->rx_std[src_idx];
3348                 src_map = &tp->rx_std_buffers[src_idx];
3349                 break;
3350
3351         case RXD_OPAQUE_RING_JUMBO:
3352                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3353                 dest_desc = &tp->rx_jumbo[dest_idx];
3354                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3355                 src_desc = &tp->rx_jumbo[src_idx];
3356                 src_map = &tp->rx_jumbo_buffers[src_idx];
3357                 break;
3358
3359         default:
3360                 return;
3361         };
3362
3363         dest_map->skb = src_map->skb;
3364         pci_unmap_addr_set(dest_map, mapping,
3365                            pci_unmap_addr(src_map, mapping));
3366         dest_desc->addr_hi = src_desc->addr_hi;
3367         dest_desc->addr_lo = src_desc->addr_lo;
3368
3369         src_map->skb = NULL;
3370 }
3371
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the hardware-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3378
3379 /* The RX ring scheme is composed of multiple rings which post fresh
3380  * buffers to the chip, and one special ring the chip uses to report
3381  * status back to the host.
3382  *
3383  * The special ring reports the status of received packets to the
3384  * host.  The chip does not write into the original descriptor the
3385  * RX buffer was obtained from.  The chip simply takes the original
3386  * descriptor as provided by the host, updates the status and length
3387  * field, then writes this into the next status ring entry.
3388  *
3389  * Each ring the host uses to post buffers to the chip is described
3390  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3391  * it is first placed into the on-chip ram.  When the packet's length
3392  * is known, it walks down the TG3_BDINFO entries to select the ring.
3393  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3394  * which is within the range of the new packet's length is chosen.
3395  *
3396  * The "separate ring for rx status" scheme may sound queer, but it makes
3397  * sense from a cache coherency perspective.  If only the host writes
3398  * to the buffer post rings, and only the chip writes to the rx status
3399  * rings, then cache lines never move beyond shared-modified state.
3400  * If both the host and chip were to write into the same ring, cache line
3401  * eviction could occur since both entities want it in an exclusive state.
3402  */
/* RX completion: process up to @budget entries from the RX return ring.
 *
 * Large packets keep their original buffer (a fresh one is allocated and
 * posted in its place); small packets are copied into a new skb so the
 * original buffer can be recycled.  After the loop, the return ring is
 * ACKed and the buffer-post ring producer mailboxes are updated for any
 * rings we consumed from.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which ring (std/jumbo) and
		 * which slot in that ring this packet's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Post a replacement buffer first; only if that
			 * succeeds do we hand the original skb upstream.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes so the IP header is aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip reports
		 * a valid TCP/UDP pseudo-header checksum of 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the std producer index so the chip
		 * never runs out of posted buffers within one poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3558
/* One pass of NAPI work: link-change handling, TX completions, then RX
 * within the remaining budget.  Returns the updated work_done count;
 * returns early (without RX) if tg3_tx() flagged a TX recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear LINK_CHG but keep UPDATED set before
			 * reconfiguring the PHY under tp->lock.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3592
/* NAPI poll callback.  Loops calling tg3_poll_work() until either the
 * budget is exhausted (stay scheduled) or no work remains (complete NAPI
 * and re-enable interrupts via tg3_restart_ints()).  If a TX recovery is
 * pending, completes NAPI and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3633
/* Stop the IRQ handler from doing further work: set irq_sync (checked by
 * the handlers via tg3_irq_sync()) and wait for any in-flight handler to
 * finish.  The barrier orders the flag write before synchronize_irq().
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3643
3644 static inline int tg3_irq_sync(struct tg3 *tp)
3645 {
3646         return tp->irq_sync;
3647 }
3648
3649 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3650  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3651  * with as well.  Most of the time, this is not necessary except when
3652  * shutting down the device.
3653  */
/* Take tp->lock (BH-disabled); if irq_sync is set, also quiesce the IRQ
 * handler so no interrupt-context code runs while the lock is held.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3660
/* Release tp->lock taken by tg3_full_lock().  Note: does not clear
 * irq_sync; callers re-enable interrupt processing separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3665
3666 /* One-shot MSI handler - Chip automatically disables interrupt
3667  * after sending MSI so driver doesn't have to do it.
3668  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX return entry before
	 * the poll routine touches them.
	 */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3682
3683 /* MSI ISR - No need to check for interrupt sharing and no need to
3684  * flush status block and interrupt mailbox. PCI ordering rules
3685  * guarantee that MSI will arrive after the status block.
3686  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3707
/* Legacy INTx interrupt handler (untagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3756
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * "new work" is detected by a status_tag change rather than the
 * SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3804
3805 /* ISR for interrupt test */
/* ISR for interrupt test: report handled (and mask further interrupts)
 * only if the status block was updated or INTA# is asserted for us.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3819
3820 static int tg3_init_hw(struct tg3 *, int);
3821 static int tg3_halt(struct tg3 *, int, int);
3822
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success, or the tg3_init_hw() error code; on failure
 * the chip is halted and the interface is closed.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		/* Re-init failed: shut the chip down and take the whole
		 * interface down.  The full lock is dropped around
		 * del_timer_sync()/dev_close() — presumably because they
		 * can sleep or recurse into tp's locks (TODO confirm) —
		 * and re-acquired before returning so the caller's
		 * unlock stays balanced.
		 */
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3844
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: drive the normal interrupt handler by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *priv = netdev_priv(dev);

	tg3_interrupt(priv->pdev->irq, dev);
}
#endif
3853
/* Workqueue handler that performs a full chip reset and re-init;
 * scheduled from tg3_tx_timeout().  Runs in process context.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface went down before we ran. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock across tg3_netif_stop(), then re-take it with
	 * irq_sync requested (second argument == 1).
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch whether a timer restart was requested and clear the
	 * request so it is acted on at most once.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: swap in alternate mailbox write handlers
		 * (tg3_write_flush_reg32 presumably flushes the write —
		 * confirm against the helper definitions) and record that
		 * mailbox write reordering must be assumed from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	/* Full shutdown followed by re-init (with PHY reset). */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3894
3895 static void tg3_dump_short_state(struct tg3 *tp)
3896 {
3897         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3898                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3899         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3900                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3901 }
3902
3903 static void tg3_tx_timeout(struct net_device *dev)
3904 {
3905         struct tg3 *tp = netdev_priv(dev);
3906
3907         if (netif_msg_tx_err(tp)) {
3908                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3909                        dev->name);
3910                 tg3_dump_short_state(tp);
3911         }
3912
3913         schedule_work(&tp->reset_task);
3914 }
3915
3916 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3917 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3918 {
3919         u32 base = (u32) mapping & 0xffffffff;
3920
3921         return ((base > 0xffffdcc0) &&
3922                 (base + len + 8 < base));
3923 }
3924
3925 /* Test for DMA addresses > 40-bit */
3926 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3927                                           int len)
3928 {
3929 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3930         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3931                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3932         return 0;
3933 #else
3934         return 0;
3935 #endif
3936 }
3937
3938 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3939
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Re-send @skb as a single linear copy whose mapping does not trip the
 * hardware bug, then unmap and reclaim the descriptors already posted
 * for the original skb in [*start, last_plus_one).  Returns 0 on
 * success; -1 when the copy could not be allocated or itself hit the
 * 4G boundary (the packet is silently dropped in that case).  The
 * original skb is always consumed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Post the copy as one descriptor at *start and
			 * advance *start past it for the caller.
			 */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	/* Slot 0 of the group held the linear head; the following slots
	 * held the page fragments, in order.  Slot 0 is re-purposed to
	 * own the copy (or NULL when the workaround failed above).
	 *
	 * NOTE(review): fragment slots were mapped with pci_map_page()
	 * in the xmit path but are unmapped here with pci_unmap_single()
	 * — equivalent on most platforms, but worth confirming.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3997
3998 static void tg3_set_txd(struct tg3 *tp, int entry,
3999                         dma_addr_t mapping, int len, u32 flags,
4000                         u32 mss_and_is_end)
4001 {
4002         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4003         int is_end = (mss_and_is_end & 0x1);
4004         u32 mss = (mss_and_is_end >> 1);
4005         u32 vlan_tag = 0;
4006
4007         if (is_end)
4008                 flags |= TXD_FLAG_END;
4009         if (flags & TXD_FLAG_VLAN) {
4010                 vlan_tag = flags >> 16;
4011                 flags &= 0xffff;
4012         }
4013         vlan_tag |= (mss << TXD_MSS_SHIFT);
4014
4015         txd->addr_hi = ((u64) mapping >> 32);
4016         txd->addr_lo = ((u64) mapping & 0xffffffff);
4017         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4018         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4019 }
4020
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	/* One descriptor is needed for the linear head plus one per
	 * page fragment; if the ring cannot hold them the queue should
	 * already have been stopped, hence the "BUG!" log below.
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The headers are rewritten below, so a private copy is
		 * needed when the header area is shared.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Fold the header length into the upper bits of the mss
		 * field (hardware-defined encoding, shifted left by 9).
		 * For IPv4, also pre-cook tot_len for the hardware and
		 * zero the IP checksum.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware TSO fills in the TCP checksum. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	/* VLAN tag rides in the upper 16 bits of base_flags; tg3_set_txd()
	 * splits it back out into the descriptor's vlan_tag field.
	 */
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the head slot owns the skb pointer; fragment slots below
	 * store NULL (see tg3_free_rings()).
	 */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Only the last descriptor of the packet carries
			 * the END marker (low bit).
			 */
			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Stop the queue if a max-fragment packet no longer fits; the
	 * immediate re-check guards against a race with TX reclaim
	 * having freed space in the meantime.
	 */
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4139
4140 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4141
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The skb is segmented in software and each segment is pushed back
 * through tg3_start_xmit_dma_bug() individually; the original skb is
 * always consumed (freed) before returning NETDEV_TX_OK.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		/* Stop the queue, then re-check: TX reclaim may have
		 * freed descriptors in the meantime, in which case the
		 * queue is woken again and we carry on.
		 */
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked out of the feature set. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* NOTE(review): only ERR_PTR returns are handled here; confirm
	 * that skb_gso_segment() cannot return NULL in this kernel, as
	 * the do/while below would dereference it.
	 */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;	/* detach segment before transmit */
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4174
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	/* One descriptor for the linear head plus one per fragment. */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* The headers are rewritten below, so a private copy is
		 * needed when the header area is shared.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip the TSO bug on affected
		 * chips; fall back to software GSO in that case.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		/* HW TSO computes the TCP checksum itself; firmware TSO
		 * needs the pseudo-header checksum seeded instead.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths (in 32-bit words) where the
		 * chip expects them: in the mss field for HW TSO and the
		 * 5705, in base_flags otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	/* Track whether any mapping trips the 4G-boundary or 40-bit
	 * hardware bugs; if so, the whole packet is re-sent as a linear
	 * copy after all descriptors have been written.
	 */
	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			/* Non-HW-TSO descriptors do not carry the mss. */
			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet; the
		 * workaround replaces the whole group with one linear
		 * copy and leaves 'start' just past it.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Stop the queue if a max-fragment packet no longer fits; the
	 * re-check guards against racing with TX reclaim.
	 */
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4348
4349 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4350                                int new_mtu)
4351 {
4352         dev->mtu = new_mtu;
4353
4354         if (new_mtu > ETH_DATA_LEN) {
4355                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4356                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4357                         ethtool_op_set_tso(dev, 0);
4358                 }
4359                 else
4360                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4361         } else {
4362                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4363                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4364                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4365         }
4366 }
4367
/* net_device change_mtu hook: validate the requested MTU and, when the
 * interface is running, quiesce/halt the chip, apply the new MTU (which
 * may toggle jumbo/TSO flags — see tg3_set_mtu()), and restart.
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the
 * tg3_restart_hw() error code.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* Full lock with irq_sync requested; the chip must be halted
	 * before the ring/buffer configuration is changed.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* On restart failure the interface is left stopped; tg3_restart_hw()
	 * has already torn the device down.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4401
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Pass 1: standard RX ring — unmap and free every posted skb. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Pass 2: jumbo RX ring — same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Pass 3: TX ring.  A packet occupies one slot for its linear
	 * head plus one slot per page fragment, and only the head slot
	 * holds the skb pointer — so i is advanced manually across each
	 * group rather than by the for statement.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Fragment slots may wrap past the ring end. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4473
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when not even a single RX buffer could
 * be allocated.  On partial RX allocation failure the corresponding
 * ring is simply shrunk (rx_pending / rx_jumbo_pending reduced).
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers in the standard ring
	 * when the MTU exceeds the standard Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	/* (The 64 bytes subtracted from the usable length below appear to
	 * be hardware-mandated slack — TODO confirm against chip docs.)
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				/* Unlike the standard-ring case above, a
				 * total jumbo failure frees everything.
				 */
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4563
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* Release everything tg3_alloc_consistent() set up.  The buffer
	 * bookkeeping arrays live in one kzalloc'd block addressed by
	 * rx_std_buffers (rx_jumbo_buffers and tx_buffers point into it,
	 * so only rx_std_buffers is freed).  Each DMA-coherent area is
	 * freed only if it was actually allocated, and its pointer is
	 * cleared so a second call is harmless.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4603
/*
 * Allocate all host-side ring bookkeeping and DMA-coherent ring/status
 * memory for the device.  Must not be invoked with interrupt sources
 * disabled and the hardware shutdown down.  Can sleep.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via tg3_free_consistent(), so no partial state leaks.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation holds all three software ring-info
	 * arrays (std RX, jumbo RX, TX); the jumbo and TX pointers
	 * below are carved out of this single buffer, which is why
	 * tg3_free_consistent() only kfree()s rx_std_buffers.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	/* DMA-coherent descriptor rings shared with the NIC. */
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	/* Status block and statistics block written by the hardware. */
	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() does not zero; clear the blocks the
	 * chip will update so stale garbage is never interpreted.
	 */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4665
4666 #define MAX_WAIT_CNT 1000
4667
4668 /* To stop a block, clear the enable bit and poll till it
4669  * clears.  tp->lock is held.
4670  */
4671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4672 {
4673         unsigned int i;
4674         u32 val;
4675
4676         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4677                 switch (ofs) {
4678                 case RCVLSC_MODE:
4679                 case DMAC_MODE:
4680                 case MBFREE_MODE:
4681                 case BUFMGR_MODE:
4682                 case MEMARB_MODE:
4683                         /* We can't enable/disable these bits of the
4684                          * 5705/5750, just say success.
4685                          */
4686                         return 0;
4687
4688                 default:
4689                         break;
4690                 };
4691         }
4692
4693         val = tr32(ofs);
4694         val &= ~enable_bit;
4695         tw32_f(ofs, val);
4696
4697         for (i = 0; i < MAX_WAIT_CNT; i++) {
4698                 udelay(100);
4699                 val = tr32(ofs);
4700                 if ((val & enable_bit) == 0)
4701                         break;
4702         }
4703
4704         if (i == MAX_WAIT_CNT && !silent) {
4705                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4706                        "ofs=%lx enable_bit=%x\n",
4707                        ofs, enable_bit);
4708                 return -ENODEV;
4709         }
4710
4711         return 0;
4712 }
4713
/* Quiesce the chip: disable interrupts, stop the receiver, then shut
 * down each RX/TX/DMA functional block in turn before a chip reset.
 * tp->lock is held.
 *
 * Errors from the individual block stops are OR-ed together, so the
 * return value indicates that *some* block failed to stop, not which.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the transmit-path blocks and the read DMA engine. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The MAC TX mode enable has no tg3_stop_block() helper;
	 * poll it by hand (100us steps, up to 100ms).
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset to flush the on-chip queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear host-visible state the hardware was writing into. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4776
4777 /* tp->lock is held. */
4778 static int tg3_nvram_lock(struct tg3 *tp)
4779 {
4780         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4781                 int i;
4782
4783                 if (tp->nvram_lock_cnt == 0) {
4784                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4785                         for (i = 0; i < 8000; i++) {
4786                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4787                                         break;
4788                                 udelay(20);
4789                         }
4790                         if (i == 8000) {
4791                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4792                                 return -ENODEV;
4793                         }
4794                 }
4795                 tp->nvram_lock_cnt++;
4796         }
4797         return 0;
4798 }
4799
4800 /* tp->lock is held. */
4801 static void tg3_nvram_unlock(struct tg3 *tp)
4802 {
4803         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4804                 if (tp->nvram_lock_cnt > 0)
4805                         tp->nvram_lock_cnt--;
4806                 if (tp->nvram_lock_cnt == 0)
4807                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4808         }
4809 }
4810
4811 /* tp->lock is held. */
4812 static void tg3_enable_nvram_access(struct tg3 *tp)
4813 {
4814         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4815             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4816                 u32 nvaccess = tr32(NVRAM_ACCESS);
4817
4818                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4819         }
4820 }
4821
4822 /* tp->lock is held. */
4823 static void tg3_disable_nvram_access(struct tg3 *tp)
4824 {
4825         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4826             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4827                 u32 nvaccess = tr32(NVRAM_ACCESS);
4828
4829                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4830         }
4831 }
4832
/* Post an event to the APE (management) firmware.
 *
 * Silently does nothing if the APE shared-memory signature or the
 * firmware-ready status is not present.  Best-effort: if the previous
 * event is still pending after the retry loop, the new event is
 * dropped without notifying the caller.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot free: write our event (with PENDING set) while
		 * still holding the APE memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		/* apedata still holds the pre-write status: clear here
		 * means our event was just posted, so stop retrying.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if we actually posted the event. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4868
4869 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4870 {
4871         u32 event;
4872         u32 apedata;
4873
4874         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4875                 return;
4876
4877         switch (kind) {
4878                 case RESET_KIND_INIT:
4879                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4880                                         APE_HOST_SEG_SIG_MAGIC);
4881                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4882                                         APE_HOST_SEG_LEN_MAGIC);
4883                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4884                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4885                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4886                                         APE_HOST_DRIVER_ID_MAGIC);
4887                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4888                                         APE_HOST_BEHAV_NO_PHYLOCK);
4889
4890                         event = APE_EVENT_STATUS_STATE_START;
4891                         break;
4892                 case RESET_KIND_SHUTDOWN:
4893                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4894                         break;
4895                 case RESET_KIND_SUSPEND:
4896                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4897                         break;
4898                 default:
4899                         return;
4900         }
4901
4902         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4903
4904         tg3_ape_send_event(tp, event);
4905 }
4906
4907 /* tp->lock is held. */
4908 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4909 {
4910         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4911                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4912
4913         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4914                 switch (kind) {
4915                 case RESET_KIND_INIT:
4916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4917                                       DRV_STATE_START);
4918                         break;
4919
4920                 case RESET_KIND_SHUTDOWN:
4921                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4922                                       DRV_STATE_UNLOAD);
4923                         break;
4924
4925                 case RESET_KIND_SUSPEND:
4926                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4927                                       DRV_STATE_SUSPEND);
4928                         break;
4929
4930                 default:
4931                         break;
4932                 };
4933         }
4934
4935         if (kind == RESET_KIND_INIT ||
4936             kind == RESET_KIND_SUSPEND)
4937                 tg3_ape_driver_state_change(tp, kind);
4938 }
4939
4940 /* tp->lock is held. */
4941 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4942 {
4943         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4944                 switch (kind) {
4945                 case RESET_KIND_INIT:
4946                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947                                       DRV_STATE_START_DONE);
4948                         break;
4949
4950                 case RESET_KIND_SHUTDOWN:
4951                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952                                       DRV_STATE_UNLOAD_DONE);
4953                         break;
4954
4955                 default:
4956                         break;
4957                 };
4958         }
4959
4960         if (kind == RESET_KIND_SHUTDOWN)
4961                 tg3_ape_driver_state_change(tp, kind);
4962 }
4963
4964 /* tp->lock is held. */
4965 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4966 {
4967         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4968                 switch (kind) {
4969                 case RESET_KIND_INIT:
4970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971                                       DRV_STATE_START);
4972                         break;
4973
4974                 case RESET_KIND_SHUTDOWN:
4975                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4976                                       DRV_STATE_UNLOAD);
4977                         break;
4978
4979                 case RESET_KIND_SUSPEND:
4980                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4981                                       DRV_STATE_SUSPEND);
4982                         break;
4983
4984                 default:
4985                         break;
4986                 };
4987         }
4988 }
4989
/* Wait for on-chip firmware to finish initializing after a reset.
 *
 * 5906 parts signal readiness via VCPU_STATUS; everything else writes
 * the complemented magic value back into the firmware mailbox.
 * Returns 0 on success (including the no-firmware case) or -ENODEV
 * if a 5906 never reports init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. (up to 1s) */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5028
5029 /* Save PCI command register before chip reset */
5030 static void tg3_save_pci_state(struct tg3 *tp)
5031 {
5032         u32 val;
5033
5034         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5035         tp->pci_cmd = val;
5036 }
5037
/* Restore PCI state after chip reset.
 *
 * Re-establishes indirect register access first (everything after
 * that depends on it), then rewrites the config-space registers that
 * the core-clock reset may have clobbered: PCISTATE, the command
 * register saved by tg3_save_pci_state(), cache line size / latency
 * timer (non-PCIe only), PCI-X relaxed ordering, and the MSI enable
 * bit on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Cache line size / latency timer only matter on conventional
	 * PCI and PCI-X; skip them on PCI Express.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5096
5097 static void tg3_stop_fw(struct tg3 *);
5098
/* tp->lock is held.
 *
 * Perform a full GRC core-clock chip reset and bring the chip back to
 * a state where registers and on-chip SRAM are accessible again.  The
 * statement order here is load-bearing: saved PCI state, the write
 * method swap, irq quiescing, the reset write, the post-reset delays
 * and the restore steps must happen exactly in this sequence.
 * Returns 0 or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe-related registers/bits; meaning inferred only from
		 * this usage -- confirm against Broadcom docs.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and un-halt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 bit 15 is an
			 * undocumented 5750 A0 workaround -- confirm.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Registers are accessible again; let the irq handler back in. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): 0x5000 write is a 5750 A3 workaround
		 * with no named constant -- confirm semantics.
		 */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 is undocumented -- confirm. */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5287
/* tp->lock is held.
 *
 * Ask the on-chip ASF firmware to pause itself before a reset.
 * Skipped when ASF is off, or when the APE manages the device
 * (the APE handshake is handled elsewhere).
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		/* Put the PAUSE command in the mailbox, then raise the
		 * driver-event bit to signal the RX CPU firmware.
		 * NOTE(review): bit 14 has no named constant here --
		 * presumably the GRC RX-CPU driver-event bit; confirm
		 * against tg3.h.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  (up to 100us;
		 * best-effort, a timeout is not reported)
		 */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5309
/* tp->lock is held.
 *
 * Fully halt the chip: pause ASF firmware, signal the pre-reset
 * state, quiesce the hardware, reset it, then write the post-reset
 * signatures.  Returns the result of tg3_chip_reset() (errors from
 * tg3_abort_hw() are intentionally ignored; the reset supersedes
 * whatever state a stuck block was in).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Was "if (err) return err; return 0;" -- collapsed to the
	 * equivalent single return.
	 */
	return err;
}
5330
5331 #define TG3_FW_RELEASE_MAJOR    0x0
5332 #define TG3_FW_RELASE_MINOR     0x0
5333 #define TG3_FW_RELEASE_FIX      0x0
5334 #define TG3_FW_START_ADDR       0x08000000
5335 #define TG3_FW_TEXT_ADDR        0x08000000
5336 #define TG3_FW_TEXT_LEN         0x9c0
5337 #define TG3_FW_RODATA_ADDR      0x080009c0
5338 #define TG3_FW_RODATA_LEN       0x60
5339 #define TG3_FW_DATA_ADDR        0x08000a40
5340 #define TG3_FW_DATA_LEN         0x20
5341 #define TG3_FW_SBSS_ADDR        0x08000a60
5342 #define TG3_FW_SBSS_LEN         0xc
5343 #define TG3_FW_BSS_ADDR         0x08000a70
5344 #define TG3_FW_BSS_LEN          0x10
5345
5346 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5347         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5348         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5349         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5350         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5351         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5352         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5353         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5354         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5355         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5356         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5357         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5358         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5359         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5360         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5361         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5362         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5363         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5364         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5365         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5366         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5367         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5368         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5369         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5370         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5371         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5372         0, 0, 0, 0, 0, 0,
5373         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5374         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5375         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5376         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5377         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5378         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5379         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5380         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5381         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5382         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5383         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5384         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5385         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5386         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5387         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5388         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5389         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5390         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5391         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5392         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5393         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5394         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5395         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5396         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5397         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5398         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5399         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5400         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5401         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5402         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5403         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5404         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5405         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5406         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5407         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5408         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5409         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5410         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5411         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5412         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5413         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5414         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5415         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5416         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5417         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5418         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5419         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5420         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5421         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5422         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5423         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5424         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5425         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5426         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5427         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5428         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5429         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5430         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5431         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5432         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5433         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5434         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5435         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5436         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5437         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5438 };
5439
/* Read-only data (.rodata) segment of the 5701 A0 firmware fix image
 * downloaded by tg3_load_5701_a0_firmware_fix().  The words appear to
 * encode short ASCII tags used by the firmware itself — presumably
 * event/error names; verify against the firmware source if needed.
 * The "+ 1" adds one spare element, presumably so a byte length that
 * is not a multiple of sizeof(u32) still fits — TODO confirm.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5447
#if 0 /* All zeros, don't eat up space with it. */
/* Initialized-data (.data) segment of the same firmware image.  Every
 * word is zero, so instead of compiling this table in, the loader is
 * given a NULL data pointer (see tg3_load_5701_a0_firmware_fix()) and
 * tg3_load_firmware_cpu() writes zeros for the whole section.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5454
/* Scratch-memory windows the RX/TX CPU firmware images are written
 * into by tg3_load_firmware_cpu() (16 KB each).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5459
5460 /* tp->lock is held. */
5461 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5462 {
5463         int i;
5464
5465         BUG_ON(offset == TX_CPU_BASE &&
5466             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5467
5468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5469                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5470
5471                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5472                 return 0;
5473         }
5474         if (offset == RX_CPU_BASE) {
5475                 for (i = 0; i < 10000; i++) {
5476                         tw32(offset + CPU_STATE, 0xffffffff);
5477                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5478                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5479                                 break;
5480                 }
5481
5482                 tw32(offset + CPU_STATE, 0xffffffff);
5483                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5484                 udelay(10);
5485         } else {
5486                 for (i = 0; i < 10000; i++) {
5487                         tw32(offset + CPU_STATE, 0xffffffff);
5488                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5489                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5490                                 break;
5491                 }
5492         }
5493
5494         if (i >= 10000) {
5495                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5496                        "and %s CPU\n",
5497                        tp->dev->name,
5498                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5499                 return -ENODEV;
5500         }
5501
5502         /* Clear firmware's nvram arbitration. */
5503         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5504                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5505         return 0;
5506 }
5507
/* Describes one downloadable firmware image as three sections.  Each
 * section has a load address, a length in bytes, and a pointer to its
 * 32-bit words; a NULL pointer means the section is written as zeros
 * (see the data ?: 0 ternaries in tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5519
5520 /* tp->lock is held. */
5521 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5522                                  int cpu_scratch_size, struct fw_info *info)
5523 {
5524         int err, lock_err, i;
5525         void (*write_op)(struct tg3 *, u32, u32);
5526
5527         if (cpu_base == TX_CPU_BASE &&
5528             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5529                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5530                        "TX cpu firmware on %s which is 5705.\n",
5531                        tp->dev->name);
5532                 return -EINVAL;
5533         }
5534
5535         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5536                 write_op = tg3_write_mem;
5537         else
5538                 write_op = tg3_write_indirect_reg32;
5539
5540         /* It is possible that bootcode is still loading at this point.
5541          * Get the nvram lock first before halting the cpu.
5542          */
5543         lock_err = tg3_nvram_lock(tp);
5544         err = tg3_halt_cpu(tp, cpu_base);
5545         if (!lock_err)
5546                 tg3_nvram_unlock(tp);
5547         if (err)
5548                 goto out;
5549
5550         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5551                 write_op(tp, cpu_scratch_base + i, 0);
5552         tw32(cpu_base + CPU_STATE, 0xffffffff);
5553         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5554         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5555                 write_op(tp, (cpu_scratch_base +
5556                               (info->text_base & 0xffff) +
5557                               (i * sizeof(u32))),
5558                          (info->text_data ?
5559                           info->text_data[i] : 0));
5560         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5561                 write_op(tp, (cpu_scratch_base +
5562                               (info->rodata_base & 0xffff) +
5563                               (i * sizeof(u32))),
5564                          (info->rodata_data ?
5565                           info->rodata_data[i] : 0));
5566         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5567                 write_op(tp, (cpu_scratch_base +
5568                               (info->data_base & 0xffff) +
5569                               (i * sizeof(u32))),
5570                          (info->data_data ?
5571                           info->data_data[i] : 0));
5572
5573         err = 0;
5574
5575 out:
5576         return err;
5577 }
5578
5579 /* tp->lock is held. */
5580 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5581 {
5582         struct fw_info info;
5583         int err, i;
5584
5585         info.text_base = TG3_FW_TEXT_ADDR;
5586         info.text_len = TG3_FW_TEXT_LEN;
5587         info.text_data = &tg3FwText[0];
5588         info.rodata_base = TG3_FW_RODATA_ADDR;
5589         info.rodata_len = TG3_FW_RODATA_LEN;
5590         info.rodata_data = &tg3FwRodata[0];
5591         info.data_base = TG3_FW_DATA_ADDR;
5592         info.data_len = TG3_FW_DATA_LEN;
5593         info.data_data = NULL;
5594
5595         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5596                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5597                                     &info);
5598         if (err)
5599                 return err;
5600
5601         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5602                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5603                                     &info);
5604         if (err)
5605                 return err;
5606
5607         /* Now startup only the RX cpu. */
5608         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5609         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5610
5611         for (i = 0; i < 5; i++) {
5612                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5613                         break;
5614                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5615                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5616                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5617                 udelay(1000);
5618         }
5619         if (i >= 5) {
5620                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5621                        "to set RX CPU PC, is %08x should be %08x\n",
5622                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5623                        TG3_FW_TEXT_ADDR);
5624                 return -ENODEV;
5625         }
5626         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5627         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5628
5629         return 0;
5630 }
5631

/* Layout of the TSO firmware image (release 1.6.0): section load
 * addresses and byte lengths of .text/.rodata/.data/.sbss/.bss.
 *
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE";
 * the name is kept as-is since it may be referenced elsewhere in the
 * file outside this view.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5647
5648 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5649         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5650         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5651         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5652         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5653         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5654         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5655         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5656         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5657         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5658         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5659         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5660         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5661         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5662         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5663         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5664         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5665         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5666         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5667         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5668         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5669         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5670         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5671         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5672         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5673         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5674         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5675         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5676         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5677         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5678         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5679         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5680         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5681         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5682         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5683         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5684         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5685         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5686         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5687         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5688         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5689         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5690         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5691         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5692         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5693         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5694         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5695         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5696         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5697         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5698         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5699         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5700         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5701         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5702         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5703         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5704         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5705         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5706         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5707         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5708         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5709         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5710         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5711         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5712         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5713         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5714         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5715         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5716         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5717         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5718         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5719         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5720         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5721         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5722         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5723         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5724         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5725         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5726         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5727         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5728         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5729         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5730         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5731         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5732         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5733         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5734         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5735         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5736         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5737         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5738         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5739         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5740         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5741         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5742         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5743         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5744         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5745         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5746         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5747         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5748         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5749         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5750         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5751         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5752         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5753         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5754         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5755         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5756         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5757         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5758         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5759         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5760         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5761         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5762         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5763         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5764         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5765         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5766         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5767         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5768         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5769         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5770         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5771         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5772         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5773         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5774         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5775         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5776         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5777         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5778         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5779         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5780         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5781         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5782         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5783         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5784         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5785         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5786         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5787         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5788         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5789         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5790         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5791         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5792         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5793         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5794         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5795         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5796         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5797         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5798         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5799         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5800         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5801         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5802         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5803         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5804         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5805         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5806         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5807         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5808         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5809         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5810         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5811         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5812         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5813         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5814         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5815         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5816         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5817         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5818         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5819         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5820         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5821         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5822         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5823         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5824         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5825         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5826         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5827         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5828         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5829         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5830         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5831         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5832         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5833         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5834         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5835         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5836         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5837         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5838         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5839         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5840         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5841         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5842         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5843         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5844         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5845         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5846         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5847         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5848         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5849         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5850         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5851         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5852         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5853         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5854         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5855         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5856         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5857         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5858         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5859         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5860         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5861         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5862         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5863         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5864         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5865         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5866         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5867         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5868         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5869         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5870         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5871         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5872         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5873         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5874         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5875         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5876         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5877         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5878         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5879         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5880         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5881         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5882         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5883         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5884         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5885         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5886         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5887         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5888         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5889         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5890         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5891         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5892         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5893         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5894         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5895         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5896         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5897         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5898         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5899         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5900         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5901         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5902         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5903         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5904         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5905         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5906         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5907         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5908         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5909         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5910         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5911         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5912         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5913         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5914         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5915         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5916         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5917         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5918         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5919         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5920         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5921         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5922         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5923         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5924         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5925         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5926         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5927         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5928         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5929         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5930         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5931         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5932         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5933 };
5934
/* Read-only data segment of the TSO firmware image.  The words are
 * ASCII text used by the firmware itself (e.g. "MainCpuB", "MainCpuA",
 * "stkoffld", "fatalErr"); loaded into NIC SRAM at TG3_TSO_FW_RODATA_ADDR.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5942
/* Initialized data segment of the TSO firmware image; contains the
 * firmware's version string "stkoffld_v1.6.0".  Loaded into NIC SRAM
 * at TG3_TSO_FW_DATA_ADDR.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5948
5949 /* 5705 needs a special version of the TSO firmware.  */
5950 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5951 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5952 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5953 #define TG3_TSO5_FW_START_ADDR          0x00010000
5954 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5955 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5956 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5957 #define TG3_TSO5_FW_RODATA_LEN          0x50
5958 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5959 #define TG3_TSO5_FW_DATA_LEN            0x20
5960 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5961 #define TG3_TSO5_FW_SBSS_LEN            0x28
5962 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5963 #define TG3_TSO5_FW_BSS_LEN             0x88
5964
5965 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5966         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5967         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5968         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5969         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5970         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5971         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5972         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5973         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5974         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5975         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5976         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5977         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5978         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5979         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5980         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5981         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5982         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5983         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5984         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5985         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5986         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5987         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5988         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5989         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5990         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5991         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5992         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5993         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5994         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5995         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5996         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5997         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5998         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5999         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6000         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6001         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6002         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6003         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6004         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6005         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6006         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6007         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6008         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6009         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6010         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6011         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6012         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6013         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6014         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6015         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6016         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6017         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6018         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6019         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6020         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6021         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6022         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6023         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6024         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6025         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6026         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6027         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6028         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6029         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6030         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6031         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6032         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6033         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6034         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6035         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6036         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6037         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6038         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6039         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6040         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6041         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6042         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6043         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6044         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6045         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6046         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6047         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6048         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6049         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6050         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6051         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6052         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6053         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6054         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6055         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6056         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6057         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6058         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6059         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6060         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6061         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6062         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6063         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6064         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6065         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6066         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6067         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6068         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6069         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6070         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6071         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6072         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6073         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6074         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6075         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6076         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6077         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6078         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6079         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6080         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6081         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6082         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6083         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6084         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6085         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6086         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6087         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6088         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6089         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6090         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6091         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6092         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6093         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6094         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6095         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6096         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6097         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6098         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6099         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6100         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6101         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6102         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6103         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6104         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6105         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6106         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6107         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6108         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6109         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6110         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6111         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6112         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6113         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6114         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6115         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6116         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6117         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6118         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6119         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6120         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6121         0x00000000, 0x00000000, 0x00000000,
6122 };
6123
/* Read-only data segment of the 5705-specific TSO firmware image
 * (ASCII strings: "MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 * Loaded into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6130
/* Initialized data segment of the 5705-specific TSO firmware image;
 * contains the firmware's version string "stkoffld_v1.2.0".  Loaded
 * into NIC SRAM at TG3_TSO5_FW_DATA_ADDR.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6135
/* tp->lock is held. */
/*
 * Download the TSO (TCP segmentation offload) firmware into one of the
 * NIC's on-chip MIPS CPUs and start it executing.
 *
 * Chips with hardware TSO support (TG3_FLG2_HW_TSO) need no firmware,
 * so this returns 0 immediately for them.  The 5705 uses a dedicated
 * firmware image run on the RX CPU, with scratch space carved out of
 * the mbuf pool; all other chips use the standard image run on the TX
 * CPU's scratch area.
 *
 * Returns 0 on success, the error from tg3_load_firmware_cpu() if the
 * image could not be written, or -ENODEV if the CPU refuses to latch
 * the new program counter after several attempts.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-based TSO: nothing to download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: special image on the RX CPU. */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover all loadable segments plus bss/sbss. */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other chips: standard image on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the PC took; retry a few times with the CPU halted,
	 * re-writing the PC each round.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Release the CPU from halt to start running the firmware. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
6207
6208
6209 /* tp->lock is held. */
6210 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6211 {
6212         u32 addr_high, addr_low;
6213         int i;
6214
6215         addr_high = ((tp->dev->dev_addr[0] << 8) |
6216                      tp->dev->dev_addr[1]);
6217         addr_low = ((tp->dev->dev_addr[2] << 24) |
6218                     (tp->dev->dev_addr[3] << 16) |
6219                     (tp->dev->dev_addr[4] <<  8) |
6220                     (tp->dev->dev_addr[5] <<  0));
6221         for (i = 0; i < 4; i++) {
6222                 if (i == 1 && skip_mac_1)
6223                         continue;
6224                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6225                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6226         }
6227
6228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6230                 for (i = 0; i < 12; i++) {
6231                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6232                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6233                 }
6234         }
6235
6236         addr_high = (tp->dev->dev_addr[0] +
6237                      tp->dev->dev_addr[1] +
6238                      tp->dev->dev_addr[2] +
6239                      tp->dev->dev_addr[3] +
6240                      tp->dev->dev_addr[4] +
6241                      tp->dev->dev_addr[5]) &
6242                 TX_BACKOFF_SEED_MASK;
6243         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6244 }
6245
6246 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6247 {
6248         struct tg3 *tp = netdev_priv(dev);
6249         struct sockaddr *addr = p;
6250         int err = 0, skip_mac_1 = 0;
6251
6252         if (!is_valid_ether_addr(addr->sa_data))
6253                 return -EINVAL;
6254
6255         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6256
6257         if (!netif_running(dev))
6258                 return 0;
6259
6260         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6261                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6262
6263                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6264                 addr0_low = tr32(MAC_ADDR_0_LOW);
6265                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6266                 addr1_low = tr32(MAC_ADDR_1_LOW);
6267
6268                 /* Skip MAC addr 1 if ASF is using it. */
6269                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6270                     !(addr1_high == 0 && addr1_low == 0))
6271                         skip_mac_1 = 1;
6272         }
6273         spin_lock_bh(&tp->lock);
6274         __tg3_set_mac_addr(tp, skip_mac_1);
6275         spin_unlock_bh(&tp->lock);
6276
6277         return err;
6278 }
6279
6280 /* tp->lock is held. */
6281 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6282                            dma_addr_t mapping, u32 maxlen_flags,
6283                            u32 nic_addr)
6284 {
6285         tg3_write_mem(tp,
6286                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6287                       ((u64) mapping >> 32));
6288         tg3_write_mem(tp,
6289                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6290                       ((u64) mapping & 0xffffffff));
6291         tg3_write_mem(tp,
6292                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6293                        maxlen_flags);
6294
6295         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6296                 tg3_write_mem(tp,
6297                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6298                               nic_addr);
6299 }
6300
6301 static void __tg3_set_rx_mode(struct net_device *);
6302 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6303 {
6304         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6305         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6306         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6307         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6308         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6309                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6310                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6311         }
6312         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6313         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6314         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6315                 u32 val = ec->stats_block_coalesce_usecs;
6316
6317                 if (!netif_carrier_ok(tp->dev))
6318                         val = 0;
6319
6320                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6321         }
6322 }
6323
6324 /* tp->lock is held. */
6325 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6326 {
6327         u32 val, rdmac_mode;
6328         int i, err, limit;
6329
6330         tg3_disable_ints(tp);
6331
6332         tg3_stop_fw(tp);
6333
6334         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6335
6336         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6337                 tg3_abort_hw(tp, 1);
6338         }
6339
6340         if (reset_phy)
6341                 tg3_phy_reset(tp);
6342
6343         err = tg3_chip_reset(tp);
6344         if (err)
6345                 return err;
6346
6347         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6348
6349         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6350                 val = tr32(TG3_CPMU_CTRL);
6351                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6352                 tw32(TG3_CPMU_CTRL, val);
6353         }
6354
6355         /* This works around an issue with Athlon chipsets on
6356          * B3 tigon3 silicon.  This bit has no effect on any
6357          * other revision.  But do not set this on PCI Express
6358          * chips and don't even touch the clocks if the CPMU is present.
6359          */
6360         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6361                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6362                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6363                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6364         }
6365
6366         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6367             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6368                 val = tr32(TG3PCI_PCISTATE);
6369                 val |= PCISTATE_RETRY_SAME_DMA;
6370                 tw32(TG3PCI_PCISTATE, val);
6371         }
6372
6373         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6374                 /* Allow reads and writes to the
6375                  * APE register and memory space.
6376                  */
6377                 val = tr32(TG3PCI_PCISTATE);
6378                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6379                        PCISTATE_ALLOW_APE_SHMEM_WR;
6380                 tw32(TG3PCI_PCISTATE, val);
6381         }
6382
6383         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6384                 /* Enable some hw fixes.  */
6385                 val = tr32(TG3PCI_MSI_DATA);
6386                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6387                 tw32(TG3PCI_MSI_DATA, val);
6388         }
6389
6390         /* Descriptor ring init may make accesses to the
6391          * NIC SRAM area to setup the TX descriptors, so we
6392          * can only do this after the hardware has been
6393          * successfully reset.
6394          */
6395         err = tg3_init_rings(tp);
6396         if (err)
6397                 return err;
6398
6399         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6400             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6401                 /* This value is determined during the probe time DMA
6402                  * engine test, tg3_test_dma.
6403                  */
6404                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6405         }
6406
6407         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6408                           GRC_MODE_4X_NIC_SEND_RINGS |
6409                           GRC_MODE_NO_TX_PHDR_CSUM |
6410                           GRC_MODE_NO_RX_PHDR_CSUM);
6411         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6412
6413         /* Pseudo-header checksum is done by hardware logic and not
6414          * the offload processers, so make the chip do the pseudo-
6415          * header checksums on receive.  For transmit it is more
6416          * convenient to do the pseudo-header checksum in software
6417          * as Linux does that on transmit for us in all cases.
6418          */
6419         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6420
6421         tw32(GRC_MODE,
6422              tp->grc_mode |
6423              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6424
6425         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6426         val = tr32(GRC_MISC_CFG);
6427         val &= ~0xff;
6428         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6429         tw32(GRC_MISC_CFG, val);
6430
6431         /* Initialize MBUF/DESC pool. */
6432         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6433                 /* Do nothing.  */
6434         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6435                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6437                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6438                 else
6439                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6440                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6441                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6442         }
6443         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6444                 int fw_len;
6445
6446                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6447                           TG3_TSO5_FW_RODATA_LEN +
6448                           TG3_TSO5_FW_DATA_LEN +
6449                           TG3_TSO5_FW_SBSS_LEN +
6450                           TG3_TSO5_FW_BSS_LEN);
6451                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6452                 tw32(BUFMGR_MB_POOL_ADDR,
6453                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6454                 tw32(BUFMGR_MB_POOL_SIZE,
6455                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6456         }
6457
6458         if (tp->dev->mtu <= ETH_DATA_LEN) {
6459                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6460                      tp->bufmgr_config.mbuf_read_dma_low_water);
6461                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6462                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6463                 tw32(BUFMGR_MB_HIGH_WATER,
6464                      tp->bufmgr_config.mbuf_high_water);
6465         } else {
6466                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6467                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6468                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6469                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6470                 tw32(BUFMGR_MB_HIGH_WATER,
6471                      tp->bufmgr_config.mbuf_high_water_jumbo);
6472         }
6473         tw32(BUFMGR_DMA_LOW_WATER,
6474              tp->bufmgr_config.dma_low_water);
6475         tw32(BUFMGR_DMA_HIGH_WATER,
6476              tp->bufmgr_config.dma_high_water);
6477
6478         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6479         for (i = 0; i < 2000; i++) {
6480                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6481                         break;
6482                 udelay(10);
6483         }
6484         if (i >= 2000) {
6485                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6486                        tp->dev->name);
6487                 return -ENODEV;
6488         }
6489
6490         /* Setup replenish threshold. */
6491         val = tp->rx_pending / 8;
6492         if (val == 0)
6493                 val = 1;
6494         else if (val > tp->rx_std_max_post)
6495                 val = tp->rx_std_max_post;
6496         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6497                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6498                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6499
6500                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6501                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6502         }
6503
6504         tw32(RCVBDI_STD_THRESH, val);
6505
6506         /* Initialize TG3_BDINFO's at:
6507          *  RCVDBDI_STD_BD:     standard eth size rx ring
6508          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6509          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6510          *
6511          * like so:
6512          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6513          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6514          *                              ring attribute flags
6515          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6516          *
6517          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6518          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6519          *
6520          * The size of each ring is fixed in the firmware, but the location is
6521          * configurable.
6522          */
6523         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6524              ((u64) tp->rx_std_mapping >> 32));
6525         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6526              ((u64) tp->rx_std_mapping & 0xffffffff));
6527         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6528              NIC_SRAM_RX_BUFFER_DESC);
6529
6530         /* Don't even try to program the JUMBO/MINI buffer descriptor
6531          * configs on 5705.
6532          */
6533         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6534                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6535                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6536         } else {
6537                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6538                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6539
6540                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6541                      BDINFO_FLAGS_DISABLED);
6542
6543                 /* Setup replenish threshold. */
6544                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6545
6546                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6547                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6548                              ((u64) tp->rx_jumbo_mapping >> 32));
6549                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6550                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6551                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6552                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6553                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6554                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6555                 } else {
6556                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6557                              BDINFO_FLAGS_DISABLED);
6558                 }
6559
6560         }
6561
6562         /* There is only one send ring on 5705/5750, no need to explicitly
6563          * disable the others.
6564          */
6565         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6566                 /* Clear out send RCB ring in SRAM. */
6567                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6568                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6569                                       BDINFO_FLAGS_DISABLED);
6570         }
6571
6572         tp->tx_prod = 0;
6573         tp->tx_cons = 0;
6574         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6575         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6576
6577         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6578                        tp->tx_desc_mapping,
6579                        (TG3_TX_RING_SIZE <<
6580                         BDINFO_FLAGS_MAXLEN_SHIFT),
6581                        NIC_SRAM_TX_BUFFER_DESC);
6582
6583         /* There is only one receive return ring on 5705/5750, no need
6584          * to explicitly disable the others.
6585          */
6586         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6587                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6588                      i += TG3_BDINFO_SIZE) {
6589                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6590                                       BDINFO_FLAGS_DISABLED);
6591                 }
6592         }
6593
6594         tp->rx_rcb_ptr = 0;
6595         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6596
6597         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6598                        tp->rx_rcb_mapping,
6599                        (TG3_RX_RCB_RING_SIZE(tp) <<
6600                         BDINFO_FLAGS_MAXLEN_SHIFT),
6601                        0);
6602
6603         tp->rx_std_ptr = tp->rx_pending;
6604         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6605                      tp->rx_std_ptr);
6606
6607         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6608                                                 tp->rx_jumbo_pending : 0;
6609         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6610                      tp->rx_jumbo_ptr);
6611
6612         /* Initialize MAC address and backoff seed. */
6613         __tg3_set_mac_addr(tp, 0);
6614
6615         /* MTU + ethernet header + FCS + optional VLAN tag */
6616         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6617
6618         /* The slot time is changed by tg3_setup_phy if we
6619          * run at gigabit with half duplex.
6620          */
6621         tw32(MAC_TX_LENGTHS,
6622              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6623              (6 << TX_LENGTHS_IPG_SHIFT) |
6624              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6625
6626         /* Receive rules. */
6627         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6628         tw32(RCVLPC_CONFIG, 0x0181);
6629
6630         /* Calculate RDMAC_MODE setting early, we need it to determine
6631          * the RCVLPC_STATE_ENABLE mask.
6632          */
6633         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6634                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6635                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6636                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6637                       RDMAC_MODE_LNGREAD_ENAB);
6638
6639         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6640                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6641                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6642                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6643
6644         /* If statement applies to 5705 and 5750 PCI devices only */
6645         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6646              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6647             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6648                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6649                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6650                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6651                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6652                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6653                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6654                 }
6655         }
6656
6657         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6658                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6659
6660         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6661                 rdmac_mode |= (1 << 27);
6662
6663         /* Receive/send statistics. */
6664         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6665                 val = tr32(RCVLPC_STATS_ENABLE);
6666                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6667                 tw32(RCVLPC_STATS_ENABLE, val);
6668         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6669                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6670                 val = tr32(RCVLPC_STATS_ENABLE);
6671                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6672                 tw32(RCVLPC_STATS_ENABLE, val);
6673         } else {
6674                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6675         }
6676         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6677         tw32(SNDDATAI_STATSENAB, 0xffffff);
6678         tw32(SNDDATAI_STATSCTRL,
6679              (SNDDATAI_SCTRL_ENABLE |
6680               SNDDATAI_SCTRL_FASTUPD));
6681
6682         /* Setup host coalescing engine. */
6683         tw32(HOSTCC_MODE, 0);
6684         for (i = 0; i < 2000; i++) {
6685                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6686                         break;
6687                 udelay(10);
6688         }
6689
6690         __tg3_set_coalesce(tp, &tp->coal);
6691
6692         /* set status block DMA address */
6693         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6694              ((u64) tp->status_mapping >> 32));
6695         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6696              ((u64) tp->status_mapping & 0xffffffff));
6697
6698         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6699                 /* Status/statistics block address.  See tg3_timer,
6700                  * the tg3_periodic_fetch_stats call there, and
6701                  * tg3_get_stats to see how this works for 5705/5750 chips.
6702                  */
6703                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6704                      ((u64) tp->stats_mapping >> 32));
6705                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6706                      ((u64) tp->stats_mapping & 0xffffffff));
6707                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6708                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6709         }
6710
6711         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6712
6713         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6714         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6715         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6716                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6717
6718         /* Clear statistics/status block in chip, and status block in ram. */
6719         for (i = NIC_SRAM_STATS_BLK;
6720              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6721              i += sizeof(u32)) {
6722                 tg3_write_mem(tp, i, 0);
6723                 udelay(40);
6724         }
6725         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6726
6727         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6728                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6729                 /* reset to prevent losing 1st rx packet intermittently */
6730                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6731                 udelay(10);
6732         }
6733
6734         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6735                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6736         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6737             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6738             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6739                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6740         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6741         udelay(40);
6742
6743         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6744          * If TG3_FLG2_IS_NIC is zero, we should read the
6745          * register to preserve the GPIO settings for LOMs. The GPIOs,
6746          * whether used as inputs or outputs, are set by boot code after
6747          * reset.
6748          */
6749         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6750                 u32 gpio_mask;
6751
6752                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6753                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6754                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6755
6756                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6757                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6758                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6759
6760                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6761                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6762
6763                 tp->grc_local_ctrl &= ~gpio_mask;
6764                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6765
6766                 /* GPIO1 must be driven high for eeprom write protect */
6767                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6768                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6769                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6770         }
6771         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6772         udelay(100);
6773
6774         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6775         tp->last_tag = 0;
6776
6777         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6778                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6779                 udelay(40);
6780         }
6781
6782         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6783                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6784                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6785                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6786                WDMAC_MODE_LNGREAD_ENAB);
6787
6788         /* If statement applies to 5705 and 5750 PCI devices only */
6789         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6790              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6792                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6793                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6794                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6795                         /* nothing */
6796                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6797                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6798                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6799                         val |= WDMAC_MODE_RX_ACCEL;
6800                 }
6801         }
6802
6803         /* Enable host coalescing bug fix */
6804         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6806             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6807             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6808                 val |= (1 << 29);
6809
6810         tw32_f(WDMAC_MODE, val);
6811         udelay(40);
6812
6813         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6814                 u16 pcix_cmd;
6815
6816                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6817                                      &pcix_cmd);
6818                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6819                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6820                         pcix_cmd |= PCI_X_CMD_READ_2K;
6821                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6822                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6823                         pcix_cmd |= PCI_X_CMD_READ_2K;
6824                 }
6825                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6826                                       pcix_cmd);
6827         }
6828
6829         tw32_f(RDMAC_MODE, rdmac_mode);
6830         udelay(40);
6831
6832         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6833         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6834                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6835
6836         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6837                 tw32(SNDDATAC_MODE,
6838                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6839         else
6840                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6841
6842         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6843         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6844         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6845         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6846         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6847                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6848         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6849         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6850
6851         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6852                 err = tg3_load_5701_a0_firmware_fix(tp);
6853                 if (err)
6854                         return err;
6855         }
6856
6857         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6858                 err = tg3_load_tso_firmware(tp);
6859                 if (err)
6860                         return err;
6861         }
6862
6863         tp->tx_mode = TX_MODE_ENABLE;
6864         tw32_f(MAC_TX_MODE, tp->tx_mode);
6865         udelay(100);
6866
6867         tp->rx_mode = RX_MODE_ENABLE;
6868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6869             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6870                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6871
6872         tw32_f(MAC_RX_MODE, tp->rx_mode);
6873         udelay(10);
6874
6875         if (tp->link_config.phy_is_low_power) {
6876                 tp->link_config.phy_is_low_power = 0;
6877                 tp->link_config.speed = tp->link_config.orig_speed;
6878                 tp->link_config.duplex = tp->link_config.orig_duplex;
6879                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6880         }
6881
6882         tp->mi_mode = MAC_MI_MODE_BASE;
6883         tw32_f(MAC_MI_MODE, tp->mi_mode);
6884         udelay(80);
6885
6886         tw32(MAC_LED_CTRL, tp->led_ctrl);
6887
6888         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6889         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6890                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6891                 udelay(10);
6892         }
6893         tw32_f(MAC_RX_MODE, tp->rx_mode);
6894         udelay(10);
6895
6896         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6897                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6898                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6899                         /* Set drive transmission level to 1.2V  */
6900                         /* only if the signal pre-emphasis bit is not set  */
6901                         val = tr32(MAC_SERDES_CFG);
6902                         val &= 0xfffff000;
6903                         val |= 0x880;
6904                         tw32(MAC_SERDES_CFG, val);
6905                 }
6906                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6907                         tw32(MAC_SERDES_CFG, 0x616000);
6908         }
6909
6910         /* Prevent chip from dropping frames when flow control
6911          * is enabled.
6912          */
6913         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6914
6915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6916             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6917                 /* Use hardware link auto-negotiation */
6918                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6919         }
6920
6921         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6922             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6923                 u32 tmp;
6924
6925                 tmp = tr32(SERDES_RX_CTRL);
6926                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6927                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6928                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6929                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6930         }
6931
6932         err = tg3_setup_phy(tp, 0);
6933         if (err)
6934                 return err;
6935
6936         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6937             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6938                 u32 tmp;
6939
6940                 /* Clear CRC stats. */
6941                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6942                         tg3_writephy(tp, MII_TG3_TEST1,
6943                                      tmp | MII_TG3_TEST1_CRC_EN);
6944                         tg3_readphy(tp, 0x14, &tmp);
6945                 }
6946         }
6947
6948         __tg3_set_rx_mode(tp->dev);
6949
6950         /* Initialize receive rules. */
6951         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6952         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6953         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6954         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6955
6956         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6957             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6958                 limit = 8;
6959         else
6960                 limit = 16;
6961         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6962                 limit -= 4;
6963         switch (limit) {
6964         case 16:
6965                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6966         case 15:
6967                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6968         case 14:
6969                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6970         case 13:
6971                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6972         case 12:
6973                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6974         case 11:
6975                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6976         case 10:
6977                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6978         case 9:
6979                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6980         case 8:
6981                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6982         case 7:
6983                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6984         case 6:
6985                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6986         case 5:
6987                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6988         case 4:
6989                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6990         case 3:
6991                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6992         case 2:
6993         case 1:
6994
6995         default:
6996                 break;
6997         };
6998
6999         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7000                 /* Write our heartbeat update interval to APE. */
7001                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7002                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7003
7004         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7005
7006         return 0;
7007 }
7008
7009 /* Called at device open time to get the chip ready for
7010  * packet processing.  Invoked with tp->lock held.
7011  */
7012 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7013 {
7014         int err;
7015
7016         /* Force the chip into D0. */
7017         err = tg3_set_power_state(tp, PCI_D0);
7018         if (err)
7019                 goto out;
7020
7021         tg3_switch_clocks(tp);
7022
7023         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7024
7025         err = tg3_reset_hw(tp, reset_phy);
7026
7027 out:
7028         return err;
7029 }
7030
/* Accumulate the chip's 32-bit statistics counter at register REG into
 * the driver's 64-bit counter PSTAT (a high/low u32 pair).  The low
 * word is added directly; if the sum wrapped (new low is smaller than
 * the value just added), carry one into the high word.  Single-eval
 * of REG; do/while(0) keeps the macro statement-safe.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7037
/* Fold the chip's 32-bit MAC and receive-list-placement hardware
 * statistics counters into the 64-bit counters in tp->hw_stats.
 * Called from the once-per-second portion of tg3_timer; see
 * tg3_get_stats for how these accumulated values are reported.
 * The register read order is preserved deliberately — each
 * TG3_STAT_ADD32 reads a live hardware counter.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip accumulation while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement (buffer descriptor) statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7078
/* tg3_timer() - periodic housekeeping timer, rearmed every
 * tp->timer_offset jiffies.  Handles the non-tagged-status interrupt
 * race workaround on every tick, fetches statistics and polls link
 * state once per second, and sends the ASF firmware heartbeat every
 * two seconds.  __opaque is the struct tg3 pointer cast to an
 * unsigned long (classic timer callback convention).
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* irq_sync is set while interrupts are being quiesced (e.g.
	 * during a reset); do no work this tick, just rearm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated: force a new interrupt so
			 * the ISR processes it.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick host coalescing to refresh the
			 * status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly; schedule
			 * a full chip reset from process context.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a link event either via the MI interrupt
			 * status bit or the link-state-changed bit,
			 * depending on configuration.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Redo PHY setup if the carrier is up but the link
			 * state changed, or the carrier is down while the
			 * PCS reports sync or signal detect.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Momentarily clear the port mode to
					 * force the link state machine to
					 * resynchronize.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Post the ALIVE3 command and its 4-byte payload
			 * into the firmware command mailbox.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the RX CPU event doorbell (bit 14) so the
			 * firmware notices the new command.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7198
7199 static int tg3_request_irq(struct tg3 *tp)
7200 {
7201         irq_handler_t fn;
7202         unsigned long flags;
7203         struct net_device *dev = tp->dev;
7204
7205         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7206                 fn = tg3_msi;
7207                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7208                         fn = tg3_msi_1shot;
7209                 flags = IRQF_SAMPLE_RANDOM;
7210         } else {
7211                 fn = tg3_interrupt;
7212                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7213                         fn = tg3_interrupt_tagged;
7214                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7215         }
7216         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7217 }
7218
/* tg3_test_interrupt() - verify the device can actually deliver an
 * interrupt.  Temporarily swaps in the test ISR, forces an interrupt
 * via host coalescing, and polls for up to ~50ms for evidence that it
 * fired.  The normal handler is reinstalled before returning.
 * Returns 0 on success, -EIO if no interrupt was observed, -ENODEV if
 * the interface is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the production handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick host coalescing to generate an interrupt immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * line both indicate the test ISR ran.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7272
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored; any other return value means the device was
 * left without a working interrupt handler.
 *
 * On MSI failure the chip must be reset because the failed MSI cycle
 * may have terminated with a PCI Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless we are actually running in MSI mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall the handler, now using legacy INTx. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7333
/* tg3_open() - net_device open callback.  Powers the chip up to D0,
 * allocates the DMA-consistent rings, installs the interrupt handler
 * (preferring MSI where supported and verifying it actually works),
 * initializes the hardware and starts the periodic timer.  On any
 * failure everything acquired so far is torn down and the error is
 * returned.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Bring the device to full power before touching registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and ring allocation on failure. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status needs only a 1 Hz timer; the non-tagged
		 * race workaround in tg3_timer() wants 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_counter divides ticks down to once-per-second
		 * work; asf_counter down to the 2-second ASF heartbeat.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI delivery works before depending on it;
		 * tg3_test_msi() falls back to INTx itself on -EIO.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is in place: start the timer, mark init complete
	 * and unmask interrupts.
	 */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7473
7474 #if 0
/* tg3_dump_state() - debug-only dump of chip state, currently compiled
 * out (enclosed in #if 0, invoked from the disabled hunk in
 * tg3_close()).  Prints PCI status, every major block's MODE/STATUS
 * registers, host coalescing addresses, BD info, NIC SRAM ring control
 * blocks, the host status/statistics blocks and a sample of NIC-side
 * TX/RX descriptors via printk.  Read-only with respect to hardware
 * apart from the register/SRAM reads themselves.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* Ring control blocks kept in NIC SRAM. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	/* NIC side jumbo RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
7700 #endif
7701
7702 static struct net_device_stats *tg3_get_stats(struct net_device *);
7703 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7704
/* tg3_close() - net_device stop callback.  Undoes tg3_open() roughly
 * in reverse: stops NAPI, any pending reset work and the timer, halts
 * the chip, releases the IRQ (and MSI), snapshots the statistics so
 * counters survive an ifdown/ifup cycle, frees the rings and drops to
 * D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Wait for any queued reset_task to finish before tearing down
	 * the structures it uses.
	 */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the cumulative counters: the hw stats block is about
	 * to be freed, and the *_prev copies seed the totals after the
	 * next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7748
7749 static inline unsigned long get_stat64(tg3_stat64_t *val)
7750 {
7751         unsigned long ret;
7752
7753 #if (BITS_PER_LONG == 32)
7754         ret = val->low;
7755 #else
7756         ret = ((u64)val->high << 32) | ((u64)val->low);
7757 #endif
7758         return ret;
7759 }
7760
/* calc_crc_errors() - cumulative receive CRC/FCS error count.
 *
 * For 5700/5701 parts with a copper PHY the count is maintained in
 * software: the PHY CRC counter is enabled via MII_TG3_TEST1 and the
 * latest value is read from PHY register 0x14 and accumulated into
 * tp->phy_crc_errors.  NOTE(review): the accumulation implies reg
 * 0x14 is a clear-on-read counter on these PHYs - confirm against
 * the PHY datasheet.  All other configurations return the counter
 * from the hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* (Re)enable the PHY CRC counter, then fetch it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7786
/* ESTAT_ADD() - fold one hardware counter into the ethtool stats:
 * new value = snapshot saved at last close + live hardware count.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* tg3_get_estats() - refresh and return the driver's ethtool
 * statistics.  Each counter is the sum of the snapshot taken at the
 * last device close (tp->estats_prev) and the current hardware
 * statistics block.  If the stats block is not mapped (device never
 * opened), the saved snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive-side internal/DMA counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Transmit-side internal/DMA counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Interrupt and ring bookkeeping counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7878
/* ethtool/netdev statistics hook.  Folds the NIC's 64-bit hardware MIB
 * counters into the generic net_device_stats structure.  Totals are
 * accumulated on top of net_stats_prev, which holds the counts gathered
 * before the last chip reset (the hardware counters clear on reset).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Stats block not mapped (device not fully brought up yet):
	 * report the last saved totals instead.
	 */
	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* tx_errors aggregates every TX failure class the MAC counts. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors go through calc_crc_errors() (defined elsewhere in
	 * this file) rather than a raw MIB counter.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7938
7939 static inline u32 calc_crc(unsigned char *buf, int len)
7940 {
7941         u32 reg;
7942         u32 tmp;
7943         int j, k;
7944
7945         reg = 0xffffffff;
7946
7947         for (j = 0; j < len; j++) {
7948                 reg ^= buf[j];
7949
7950                 for (k = 0; k < 8; k++) {
7951                         tmp = reg & 0x01;
7952
7953                         reg >>= 1;
7954
7955                         if (tmp) {
7956                                 reg ^= 0xedb88320;
7957                         }
7958                 }
7959         }
7960
7961         return ~reg;
7962 }
7963
7964 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7965 {
7966         /* accept or reject all multicast frames */
7967         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7968         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7969         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7970         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7971 }
7972
/* Program the MAC's receive filtering -- promiscuous bit, VLAN tag
 * retention, and the 128-bit multicast hash -- from dev->flags and the
 * device's multicast list.  Caller must hold tg3_full_lock().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash is the low 7 bits of the inverted CRC32:
			 * bits 6:5 select one of the four 32-bit hash
			 * registers, bits 4:0 select the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX_MODE register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8036
/* ndo set_rx_mode hook: refresh the RX filters under the full lock.
 * Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8048
8049 #define TG3_REGDUMP_LEN         (32 * 1024)
8050
/* ethtool get_regs_len hook: size of the register dump produced by
 * tg3_get_regs() below (fixed 32KB window).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8055
/* ethtool get_regs hook: dump selected device registers into the
 * caller's 32KB buffer at their native register offsets; any register
 * not explicitly read below remains zero.  Skipped entirely while the
 * PHY is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one 32-bit register into the dump at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read 'len' bytes of registers starting at 'base', placing them at
 * the same offsets within the dump buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register at its native offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only readable when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8128
/* ethtool get_eeprom_len hook: NVRAM size as detected at probe time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8135
8136 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8137 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8138
/* ethtool get_eeprom hook: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into 'data'.  NVRAM is read in aligned 32-bit words,
 * so unaligned head and tail bytes are handled separately.  Returns
 * -EAGAIN while the PHY is powered down.  On a read error, eeprom->len
 * reflects how many bytes were actually copied.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Output buffer keeps NVRAM data in little-endian order. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8200
8201 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8202
/* ethtool set_eeprom hook: write eeprom->len bytes from 'data' to NVRAM
 * at eeprom->offset.  NVRAM is written in aligned 32-bit words, so any
 * partial head/tail word is first read back and merged into a scratch
 * buffer (read-modify-write).  Returns -EAGAIN while the PHY is powered
 * down and -EINVAL on a bad magic cookie.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge caller bytes with the preserved head/tail words. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8261
/* ethtool get_settings hook: report supported link modes (based on the
 * 10/100-only and serdes capability flags), the advertising mask, and
 * -- when the interface is up -- the active speed/duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex are only meaningful while the link is
	 * being managed, i.e. the interface is up.
	 */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8296
8297 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8298 {
8299         struct tg3 *tp = netdev_priv(dev);
8300
8301         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8302                 /* These are the only valid advertisement bits allowed.  */
8303                 if (cmd->autoneg == AUTONEG_ENABLE &&
8304                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8305                                           ADVERTISED_1000baseT_Full |
8306                                           ADVERTISED_Autoneg |
8307                                           ADVERTISED_FIBRE)))
8308                         return -EINVAL;
8309                 /* Fiber can only do SPEED_1000.  */
8310                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8311                          (cmd->speed != SPEED_1000))
8312                         return -EINVAL;
8313         /* Copper cannot force SPEED_1000.  */
8314         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8315                    (cmd->speed == SPEED_1000))
8316                 return -EINVAL;
8317         else if ((cmd->speed == SPEED_1000) &&
8318                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8319                 return -EINVAL;
8320
8321         tg3_full_lock(tp, 0);
8322
8323         tp->link_config.autoneg = cmd->autoneg;
8324         if (cmd->autoneg == AUTONEG_ENABLE) {
8325                 tp->link_config.advertising = (cmd->advertising |
8326                                               ADVERTISED_Autoneg);
8327                 tp->link_config.speed = SPEED_INVALID;
8328                 tp->link_config.duplex = DUPLEX_INVALID;
8329         } else {
8330                 tp->link_config.advertising = 0;
8331                 tp->link_config.speed = cmd->speed;
8332                 tp->link_config.duplex = cmd->duplex;
8333         }
8334
8335         tp->link_config.orig_speed = tp->link_config.speed;
8336         tp->link_config.orig_duplex = tp->link_config.duplex;
8337         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8338
8339         if (netif_running(dev))
8340                 tg3_setup_phy(tp, 1);
8341
8342         tg3_full_unlock(tp);
8343
8344         return 0;
8345 }
8346
8347 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8348 {
8349         struct tg3 *tp = netdev_priv(dev);
8350
8351         strcpy(info->driver, DRV_MODULE_NAME);
8352         strcpy(info->version, DRV_MODULE_VERSION);
8353         strcpy(info->fw_version, tp->fw_ver);
8354         strcpy(info->bus_info, pci_name(tp->pdev));
8355 }
8356
8357 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8358 {
8359         struct tg3 *tp = netdev_priv(dev);
8360
8361         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8362                 wol->supported = WAKE_MAGIC;
8363         else
8364                 wol->supported = 0;
8365         wol->wolopts = 0;
8366         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8367                 wol->wolopts = WAKE_MAGIC;
8368         memset(&wol->sopass, 0, sizeof(wol->sopass));
8369 }
8370
8371 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8372 {
8373         struct tg3 *tp = netdev_priv(dev);
8374
8375         if (wol->wolopts & ~WAKE_MAGIC)
8376                 return -EINVAL;
8377         if ((wol->wolopts & WAKE_MAGIC) &&
8378             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8379                 return -EINVAL;
8380
8381         spin_lock_bh(&tp->lock);
8382         if (wol->wolopts & WAKE_MAGIC)
8383                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8384         else
8385                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8386         spin_unlock_bh(&tp->lock);
8387
8388         return 0;
8389 }
8390
/* ethtool get_msglevel hook: return the driver's netif message mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
8396
/* ethtool set_msglevel hook: set the driver's netif message mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
8402
/* ethtool set_tso hook.  Rejects enabling TSO on chips without TSO
 * support.  On HW_TSO_2-capable chips other than the 5906, IPv6 TSO
 * (and TSO-ECN on the 5761) is toggled in step with the IPv4 setting
 * before delegating to the generic ethtool TSO handler.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8423
/* ethtool nway_reset hook: restart autonegotiation on the copper PHY.
 * Returns -EAGAIN if the interface is down, -EINVAL on serdes devices
 * or when autoneg is not currently enabled (unless in parallel-detect
 * mode).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice and the first result discarded.
	 * NOTE(review): presumably a dummy read to flush a stale or
	 * latched value -- confirm against PHY errata before changing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8450
8451 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8452 {
8453         struct tg3 *tp = netdev_priv(dev);
8454
8455         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8456         ering->rx_mini_max_pending = 0;
8457         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8458                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8459         else
8460                 ering->rx_jumbo_max_pending = 0;
8461
8462         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8463
8464         ering->rx_pending = tp->rx_pending;
8465         ering->rx_mini_pending = 0;
8466         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8467                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8468         else
8469                 ering->rx_jumbo_pending = 0;
8470
8471         ering->tx_pending = tp->tx_pending;
8472 }
8473
/* ethtool set_ringparam hook: resize the RX, jumbo RX and TX rings.
 * The TX ring must leave room for a maximally-fragmented skb (3x the
 * fragment count on TSO_BUG chips).  If the interface is up, the chip
 * is halted and restarted with the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can post at most 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8513
8514 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8515 {
8516         struct tg3 *tp = netdev_priv(dev);
8517
8518         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8519         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8520         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8521 }
8522
8523 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8524 {
8525         struct tg3 *tp = netdev_priv(dev);
8526         int irq_sync = 0, err = 0;
8527
8528         if (netif_running(dev)) {
8529                 tg3_netif_stop(tp);
8530                 irq_sync = 1;
8531         }
8532
8533         tg3_full_lock(tp, irq_sync);
8534
8535         if (epause->autoneg)
8536                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8537         else
8538                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8539         if (epause->rx_pause)
8540                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8541         else
8542                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8543         if (epause->tx_pause)
8544                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8545         else
8546                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8547
8548         if (netif_running(dev)) {
8549                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8550                 err = tg3_restart_hw(tp, 1);
8551                 if (!err)
8552                         tg3_netif_start(tp);
8553         }
8554
8555         tg3_full_unlock(tp);
8556
8557         return err;
8558 }
8559
8560 static u32 tg3_get_rx_csum(struct net_device *dev)
8561 {
8562         struct tg3 *tp = netdev_priv(dev);
8563         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8564 }
8565
8566 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8567 {
8568         struct tg3 *tp = netdev_priv(dev);
8569
8570         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8571                 if (data != 0)
8572                         return -EINVAL;
8573                 return 0;
8574         }
8575
8576         spin_lock_bh(&tp->lock);
8577         if (data)
8578                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8579         else
8580                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8581         spin_unlock_bh(&tp->lock);
8582
8583         return 0;
8584 }
8585
8586 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8587 {
8588         struct tg3 *tp = netdev_priv(dev);
8589
8590         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8591                 if (data != 0)
8592                         return -EINVAL;
8593                 return 0;
8594         }
8595
8596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8600                 ethtool_op_set_tx_ipv6_csum(dev, data);
8601         else
8602                 ethtool_op_set_tx_csum(dev, data);
8603
8604         return 0;
8605 }
8606
8607 static int tg3_get_sset_count (struct net_device *dev, int sset)
8608 {
8609         switch (sset) {
8610         case ETH_SS_TEST:
8611                 return TG3_NUM_TEST;
8612         case ETH_SS_STATS:
8613                 return TG3_NUM_STATS;
8614         default:
8615                 return -EOPNOTSUPP;
8616         }
8617 }
8618
8619 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8620 {
8621         switch (stringset) {
8622         case ETH_SS_STATS:
8623                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8624                 break;
8625         case ETH_SS_TEST:
8626                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8627                 break;
8628         default:
8629                 WARN_ON(1);     /* we need a WARN() */
8630                 break;
8631         }
8632 }
8633
/* ethtool phys_id hook: identify the adapter by blinking its LEDs.
 * Toggles all LEDs on/off at 500ms intervals for 'data' seconds
 * (default 2 when data == 0), then restores the saved LED state.
 * A pending signal aborts the blink loop early.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Two half-second phases (on, then off) per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore the LED configuration chosen at init time. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8665
/* ethtool get_ethtool_stats hook: refresh tp->estats from the hardware
 * stats block via tg3_get_estats() and copy the whole array out.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8672
8673 #define NVRAM_TEST_SIZE 0x100
8674 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8675 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8676 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8677
/* NVRAM self test (ethtool offline test #0).
 *
 * Reads the magic word at offset 0 to identify the image format, pulls
 * the relevant portion of NVRAM into a temporary buffer, and verifies
 * the format's checksum scheme:
 *   - TG3_EEPROM_MAGIC:    regular image; two CRC32s (bootstrap header
 *     at 0x10 and manufacturing block at 0xfc) are checked.
 *   - TG3_EEPROM_MAGIC_FW: selfboot format 1; the 8-bit sum of all
 *     bytes must be zero.  Other selfboot minor formats are skipped.
 *   - TG3_EEPROM_MAGIC_HW: hardware selfboot; embedded per-byte parity
 *     bits are verified against the data bytes.
 *
 * Returns 0 on pass (or when there is nothing to check), -EIO on a
 * read or checksum failure, -ENOMEM if the buffer allocation fails.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word determines how much NVRAM must be read and
	 * which checksum scheme applies.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			/* Unknown selfboot variant: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Words are stored as cpu_to_le32(), i.e. in NVRAM byte order.
	 * NOTE(review): the cpu_to_be32()/cpu_to_le32() calls applied to
	 * buf[] below undo this conversion on both endiannesses (double
	 * swap on BE, double no-op on LE), so the comparisons are
	 * endian-safe even though the typing is sparse-unclean.
	 */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* The 8-bit sum of every byte in the image (including
		 * the stored checksum byte) must come out to zero.
		 */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes 0
		 * and 8 each carry 7 parity bits; byte 16 carries 6 and
		 * the byte after it a further 8.  All remaining bytes
		 * are data.  (Layout per the hw selfboot image format -
		 * presumably documented in Broadcom NVRAM specs.)
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have odd total
		 * weight: odd-weight data needs the parity bit clear,
		 * even-weight data needs it set.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8790
8791 #define TG3_SERDES_TIMEOUT_SEC  2
8792 #define TG3_COPPER_TIMEOUT_SEC  6
8793
8794 static int tg3_test_link(struct tg3 *tp)
8795 {
8796         int i, max;
8797
8798         if (!netif_running(tp->dev))
8799                 return -ENODEV;
8800
8801         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8802                 max = TG3_SERDES_TIMEOUT_SEC;
8803         else
8804                 max = TG3_COPPER_TIMEOUT_SEC;
8805
8806         for (i = 0; i < max; i++) {
8807                 if (netif_carrier_ok(tp->dev))
8808                         return 0;
8809
8810                 if (msleep_interruptible(1000))
8811                         break;
8812         }
8813
8814         return -EIO;
8815 }
8816
8817 /* Only test the commonly used registers */
/* Register self test (ethtool offline test #2).
 *
 * For each commonly used register in reg_tbl[]: save the current
 * value, write all-zeros and then all-ones patterns, and verify that
 * the read-only bits (read_mask) keep their original value while the
 * read/write bits (write_mask) take the written pattern.  The saved
 * value is restored before moving to the next entry.  Table entries
 * are filtered per chip family via the TG3_FL_* flags.
 *
 * Returns 0 on success, -EIO on the first mismatch (the failing
 * offset is logged when hw messages are enabled).
 */
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Terminator: offset 0xffff ends the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9037
9038 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9039 {
9040         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9041         int i;
9042         u32 j;
9043
9044         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9045                 for (j = 0; j < len; j += 4) {
9046                         u32 val;
9047
9048                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9049                         tg3_read_mem(tp, offset + j, &val);
9050                         if (val != test_pattern[i])
9051                                 return -EIO;
9052                 }
9053         }
9054         return 0;
9055 }
9056
9057 static int tg3_test_memory(struct tg3 *tp)
9058 {
9059         static struct mem_entry {
9060                 u32 offset;
9061                 u32 len;
9062         } mem_tbl_570x[] = {
9063                 { 0x00000000, 0x00b50},
9064                 { 0x00002000, 0x1c000},
9065                 { 0xffffffff, 0x00000}
9066         }, mem_tbl_5705[] = {
9067                 { 0x00000100, 0x0000c},
9068                 { 0x00000200, 0x00008},
9069                 { 0x00004000, 0x00800},
9070                 { 0x00006000, 0x01000},
9071                 { 0x00008000, 0x02000},
9072                 { 0x00010000, 0x0e000},
9073                 { 0xffffffff, 0x00000}
9074         }, mem_tbl_5755[] = {
9075                 { 0x00000200, 0x00008},
9076                 { 0x00004000, 0x00800},
9077                 { 0x00006000, 0x00800},
9078                 { 0x00008000, 0x02000},
9079                 { 0x00010000, 0x0c000},
9080                 { 0xffffffff, 0x00000}
9081         }, mem_tbl_5906[] = {
9082                 { 0x00000200, 0x00008},
9083                 { 0x00004000, 0x00400},
9084                 { 0x00006000, 0x00400},
9085                 { 0x00008000, 0x01000},
9086                 { 0x00010000, 0x01000},
9087                 { 0xffffffff, 0x00000}
9088         };
9089         struct mem_entry *mem_tbl;
9090         int err = 0;
9091         int i;
9092
9093         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9094                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9095                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9096                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9097                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9098                         mem_tbl = mem_tbl_5755;
9099                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9100                         mem_tbl = mem_tbl_5906;
9101                 else
9102                         mem_tbl = mem_tbl_5705;
9103         } else
9104                 mem_tbl = mem_tbl_570x;
9105
9106         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9107                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9108                     mem_tbl[i].len)) != 0)
9109                         break;
9110         }
9111
9112         return err;
9113 }
9114
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Run one loopback iteration in the given mode (MAC-internal or PHY
 * loopback).  Configures the MAC/PHY for loopback, transmits a single
 * 1514-byte frame carrying a known byte pattern, polls the status
 * block until the tx consumer and rx producer indices show the frame
 * completed and came back, then verifies the received payload.
 *
 * Returns 0 on success (or trivially on 5780 MAC loopback, see errata
 * note), -EIO on timeout or data mismatch, -EINVAL for an unknown
 * mode, -ENOMEM if the tx skb cannot be allocated.  The received skb
 * is deliberately left on the ring; tg3_free_rings reclaims it.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* NOTE(review): 0x1b is an unnamed shadow
				 * register; clearing bit 5 presumably
				 * disables a PHY test mode that would
				 * interfere with loopback - confirm
				 * against Broadcom 5906 PHY docs.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Build the frame: our own MAC as destination, zeroed bytes for
	 * source/type, then a counting byte pattern verified on receive.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx is current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the tx doorbell and read it back to flush the posting. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the counting payload pattern written above. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9285
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)

/* Loopback self test (ethtool offline test #4).
 *
 * Resets the chip, then runs tg3_run_loopback() in MAC-internal mode
 * and - unless the board has a serdes PHY - in PHY loopback mode.
 * On CPMU-equipped chips the CPMU hardware mutex is held and
 * link-speed-based power management is disabled around the MAC
 * loopback run, and restored afterwards.
 *
 * Returns 0 on success or a bitmask of TG3_*_LOOPBACK_FAILED flags
 * (TG3_LOOPBACK_FAILED if the device is down or the reset/mutex
 * acquisition fails).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9344
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Result slots in data[]: 0 = nvram, 1 = link, 2 = registers,
 * 3 = memory, 4 = loopback (bitmask from tg3_test_loopback),
 * 5 = interrupt.  Any failure also sets ETH_TEST_FL_FAILED in
 * etest->flags.
 *
 * NVRAM and link tests are non-intrusive.  The remaining tests run
 * only when ETH_TEST_FL_OFFLINE is requested: the netif is stopped,
 * the chip halted (with the on-chip CPUs stopped) under the full
 * lock, and the hardware restarted afterwards if the device was
 * running.  A PHY in low-power state is brought to D0 for the
 * duration and put back into D3hot at the end.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		/* Stop the on-chip CPUs while registers/memory are poked. */
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test takes its own locks. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9417
9418 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9419 {
9420         struct mii_ioctl_data *data = if_mii(ifr);
9421         struct tg3 *tp = netdev_priv(dev);
9422         int err;
9423
9424         switch(cmd) {
9425         case SIOCGMIIPHY:
9426                 data->phy_id = PHY_ADDR;
9427
9428                 /* fallthru */
9429         case SIOCGMIIREG: {
9430                 u32 mii_regval;
9431
9432                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9433                         break;                  /* We have no PHY */
9434
9435                 if (tp->link_config.phy_is_low_power)
9436                         return -EAGAIN;
9437
9438                 spin_lock_bh(&tp->lock);
9439                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9440                 spin_unlock_bh(&tp->lock);
9441
9442                 data->val_out = mii_regval;
9443
9444                 return err;
9445         }
9446
9447         case SIOCSMIIREG:
9448                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9449                         break;                  /* We have no PHY */
9450
9451                 if (!capable(CAP_NET_ADMIN))
9452                         return -EPERM;
9453
9454                 if (tp->link_config.phy_is_low_power)
9455                         return -EAGAIN;
9456
9457                 spin_lock_bh(&tp->lock);
9458                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9459                 spin_unlock_bh(&tp->lock);
9460
9461                 return err;
9462
9463         default:
9464                 /* do nothing */
9465                 break;
9466         }
9467         return -EOPNOTSUPP;
9468 }
9469
9470 #if TG3_VLAN_TAG_USED
9471 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9472 {
9473         struct tg3 *tp = netdev_priv(dev);
9474
9475         if (netif_running(dev))
9476                 tg3_netif_stop(tp);
9477
9478         tg3_full_lock(tp, 0);
9479
9480         tp->vlgrp = grp;
9481
9482         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9483         __tg3_set_rx_mode(dev);
9484
9485         if (netif_running(dev))
9486                 tg3_netif_start(tp);
9487
9488         tg3_full_unlock(tp);
9489 }
9490 #endif
9491
9492 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9493 {
9494         struct tg3 *tp = netdev_priv(dev);
9495
9496         memcpy(ec, &tp->coal, sizeof(*ec));
9497         return 0;
9498 }
9499
/* ethtool set_coalesce: validate the requested interrupt coalescing
 * parameters against chip limits, cache them in tp->coal, and program
 * the hardware if the interface is up.  Returns -EINVAL for any value
 * outside the supported range.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

        /* Only pre-5705 chips support the IRQ-tick and stats-block
         * coalescing knobs; on 5705+ the limits stay 0, so any nonzero
         * request for those fields is rejected below.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
                min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
        }

        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
            (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
            (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
            (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
            (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;

        /* No rx interrupts will be generated if both are zero */
        if ((ec->rx_coalesce_usecs == 0) &&
            (ec->rx_max_coalesced_frames == 0))
                return -EINVAL;

        /* No tx interrupts will be generated if both are zero */
        if ((ec->tx_coalesce_usecs == 0) &&
            (ec->tx_max_coalesced_frames == 0))
                return -EINVAL;

        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
        tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
        tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
        tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
        tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
        tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
        tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

        /* Program the new values into the hardware now if it is live. */
        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_coalesce(tp, &tp->coal);
                tg3_full_unlock(tp);
        }
        return 0;
}
9553
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9586
9587 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9588 {
9589         u32 cursize, val, magic;
9590
9591         tp->nvram_size = EEPROM_CHIP_SIZE;
9592
9593         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9594                 return;
9595
9596         if ((magic != TG3_EEPROM_MAGIC) &&
9597             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9598             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9599                 return;
9600
9601         /*
9602          * Size the chip by reading offsets at increasing powers of two.
9603          * When we encounter our validation signature, we know the addressing
9604          * has wrapped around, and thus have our chip size.
9605          */
9606         cursize = 0x10;
9607
9608         while (cursize < tp->nvram_size) {
9609                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9610                         return;
9611
9612                 if (val == magic)
9613                         break;
9614
9615                 cursize <<= 1;
9616         }
9617
9618         tp->nvram_size = cursize;
9619 }
9620
9621 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9622 {
9623         u32 val;
9624
9625         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9626                 return;
9627
9628         /* Selfboot format */
9629         if (val != TG3_EEPROM_MAGIC) {
9630                 tg3_get_eeprom_size(tp);
9631                 return;
9632         }
9633
9634         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9635                 if (val != 0) {
9636                         tp->nvram_size = (val >> 16) * 1024;
9637                         return;
9638                 }
9639         }
9640         tp->nvram_size = 0x80000;
9641 }
9642
/* Legacy NVRAM detection: decode the vendor field of NVRAM_CFG1 on
 * 5750/5780-class chips to record the part's JEDEC vendor, page size,
 * and buffering; all other chips default to a buffered Atmel
 * AT45DB0X1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: drop compat bypass so accesses go
                 * through the standard EEPROM state machine.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Non-5750-class: assume buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
9695
/* 5752 NVRAM detection: decode vendor and page size from NVRAM_CFG1,
 * noting TPM write-protection when bit 27 is set.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Flash part: page size comes from NVRAM_CFG1. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
9756
9757 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9758 {
9759         u32 nvcfg1, protect = 0;
9760
9761         nvcfg1 = tr32(NVRAM_CFG1);
9762
9763         /* NVRAM protection for TPM */
9764         if (nvcfg1 & (1 << 27)) {
9765                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9766                 protect = 1;
9767         }
9768
9769         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9770         switch (nvcfg1) {
9771                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9772                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9773                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9774                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9775                         tp->nvram_jedecnum = JEDEC_ATMEL;
9776                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9777                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9778                         tp->nvram_pagesize = 264;
9779                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9780                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9781                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9782                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9783                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9784                         else
9785                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9786                         break;
9787                 case FLASH_5752VENDOR_ST_M45PE10:
9788                 case FLASH_5752VENDOR_ST_M45PE20:
9789                 case FLASH_5752VENDOR_ST_M45PE40:
9790                         tp->nvram_jedecnum = JEDEC_ST;
9791                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9792                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9793                         tp->nvram_pagesize = 256;
9794                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9795                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9796                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9797                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9798                         else
9799                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9800                         break;
9801         }
9802 }
9803
/* 5787/5784 NVRAM detection: decode vendor/type from NVRAM_CFG1 and
 * record JEDEC vendor, buffering, flash-vs-eeprom and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM part: drop compat bypass so accesses go
                         * through the standard EEPROM state machine.
                         */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9841
/* 5761 NVRAM detection: decode vendor from NVRAM_CFG1 and record JEDEC
 * vendor and page size; then derive the size either from the hardware
 * lockout register (protected parts) or from the vendor code itself.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        /* These Atmel parts use linear addressing. */
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                /* Protected: the accessible size is in the lockout reg. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                /* Unprotected: size follows from the part number
                 * (16/8/4/2 Mbit -> 1MB/512KB/256KB/128KB).
                 */
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = 0x100000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = 0x80000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = 0x40000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = 0x20000;
                                break;
                }
        }
}
9916
9917 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9918 {
9919         tp->nvram_jedecnum = JEDEC_ATMEL;
9920         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9921         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9922 }
9923
9924 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9925 static void __devinit tg3_nvram_init(struct tg3 *tp)
9926 {
9927         tw32_f(GRC_EEPROM_ADDR,
9928              (EEPROM_ADDR_FSM_RESET |
9929               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9930                EEPROM_ADDR_CLKPERD_SHIFT)));
9931
9932         msleep(1);
9933
9934         /* Enable seeprom accesses. */
9935         tw32_f(GRC_LOCAL_CTRL,
9936              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9937         udelay(100);
9938
9939         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9940             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9941                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9942
9943                 if (tg3_nvram_lock(tp)) {
9944                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9945                                "tg3_nvram_init failed.\n", tp->dev->name);
9946                         return;
9947                 }
9948                 tg3_enable_nvram_access(tp);
9949
9950                 tp->nvram_size = 0;
9951
9952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9953                         tg3_get_5752_nvram_info(tp);
9954                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9955                         tg3_get_5755_nvram_info(tp);
9956                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9957                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9958                         tg3_get_5787_nvram_info(tp);
9959                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9960                         tg3_get_5761_nvram_info(tp);
9961                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9962                         tg3_get_5906_nvram_info(tp);
9963                 else
9964                         tg3_get_nvram_info(tp);
9965
9966                 if (tp->nvram_size == 0)
9967                         tg3_get_nvram_size(tp);
9968
9969                 tg3_disable_nvram_access(tp);
9970                 tg3_nvram_unlock(tp);
9971
9972         } else {
9973                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9974
9975                 tg3_get_eeprom_size(tp);
9976         }
9977 }
9978
/* Read one 32-bit word at @offset through the legacy GRC serial-EEPROM
 * interface (used when the chip has no NVRAM block).  Returns 0 on
 * success, -EINVAL for a misaligned/out-of-range offset, -EBUSY on
 * completion timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        /* Offset must be dword aligned and within the address field. */
        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, roughly 1ms per iteration. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10012
10013 #define NVRAM_CMD_TIMEOUT 10000
10014
10015 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10016 {
10017         int i;
10018
10019         tw32(NVRAM_CMD, nvram_cmd);
10020         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10021                 udelay(10);
10022                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10023                         udelay(10);
10024                         break;
10025                 }
10026         }
10027         if (i == NVRAM_CMD_TIMEOUT) {
10028                 return -EBUSY;
10029         }
10030         return 0;
10031 }
10032
10033 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10034 {
10035         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10036             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10037             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10038            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10039             (tp->nvram_jedecnum == JEDEC_ATMEL))
10040
10041                 addr = ((addr / tp->nvram_pagesize) <<
10042                         ATMEL_AT45DB0X1B_PAGE_POS) +
10043                        (addr % tp->nvram_pagesize);
10044
10045         return addr;
10046 }
10047
10048 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10049 {
10050         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10051             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10052             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10053            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10054             (tp->nvram_jedecnum == JEDEC_ATMEL))
10055
10056                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10057                         tp->nvram_pagesize) +
10058                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10059
10060         return addr;
10061 }
10062
/* Read one 32-bit word from NVRAM at @offset.  Falls back to the legacy
 * EEPROM interface when the chip has no NVRAM block.  Takes and releases
 * the NVRAM hardware lock around the access; the data is byte-swapped
 * from the chip's native ordering.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Convert to the device's page-based addressing if required. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10094
10095 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10096 {
10097         int err;
10098         u32 tmp;
10099
10100         err = tg3_nvram_read(tp, offset, &tmp);
10101         *val = swab32(tmp);
10102         return err;
10103 }
10104
/* Write @len bytes from @buf to the serial EEPROM at @offset through
 * the legacy GRC interface, one 32-bit word at a time.  Offset and
 * length are assumed dword aligned.  Returns 0 on success, -EBUSY if a
 * word write does not complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr, data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

                /* Acknowledge any stale COMPLETE status first. */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, roughly 1ms per iteration. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10146
10147 /* offset and length are dword aligned */
10148 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10149                 u8 *buf)
10150 {
10151         int ret = 0;
10152         u32 pagesize = tp->nvram_pagesize;
10153         u32 pagemask = pagesize - 1;
10154         u32 nvram_cmd;
10155         u8 *tmp;
10156
10157         tmp = kmalloc(pagesize, GFP_KERNEL);
10158         if (tmp == NULL)
10159                 return -ENOMEM;
10160
10161         while (len) {
10162                 int j;
10163                 u32 phy_addr, page_off, size;
10164
10165                 phy_addr = offset & ~pagemask;
10166
10167                 for (j = 0; j < pagesize; j += 4) {
10168                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
10169                                                 (u32 *) (tmp + j))))
10170                                 break;
10171                 }
10172                 if (ret)
10173                         break;
10174
10175                 page_off = offset & pagemask;
10176                 size = pagesize;
10177                 if (len < size)
10178                         size = len;
10179
10180                 len -= size;
10181
10182                 memcpy(tmp + page_off, buf, size);
10183
10184                 offset = offset + (pagesize - page_off);
10185
10186                 tg3_enable_nvram_access(tp);
10187
10188                 /*
10189                  * Before we can erase the flash page, we need
10190                  * to issue a special "write enable" command.
10191                  */
10192                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10193
10194                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10195                         break;
10196
10197                 /* Erase the target page */
10198                 tw32(NVRAM_ADDR, phy_addr);
10199
10200                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10201                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10202
10203                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10204                         break;
10205
10206                 /* Issue another write enable to start the write. */
10207                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10208
10209                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10210                         break;
10211
10212                 for (j = 0; j < pagesize; j += 4) {
10213                         u32 data;
10214
10215                         data = *((u32 *) (tmp + j));
10216                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
10217
10218                         tw32(NVRAM_ADDR, phy_addr + j);
10219
10220                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10221                                 NVRAM_CMD_WR;
10222
10223                         if (j == 0)
10224                                 nvram_cmd |= NVRAM_CMD_FIRST;
10225                         else if (j == (pagesize - 4))
10226                                 nvram_cmd |= NVRAM_CMD_LAST;
10227
10228                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10229                                 break;
10230                 }
10231                 if (ret)
10232                         break;
10233         }
10234
10235         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10236         tg3_nvram_exec_cmd(tp, nvram_cmd);
10237
10238         kfree(tmp);
10239
10240         return ret;
10241 }
10242
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash / EEPROM at @offset, one
 * 32-bit word per NVRAM command, tagging the first/last words of each
 * page with NVRAM_CMD_FIRST/LAST as the part requires.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 data, page_off, phy_addr, nvram_cmd;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, cpu_to_be32(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* Mark page boundaries and the end of the transfer. */
                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* Older chips with ST flash need an explicit write-enable
                 * command before the first word of each page.
                 */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
10295
10296 /* offset and length are dword aligned */
10297 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10298 {
10299         int ret;
10300
10301         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10302                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10303                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10304                 udelay(40);
10305         }
10306
10307         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10308                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10309         }
10310         else {
10311                 u32 grc_mode;
10312
10313                 ret = tg3_nvram_lock(tp);
10314                 if (ret)
10315                         return ret;
10316
10317                 tg3_enable_nvram_access(tp);
10318                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10319                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10320                         tw32(NVRAM_WRITE1, 0x406);
10321
10322                 grc_mode = tr32(GRC_MODE);
10323                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10324
10325                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10326                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10327
10328                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10329                                 buf);
10330                 }
10331                 else {
10332                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10333                                 buf);
10334                 }
10335
10336                 grc_mode = tr32(GRC_MODE);
10337                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10338
10339                 tg3_disable_nvram_access(tp);
10340                 tg3_nvram_unlock(tp);
10341         }
10342
10343         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10344                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10345                 udelay(40);
10346         }
10347
10348         return ret;
10349 }
10350
/* Maps a PCI subsystem (vendor, device) pair to the PHY chip known to
 * be on that board; used as a fallback when the EEPROM carries no
 * usable PHY ID.  phy_id == 0 marks boards with a serdes/fiber
 * interface rather than a copper PHY.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10393
10394 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10395 {
10396         int i;
10397
10398         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10399                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10400                      tp->pdev->subsystem_vendor) &&
10401                     (subsys_id_to_phy_id[i].subsys_devid ==
10402                      tp->pdev->subsystem_device))
10403                         return &subsys_id_to_phy_id[i];
10404         }
10405         return NULL;
10406 }
10407
/* Read the board's hardware configuration out of NIC SRAM (populated
 * by bootcode from the EEPROM) and translate it into tp->phy_id,
 * tp->led_ctrl, and the various tg3_flags* capability bits (write
 * protect, WOL, ASF, APE, serdes, ASPM workaround, ...).  If the SRAM
 * signature is absent, only the defaults set up front remain.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;
        u16 pmcsr;

        /* On some early chips the SRAM cannot be accessed in D3hot state,
         * so need make sure we're in D0.
         */
        pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
        msleep(1);

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        tp->phy_id = PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

        /* 5906 exposes its config through the VCPU shadow register
         * instead of NIC SRAM; handle it here and return early.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        /* Not a LOM: treat as add-in NIC, writable EEPROM. */
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                        tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT))
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
                return;
        }

        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                /* Only trust the secondary config word on chips/bootcode
                 * versions known to provide it.
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Re-pack the two SRAM PHY ID halves into the driver's
                 * internal phy_id layout (same packing as tg3_phy_probe).
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                                tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
                        else
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }

                /* 5750+ parts carry extended LED modes in cfg2. */
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        /* 5750 A0/A1 cannot drive the extra PHY LED bits. */
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
                            tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                };

                /* Dell 5700/5701 boards wire their LEDs for PHY_2 mode. */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
                        /* Two Arima boards set the WP bit but must remain
                         * writable.
                         */
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                } else {
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                        tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
                if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
                        tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
                /* Serdes boards without the fiber-WOL option cannot WOL. */
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

                if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
                    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

                /* NOTE(review): bits 17/18 of cfg2 are magic numbers here;
                 * presumably capacitive-coupling and serdes pre-emphasis
                 * option bits from the bootcode spec.
                 */
                if (cfg2 & (1 << 17))
                        tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

                if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
                }
        }
}
10595
/* Determine and record the PHY attached to this device.  Tries, in
 * order: the live MII ID registers (skipped when ASF/APE firmware owns
 * the PHY), the ID read earlier from the EEPROM, and finally the
 * hardcoded subsystem-ID table.  Also kicks off autonegotiation setup
 * on copper PHYs.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* Reading the PHY ID register can conflict with ASF
         * firwmare access to the PHY hardware.
         */
        err = 0;
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
            (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack PHYSID1/PHYSID2 into the driver's internal
                 * phy_id layout (same packing as tg3_get_eeprom_hw_cfg).
                 */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
        }

        if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                /* BCM8002 is a serdes device despite answering on MII. */
                if (hw_phy_id_masked == PHY_ID_BCM8002)
                        tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                else
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
        } else {
                if (tp->phy_id != PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = lookup_by_subsys(tp);
                        if (!p)
                                return -ENODEV;

                        tp->phy_id = p->phy_id;
                        /* phy_id == 0 in the table marks fiber boards. */
                        if (!tp->phy_id ||
                            tp->phy_id == PHY_ID_BCM8002)
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
        }

        /* Copper PHY and no management firmware in the way: reset the
         * PHY if the link is down and make sure full autoneg
         * advertisement is programmed.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl, mask;

                /* BMSR latches link-down; read twice for current state. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                tg3_ctrl = 0;
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                                    MII_TG3_CTRL_ADV_1000_FULL);
                        /* 5701 A0/B0 must advertise as master. */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }

                mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                        ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
                if (!tg3_copper_is_advertising_all(tp, mask)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);

                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
                tg3_phy_set_wirespeed(tp);

                /* NOTE(review): the advertisement registers are written
                 * again unconditionally here, after the PHY reset and
                 * wirespeed setup — presumably deliberate re-programming
                 * following the reset; confirm before consolidating.
                 */
                tg3_writephy(tp, MII_ADVERTISE, adv_reg);
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                        tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
        }

skip_phy_reset:
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;
        }

        /* NOTE(review): this second 5401 DSP init is only reachable with
         * err == 0, immediately after the identical call above — looks
         * redundant; kept as-is to preserve the hardware init sequence.
         */
        if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
                err = tg3_init_5401phy_dsp(tp);
        }

        /* Serdes links advertise only gigabit/fiber modes. */
        if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg |
                         ADVERTISED_FIBRE);
        if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                tp->link_config.advertising &=
                        ~(ADVERTISED_1000baseT_Half |
                          ADVERTISED_1000baseT_Full);

        return err;
}
10723
/* Extract the board part number from the PCI Vital Product Data and
 * store it in tp->board_part_number.  VPD is read either directly from
 * NVRAM (when the EEPROM carries the tg3 magic) or through the PCI VPD
 * capability, then the "PN" keyword is located in the read-only VPD
 * resource.  Falls back to a fixed string on any failure.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
        unsigned char vpd_data[256];
        unsigned int i;
        u32 magic;

        if (tg3_nvram_read_swab(tp, 0x0, &magic))
                goto out_not_found;

        if (magic == TG3_EEPROM_MAGIC) {
                /* VPD lives at NVRAM offset 0x100; copy 256 bytes. */
                for (i = 0; i < 256; i += 4) {
                        u32 tmp;

                        if (tg3_nvram_read(tp, 0x100 + i, &tmp))
                                goto out_not_found;

                        vpd_data[i + 0] = ((tmp >>  0) & 0xff);
                        vpd_data[i + 1] = ((tmp >>  8) & 0xff);
                        vpd_data[i + 2] = ((tmp >> 16) & 0xff);
                        vpd_data[i + 3] = ((tmp >> 24) & 0xff);
                }
        } else {
                int vpd_cap;

                /* No tg3 NVRAM image; read VPD through the PCI VPD
                 * capability, one dword at a time.
                 */
                vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
                for (i = 0; i < 256; i += 4) {
                        u32 tmp, j = 0;
                        u16 tmp16;

                        pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
                                              i);
                        /* Poll the VPD_ADDR flag bit (0x8000) for read
                         * completion, up to ~100ms.
                         */
                        while (j++ < 100) {
                                pci_read_config_word(tp->pdev, vpd_cap +
                                                     PCI_VPD_ADDR, &tmp16);
                                if (tmp16 & 0x8000)
                                        break;
                                msleep(1);
                        }
                        if (!(tmp16 & 0x8000))
                                goto out_not_found;

                        pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
                                              &tmp);
                        tmp = cpu_to_le32(tmp);
                        memcpy(&vpd_data[i], &tmp, 4);
                }
        }

        /* Now parse and find the part number. */
        for (i = 0; i < 254; ) {
                unsigned char val = vpd_data[i];
                unsigned int block_end;

                /* Skip identifier-string (0x82) and read-write (0x91)
                 * resources; their 16-bit little-endian length follows
                 * the tag byte.
                 */
                if (val == 0x82 || val == 0x91) {
                        i = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                        continue;
                }

                /* Anything but a read-only resource (0x90) here means a
                 * malformed VPD image.
                 */
                if (val != 0x90)
                        goto out_not_found;

                block_end = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                i += 3;

                if (block_end > 256)
                        goto out_not_found;

                /* Walk keyword entries: 2-char keyword + 1-byte length. */
                while (i < (block_end - 2)) {
                        if (vpd_data[i + 0] == 'P' &&
                            vpd_data[i + 1] == 'N') {
                                int partno_len = vpd_data[i + 2];

                                i += 3;
                                if (partno_len > 24 || (partno_len + i) > 256)
                                        goto out_not_found;

                                memcpy(tp->board_part_number,
                                       &vpd_data[i], partno_len);

                                /* Success. */
                                return;
                        }
                        i += 3 + vpd_data[i + 2];
                }

                /* Part number not found. */
                goto out_not_found;
        }

out_not_found:
        /* 5906 parts carry no VPD part number; use the known board name. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                strcpy(tp->board_part_number, "BCM95906");
        else
                strcpy(tp->board_part_number, "none");
}
10823
10824 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10825 {
10826         u32 val;
10827
10828         if (tg3_nvram_read_swab(tp, offset, &val) ||
10829             (val & 0xfc000000) != 0x0c000000 ||
10830             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10831             val != 0)
10832                 return 0;
10833
10834         return 1;
10835 }
10836
10837 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10838 {
10839         u32 val, offset, start;
10840         u32 ver_offset;
10841         int i, bcnt;
10842
10843         if (tg3_nvram_read_swab(tp, 0, &val))
10844                 return;
10845
10846         if (val != TG3_EEPROM_MAGIC)
10847                 return;
10848
10849         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10850             tg3_nvram_read_swab(tp, 0x4, &start))
10851                 return;
10852
10853         offset = tg3_nvram_logical_addr(tp, offset);
10854
10855         if (!tg3_fw_img_is_valid(tp, offset) ||
10856             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10857                 return;
10858
10859         offset = offset + ver_offset - start;
10860         for (i = 0; i < 16; i += 4) {
10861                 if (tg3_nvram_read(tp, offset + i, &val))
10862                         return;
10863
10864                 val = le32_to_cpu(val);
10865                 memcpy(tp->fw_ver + i, &val, 4);
10866         }
10867
10868         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10869              (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10870                 return;
10871
10872         for (offset = TG3_NVM_DIR_START;
10873              offset < TG3_NVM_DIR_END;
10874              offset += TG3_NVM_DIRENT_SIZE) {
10875                 if (tg3_nvram_read_swab(tp, offset, &val))
10876                         return;
10877
10878                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10879                         break;
10880         }
10881
10882         if (offset == TG3_NVM_DIR_END)
10883                 return;
10884
10885         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10886                 start = 0x08000000;
10887         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10888                 return;
10889
10890         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10891             !tg3_fw_img_is_valid(tp, offset) ||
10892             tg3_nvram_read_swab(tp, offset + 8, &val))
10893                 return;
10894
10895         offset += val - start;
10896
10897         bcnt = strlen(tp->fw_ver);
10898
10899         tp->fw_ver[bcnt++] = ',';
10900         tp->fw_ver[bcnt++] = ' ';
10901
10902         for (i = 0; i < 4; i++) {
10903                 if (tg3_nvram_read(tp, offset, &val))
10904                         return;
10905
10906                 val = le32_to_cpu(val);
10907                 offset += sizeof(val);
10908
10909                 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10910                         memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10911                         break;
10912                 }
10913
10914                 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10915                 bcnt += sizeof(val);
10916         }
10917
10918         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10919 }
10920
10921 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10922
10923 static int __devinit tg3_get_invariants(struct tg3 *tp)
10924 {
10925         static struct pci_device_id write_reorder_chipsets[] = {
10926                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10927                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10928                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10929                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10930                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10931                              PCI_DEVICE_ID_VIA_8385_0) },
10932                 { },
10933         };
10934         u32 misc_ctrl_reg;
10935         u32 cacheline_sz_reg;
10936         u32 pci_state_reg, grc_misc_cfg;
10937         u32 val;
10938         u16 pci_cmd;
10939         int err, pcie_cap;
10940
10941         /* Force memory write invalidate off.  If we leave it on,
10942          * then on 5700_BX chips we have to enable a workaround.
10943          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10944          * to match the cacheline size.  The Broadcom driver have this
10945          * workaround but turns MWI off all the times so never uses
10946          * it.  This seems to suggest that the workaround is insufficient.
10947          */
10948         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10949         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10950         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10951
10952         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10953          * has the register indirect write enable bit set before
10954          * we try to access any of the MMIO registers.  It is also
10955          * critical that the PCI-X hw workaround situation is decided
10956          * before that as well.
10957          */
10958         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10959                               &misc_ctrl_reg);
10960
10961         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10962                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10964                 u32 prod_id_asic_rev;
10965
10966                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10967                                       &prod_id_asic_rev);
10968                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10969         }
10970
10971         /* Wrong chip ID in 5752 A0. This code can be removed later
10972          * as A0 is not in production.
10973          */
10974         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10975                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10976
10977         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10978          * we need to disable memory and use config. cycles
10979          * only to access all registers. The 5702/03 chips
10980          * can mistakenly decode the special cycles from the
10981          * ICH chipsets as memory write cycles, causing corruption
10982          * of register and memory space. Only certain ICH bridges
10983          * will drive special cycles with non-zero data during the
10984          * address phase which can fall within the 5703's address
10985          * range. This is not an ICH bug as the PCI spec allows
10986          * non-zero address during special cycles. However, only
10987          * these ICH bridges are known to drive non-zero addresses
10988          * during special cycles.
10989          *
10990          * Since special cycles do not cross PCI bridges, we only
10991          * enable this workaround if the 5703 is on the secondary
10992          * bus of these ICH bridges.
10993          */
10994         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10995             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10996                 static struct tg3_dev_id {
10997                         u32     vendor;
10998                         u32     device;
10999                         u32     rev;
11000                 } ich_chipsets[] = {
11001                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11002                           PCI_ANY_ID },
11003                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11004                           PCI_ANY_ID },
11005                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11006                           0xa },
11007                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11008                           PCI_ANY_ID },
11009                         { },
11010                 };
11011                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11012                 struct pci_dev *bridge = NULL;
11013
11014                 while (pci_id->vendor != 0) {
11015                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11016                                                 bridge);
11017                         if (!bridge) {
11018                                 pci_id++;
11019                                 continue;
11020                         }
11021                         if (pci_id->rev != PCI_ANY_ID) {
11022                                 if (bridge->revision > pci_id->rev)
11023                                         continue;
11024                         }
11025                         if (bridge->subordinate &&
11026                             (bridge->subordinate->number ==
11027                              tp->pdev->bus->number)) {
11028
11029                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11030                                 pci_dev_put(bridge);
11031                                 break;
11032                         }
11033                 }
11034         }
11035
11036         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11037          * DMA addresses > 40-bit. This bridge may have other additional
11038          * 57xx devices behind it in some 4-port NIC designs for example.
11039          * Any tg3 device found behind the bridge will also need the 40-bit
11040          * DMA workaround.
11041          */
11042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11044                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11045                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11046                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11047         }
11048         else {
11049                 struct pci_dev *bridge = NULL;
11050
11051                 do {
11052                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11053                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11054                                                 bridge);
11055                         if (bridge && bridge->subordinate &&
11056                             (bridge->subordinate->number <=
11057                              tp->pdev->bus->number) &&
11058                             (bridge->subordinate->subordinate >=
11059                              tp->pdev->bus->number)) {
11060                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11061                                 pci_dev_put(bridge);
11062                                 break;
11063                         }
11064                 } while (bridge);
11065         }
11066
11067         /* Initialize misc host control in PCI block. */
11068         tp->misc_host_ctrl |= (misc_ctrl_reg &
11069                                MISC_HOST_CTRL_CHIPREV);
11070         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11071                                tp->misc_host_ctrl);
11072
11073         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11074                               &cacheline_sz_reg);
11075
11076         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11077         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11078         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11079         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11080
11081         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11082             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11083                 tp->pdev_peer = tg3_find_peer(tp);
11084
11085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11089             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11090             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11092             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11093                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11094
11095         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11096             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11097                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11098
11099         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11100                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11101                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11102                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11103                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11104                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11105                      tp->pdev_peer == tp->pdev))
11106                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11107
11108                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11109                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11110                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11111                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11113                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11114                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11115                 } else {
11116                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11117                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11118                                 ASIC_REV_5750 &&
11119                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11120                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11121                 }
11122         }
11123
11124         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11125             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11126             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11127             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11128             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11129             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11130             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11131             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11132                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11133
11134         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11135         if (pcie_cap != 0) {
11136                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11137                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11138                         u16 lnkctl;
11139
11140                         pci_read_config_word(tp->pdev,
11141                                              pcie_cap + PCI_EXP_LNKCTL,
11142                                              &lnkctl);
11143                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11144                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11145                 }
11146         }
11147
11148         /* If we have an AMD 762 or VIA K8T800 chipset, write
11149          * reordering to the mailbox registers done by the host
11150          * controller can cause major troubles.  We read back from
11151          * every mailbox register write to force the writes to be
11152          * posted to the chip in order.
11153          */
11154         if (pci_dev_present(write_reorder_chipsets) &&
11155             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11156                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11157
11158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11159             tp->pci_lat_timer < 64) {
11160                 tp->pci_lat_timer = 64;
11161
11162                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11163                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11164                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11165                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11166
11167                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11168                                        cacheline_sz_reg);
11169         }
11170
11171         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11172             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11173                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11174                 if (!tp->pcix_cap) {
11175                         printk(KERN_ERR PFX "Cannot find PCI-X "
11176                                             "capability, aborting.\n");
11177                         return -EIO;
11178                 }
11179         }
11180
11181         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11182                               &pci_state_reg);
11183
11184         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11185                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11186
11187                 /* If this is a 5700 BX chipset, and we are in PCI-X
11188                  * mode, enable register write workaround.
11189                  *
11190                  * The workaround is to use indirect register accesses
11191                  * for all chip writes not to mailbox registers.
11192                  */
11193                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11194                         u32 pm_reg;
11195
11196                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11197
11198                         /* The chip can have it's power management PCI config
11199                          * space registers clobbered due to this bug.
11200                          * So explicitly force the chip into D0 here.
11201                          */
11202                         pci_read_config_dword(tp->pdev,
11203                                               tp->pm_cap + PCI_PM_CTRL,
11204                                               &pm_reg);
11205                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11206                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11207                         pci_write_config_dword(tp->pdev,
11208                                                tp->pm_cap + PCI_PM_CTRL,
11209                                                pm_reg);
11210
11211                         /* Also, force SERR#/PERR# in PCI command. */
11212                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11213                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11214                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11215                 }
11216         }
11217
11218         /* 5700 BX chips need to have their TX producer index mailboxes
11219          * written twice to workaround a bug.
11220          */
11221         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11222                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11223
11224         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11225                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11226         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11227                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11228
11229         /* Chip-specific fixup from Broadcom driver */
11230         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11231             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11232                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11233                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11234         }
11235
11236         /* Default fast path register access methods */
11237         tp->read32 = tg3_read32;
11238         tp->write32 = tg3_write32;
11239         tp->read32_mbox = tg3_read32;
11240         tp->write32_mbox = tg3_write32;
11241         tp->write32_tx_mbox = tg3_write32;
11242         tp->write32_rx_mbox = tg3_write32;
11243
11244         /* Various workaround register access methods */
11245         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11246                 tp->write32 = tg3_write_indirect_reg32;
11247         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11248                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11249                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11250                 /*
11251                  * Back to back register writes can cause problems on these
11252                  * chips, the workaround is to read back all reg writes
11253                  * except those to mailbox regs.
11254                  *
11255                  * See tg3_write_indirect_reg32().
11256                  */
11257                 tp->write32 = tg3_write_flush_reg32;
11258         }
11259
11260
11261         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11262             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11263                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11264                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11265                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11266         }
11267
11268         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11269                 tp->read32 = tg3_read_indirect_reg32;
11270                 tp->write32 = tg3_write_indirect_reg32;
11271                 tp->read32_mbox = tg3_read_indirect_mbox;
11272                 tp->write32_mbox = tg3_write_indirect_mbox;
11273                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11274                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11275
11276                 iounmap(tp->regs);
11277                 tp->regs = NULL;
11278
11279                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11280                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11281                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11282         }
11283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11284                 tp->read32_mbox = tg3_read32_mbox_5906;
11285                 tp->write32_mbox = tg3_write32_mbox_5906;
11286                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11287                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11288         }
11289
11290         if (tp->write32 == tg3_write_indirect_reg32 ||
11291             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11292              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11293               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11294                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11295
11296         /* Get eeprom hw config before calling tg3_set_power_state().
11297          * In particular, the TG3_FLG2_IS_NIC flag must be
11298          * determined before calling tg3_set_power_state() so that
11299          * we know whether or not to switch out of Vaux power.
11300          * When the flag is set, it means that GPIO1 is used for eeprom
11301          * write protect and also implies that it is a LOM where GPIOs
11302          * are not used to switch power.
11303          */
11304         tg3_get_eeprom_hw_cfg(tp);
11305
11306         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11307                 /* Allow reads and writes to the
11308                  * APE register and memory space.
11309                  */
11310                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11311                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11312                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11313                                        pci_state_reg);
11314         }
11315
11316         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11317             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11318                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11319
11320         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11321          * GPIO1 driven high will bring 5700's external PHY out of reset.
11322          * It is also used as eeprom write protect on LOMs.
11323          */
11324         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11325         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11326             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11327                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11328                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11329         /* Unused GPIO3 must be driven as output on 5752 because there
11330          * are no pull-up resistors on unused GPIO pins.
11331          */
11332         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11333                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11334
11335         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11336                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11337
11338         /* Force the chip into D0. */
11339         err = tg3_set_power_state(tp, PCI_D0);
11340         if (err) {
11341                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11342                        pci_name(tp->pdev));
11343                 return err;
11344         }
11345
11346         /* 5700 B0 chips do not support checksumming correctly due
11347          * to hardware bugs.
11348          */
11349         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11350                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11351
11352         /* Derive initial jumbo mode from MTU assigned in
11353          * ether_setup() via the alloc_etherdev() call
11354          */
11355         if (tp->dev->mtu > ETH_DATA_LEN &&
11356             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11357                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11358
11359         /* Determine WakeOnLan speed to use. */
11360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11361             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11362             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11363             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11364                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11365         } else {
11366                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11367         }
11368
11369         /* A few boards don't want Ethernet@WireSpeed phy feature */
11370         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11371             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11372              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11373              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11374             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11375             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11376                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11377
11378         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11379             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11380                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11381         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11382                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11383
11384         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11385                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11387                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11388                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11389                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11390                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11391                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11392                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11393                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11394                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11395                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11396         }
11397
11398         tp->coalesce_mode = 0;
11399         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11400             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11401                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11402
11403         /* Initialize MAC MI mode, polling disabled. */
11404         tw32_f(MAC_MI_MODE, tp->mi_mode);
11405         udelay(80);
11406
11407         /* Initialize data/descriptor byte/word swapping. */
11408         val = tr32(GRC_MODE);
11409         val &= GRC_MODE_HOST_STACKUP;
11410         tw32(GRC_MODE, val | tp->grc_mode);
11411
11412         tg3_switch_clocks(tp);
11413
11414         /* Clear this out for sanity. */
11415         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11416
11417         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11418                               &pci_state_reg);
11419         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11420             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11421                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11422
11423                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11424                     chiprevid == CHIPREV_ID_5701_B0 ||
11425                     chiprevid == CHIPREV_ID_5701_B2 ||
11426                     chiprevid == CHIPREV_ID_5701_B5) {
11427                         void __iomem *sram_base;
11428
11429                         /* Write some dummy words into the SRAM status block
11430                          * area, see if it reads back correctly.  If the return
11431                          * value is bad, force enable the PCIX workaround.
11432                          */
11433                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11434
11435                         writel(0x00000000, sram_base);
11436                         writel(0x00000000, sram_base + 4);
11437                         writel(0xffffffff, sram_base + 4);
11438                         if (readl(sram_base) != 0x00000000)
11439                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11440                 }
11441         }
11442
11443         udelay(50);
11444         tg3_nvram_init(tp);
11445
11446         grc_misc_cfg = tr32(GRC_MISC_CFG);
11447         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11448
11449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11450             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11451              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11452                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11453
11454         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11455             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11456                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11457         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11458                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11459                                       HOSTCC_MODE_CLRTICK_TXBD);
11460
11461                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11462                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11463                                        tp->misc_host_ctrl);
11464         }
11465
11466         /* these are limited to 10/100 only */
11467         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11468              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11469             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11470              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11471              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11472               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11473               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11474             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11475              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11476               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11477               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11478             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11479                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11480
11481         err = tg3_phy_probe(tp);
11482         if (err) {
11483                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11484                        pci_name(tp->pdev), err);
11485                 /* ... but do not return immediately ... */
11486         }
11487
11488         tg3_read_partno(tp);
11489         tg3_read_fw_ver(tp);
11490
11491         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11492                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11493         } else {
11494                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11495                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11496                 else
11497                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11498         }
11499
11500         /* 5700 {AX,BX} chips have a broken status block link
11501          * change bit implementation, so we must use the
11502          * status register in those cases.
11503          */
11504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11505                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11506         else
11507                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11508
11509         /* The led_ctrl is set during tg3_phy_probe, here we might
11510          * have to force the link status polling mechanism based
11511          * upon subsystem IDs.
11512          */
11513         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11514             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11515             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11516                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11517                                   TG3_FLAG_USE_LINKCHG_REG);
11518         }
11519
11520         /* For all SERDES we poll the MAC status register. */
11521         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11522                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11523         else
11524                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11525
11526         /* All chips before 5787 can get confused if TX buffers
11527          * straddle the 4GB address boundary in some cases.
11528          */
11529         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11531             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11534                 tp->dev->hard_start_xmit = tg3_start_xmit;
11535         else
11536                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11537
11538         tp->rx_offset = 2;
11539         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11540             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11541                 tp->rx_offset = 0;
11542
11543         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11544
11545         /* Increment the rx prod index on the rx std ring by at most
11546          * 8 for these chips to workaround hw errata.
11547          */
11548         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11551                 tp->rx_std_max_post = 8;
11552
11553         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11554                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11555                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11556
11557         return err;
11558 }
11559
11560 #ifdef CONFIG_SPARC
11561 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11562 {
11563         struct net_device *dev = tp->dev;
11564         struct pci_dev *pdev = tp->pdev;
11565         struct device_node *dp = pci_device_to_OF_node(pdev);
11566         const unsigned char *addr;
11567         int len;
11568
11569         addr = of_get_property(dp, "local-mac-address", &len);
11570         if (addr && len == 6) {
11571                 memcpy(dev->dev_addr, addr, 6);
11572                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11573                 return 0;
11574         }
11575         return -ENODEV;
11576 }
11577
11578 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11579 {
11580         struct net_device *dev = tp->dev;
11581
11582         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11583         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11584         return 0;
11585 }
11586 #endif
11587
11588 static int __devinit tg3_get_device_address(struct tg3 *tp)
11589 {
11590         struct net_device *dev = tp->dev;
11591         u32 hi, lo, mac_offset;
11592         int addr_ok = 0;
11593
11594 #ifdef CONFIG_SPARC
11595         if (!tg3_get_macaddr_sparc(tp))
11596                 return 0;
11597 #endif
11598
11599         mac_offset = 0x7c;
11600         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11601             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11602                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11603                         mac_offset = 0xcc;
11604                 if (tg3_nvram_lock(tp))
11605                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11606                 else
11607                         tg3_nvram_unlock(tp);
11608         }
11609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11610                 mac_offset = 0x10;
11611
11612         /* First try to get it from MAC address mailbox. */
11613         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11614         if ((hi >> 16) == 0x484b) {
11615                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11616                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11617
11618                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11619                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11620                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11621                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11622                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11623
11624                 /* Some old bootcode may report a 0 MAC address in SRAM */
11625                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11626         }
11627         if (!addr_ok) {
11628                 /* Next, try NVRAM. */
11629                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11630                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11631                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11632                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11633                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11634                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11635                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11636                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11637                 }
11638                 /* Finally just fetch it out of the MAC control regs. */
11639                 else {
11640                         hi = tr32(MAC_ADDR_0_HIGH);
11641                         lo = tr32(MAC_ADDR_0_LOW);
11642
11643                         dev->dev_addr[5] = lo & 0xff;
11644                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11645                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11646                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11647                         dev->dev_addr[1] = hi & 0xff;
11648                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11649                 }
11650         }
11651
11652         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11653 #ifdef CONFIG_SPARC64
11654                 if (!tg3_get_default_macaddr_sparc(tp))
11655                         return 0;
11656 #endif
11657                 return -EINVAL;
11658         }
11659         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11660         return 0;
11661 }
11662
11663 #define BOUNDARY_SINGLE_CACHELINE       1
11664 #define BOUNDARY_MULTI_CACHELINE        2
11665
11666 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11667 {
11668         int cacheline_size;
11669         u8 byte;
11670         int goal;
11671
11672         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11673         if (byte == 0)
11674                 cacheline_size = 1024;
11675         else
11676                 cacheline_size = (int) byte * 4;
11677
11678         /* On 5703 and later chips, the boundary bits have no
11679          * effect.
11680          */
11681         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11682             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11683             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11684                 goto out;
11685
11686 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11687         goal = BOUNDARY_MULTI_CACHELINE;
11688 #else
11689 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11690         goal = BOUNDARY_SINGLE_CACHELINE;
11691 #else
11692         goal = 0;
11693 #endif
11694 #endif
11695
11696         if (!goal)
11697                 goto out;
11698
11699         /* PCI controllers on most RISC systems tend to disconnect
11700          * when a device tries to burst across a cache-line boundary.
11701          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11702          *
11703          * Unfortunately, for PCI-E there are only limited
11704          * write-side controls for this, and thus for reads
11705          * we will still get the disconnects.  We'll also waste
11706          * these PCI cycles for both read and write for chips
11707          * other than 5700 and 5701 which do not implement the
11708          * boundary bits.
11709          */
11710         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11711             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11712                 switch (cacheline_size) {
11713                 case 16:
11714                 case 32:
11715                 case 64:
11716                 case 128:
11717                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11718                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11719                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11720                         } else {
11721                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11722                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11723                         }
11724                         break;
11725
11726                 case 256:
11727                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11728                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11729                         break;
11730
11731                 default:
11732                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11733                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11734                         break;
11735                 };
11736         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11737                 switch (cacheline_size) {
11738                 case 16:
11739                 case 32:
11740                 case 64:
11741                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11742                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11743                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11744                                 break;
11745                         }
11746                         /* fallthrough */
11747                 case 128:
11748                 default:
11749                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11750                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11751                         break;
11752                 };
11753         } else {
11754                 switch (cacheline_size) {
11755                 case 16:
11756                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11757                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11758                                         DMA_RWCTRL_WRITE_BNDRY_16);
11759                                 break;
11760                         }
11761                         /* fallthrough */
11762                 case 32:
11763                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11764                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11765                                         DMA_RWCTRL_WRITE_BNDRY_32);
11766                                 break;
11767                         }
11768                         /* fallthrough */
11769                 case 64:
11770                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11771                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11772                                         DMA_RWCTRL_WRITE_BNDRY_64);
11773                                 break;
11774                         }
11775                         /* fallthrough */
11776                 case 128:
11777                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11778                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11779                                         DMA_RWCTRL_WRITE_BNDRY_128);
11780                                 break;
11781                         }
11782                         /* fallthrough */
11783                 case 256:
11784                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11785                                 DMA_RWCTRL_WRITE_BNDRY_256);
11786                         break;
11787                 case 512:
11788                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11789                                 DMA_RWCTRL_WRITE_BNDRY_512);
11790                         break;
11791                 case 1024:
11792                 default:
11793                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11794                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11795                         break;
11796                 };
11797         }
11798
11799 out:
11800         return val;
11801 }
11802
/* Run a single test DMA transaction of @size bytes between host
 * buffer @buf (bus address @buf_dma) and NIC SRAM, using a descriptor
 * written into the chip's internal DMA descriptor pool.
 * @to_device: nonzero for a host->NIC (read DMA) transfer, zero for
 * NIC->host (write DMA).
 *
 * Returns 0 if the chip reports completion within ~4ms, -ENODEV on
 * timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor: host address, the NIC
	 * SRAM mbuf address (0x2100 -- matches the readback check in
	 * tg3_test_dma), and length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion/submission queue IDs for the read-DMA path.
		 * Exact queue semantics are chip-internal (Broadcom spec).
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Queue IDs for the write-DMA path. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor, up to 40 x 100us. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11883
11884 #define TEST_BUFFER_SIZE        0x2000
11885
/* Derive and program tp->dma_rwctrl (watermarks, boundary bits and
 * chip-specific workarounds), then -- on 5700/5701 only -- run a
 * write/read DMA loopback test to detect the write-DMA bug, tightening
 * the write boundary to 16 bytes if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, -ENODEV if DMA fails or data is corrupted even with the
 * tightened boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI write/read command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* The magic watermark values below are Broadcom-specified
	 * per-bus/per-chip settings; field layout is not visible here.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (reassigned bits on these revs). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback DMA test. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to chip, read it back, verify.  On first
	 * corruption, retry once with the 16-byte write boundary; if it
	 * still corrupts, give up with -ENODEV.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (p[i] == i). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: tighten the write
				 * boundary to 16 bytes and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12072
12073 static void __devinit tg3_init_link_config(struct tg3 *tp)
12074 {
12075         tp->link_config.advertising =
12076                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12077                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12078                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12079                  ADVERTISED_Autoneg | ADVERTISED_MII);
12080         tp->link_config.speed = SPEED_INVALID;
12081         tp->link_config.duplex = DUPLEX_INVALID;
12082         tp->link_config.autoneg = AUTONEG_ENABLE;
12083         tp->link_config.active_speed = SPEED_INVALID;
12084         tp->link_config.active_duplex = DUPLEX_INVALID;
12085         tp->link_config.phy_is_low_power = 0;
12086         tp->link_config.orig_speed = SPEED_INVALID;
12087         tp->link_config.orig_duplex = DUPLEX_INVALID;
12088         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12089 }
12090
12091 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12092 {
12093         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12094                 tp->bufmgr_config.mbuf_read_dma_low_water =
12095                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12096                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12097                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12098                 tp->bufmgr_config.mbuf_high_water =
12099                         DEFAULT_MB_HIGH_WATER_5705;
12100                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12101                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12102                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12103                         tp->bufmgr_config.mbuf_high_water =
12104                                 DEFAULT_MB_HIGH_WATER_5906;
12105                 }
12106
12107                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12108                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12109                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12110                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12111                 tp->bufmgr_config.mbuf_high_water_jumbo =
12112                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12113         } else {
12114                 tp->bufmgr_config.mbuf_read_dma_low_water =
12115                         DEFAULT_MB_RDMA_LOW_WATER;
12116                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12117                         DEFAULT_MB_MACRX_LOW_WATER;
12118                 tp->bufmgr_config.mbuf_high_water =
12119                         DEFAULT_MB_HIGH_WATER;
12120
12121                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12122                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12123                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12124                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12125                 tp->bufmgr_config.mbuf_high_water_jumbo =
12126                         DEFAULT_MB_HIGH_WATER_JUMBO;
12127         }
12128
12129         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12130         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12131 }
12132
12133 static char * __devinit tg3_phy_string(struct tg3 *tp)
12134 {
12135         switch (tp->phy_id & PHY_ID_MASK) {
12136         case PHY_ID_BCM5400:    return "5400";
12137         case PHY_ID_BCM5401:    return "5401";
12138         case PHY_ID_BCM5411:    return "5411";
12139         case PHY_ID_BCM5701:    return "5701";
12140         case PHY_ID_BCM5703:    return "5703";
12141         case PHY_ID_BCM5704:    return "5704";
12142         case PHY_ID_BCM5705:    return "5705";
12143         case PHY_ID_BCM5750:    return "5750";
12144         case PHY_ID_BCM5752:    return "5752";
12145         case PHY_ID_BCM5714:    return "5714";
12146         case PHY_ID_BCM5780:    return "5780";
12147         case PHY_ID_BCM5755:    return "5755";
12148         case PHY_ID_BCM5787:    return "5787";
12149         case PHY_ID_BCM5784:    return "5784";
12150         case PHY_ID_BCM5756:    return "5722/5756";
12151         case PHY_ID_BCM5906:    return "5906";
12152         case PHY_ID_BCM5761:    return "5761";
12153         case PHY_ID_BCM8002:    return "8002/serdes";
12154         case 0:                 return "serdes";
12155         default:                return "unknown";
12156         };
12157 }
12158
12159 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12160 {
12161         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12162                 strcpy(str, "PCI Express");
12163                 return str;
12164         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12165                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12166
12167                 strcpy(str, "PCIX:");
12168
12169                 if ((clock_ctrl == 7) ||
12170                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12171                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12172                         strcat(str, "133MHz");
12173                 else if (clock_ctrl == 0)
12174                         strcat(str, "33MHz");
12175                 else if (clock_ctrl == 2)
12176                         strcat(str, "50MHz");
12177                 else if (clock_ctrl == 4)
12178                         strcat(str, "66MHz");
12179                 else if (clock_ctrl == 6)
12180                         strcat(str, "100MHz");
12181         } else {
12182                 strcpy(str, "PCI:");
12183                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12184                         strcat(str, "66MHz");
12185                 else
12186                         strcat(str, "33MHz");
12187         }
12188         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12189                 strcat(str, ":32-bit");
12190         else
12191                 strcat(str, ":64-bit");
12192         return str;
12193 }
12194
12195 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12196 {
12197         struct pci_dev *peer;
12198         unsigned int func, devnr = tp->pdev->devfn & ~7;
12199
12200         for (func = 0; func < 8; func++) {
12201                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12202                 if (peer && peer != tp->pdev)
12203                         break;
12204                 pci_dev_put(peer);
12205         }
12206         /* 5704 can be configured in single-port mode, set peer to
12207          * tp->pdev in that case.
12208          */
12209         if (!peer) {
12210                 peer = tp->pdev;
12211                 return peer;
12212         }
12213
12214         /*
12215          * We don't need to keep the refcount elevated; there's no way
12216          * to remove one half of this device without removing the other
12217          */
12218         pci_dev_put(peer);
12219
12220         return peer;
12221 }
12222
12223 static void __devinit tg3_init_coal(struct tg3 *tp)
12224 {
12225         struct ethtool_coalesce *ec = &tp->coal;
12226
12227         memset(ec, 0, sizeof(*ec));
12228         ec->cmd = ETHTOOL_GCOALESCE;
12229         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12230         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12231         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12232         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12233         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12234         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12235         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12236         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12237         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12238
12239         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12240                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12241                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12242                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12243                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12244                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12245         }
12246
12247         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12248                 ec->rx_coalesce_usecs_irq = 0;
12249                 ec->tx_coalesce_usecs_irq = 0;
12250                 ec->stats_block_coalesce_usecs = 0;
12251         }
12252 }
12253
12254 static int __devinit tg3_init_one(struct pci_dev *pdev,
12255                                   const struct pci_device_id *ent)
12256 {
12257         static int tg3_version_printed = 0;
12258         unsigned long tg3reg_base, tg3reg_len;
12259         struct net_device *dev;
12260         struct tg3 *tp;
12261         int i, err, pm_cap;
12262         char str[40];
12263         u64 dma_mask, persist_dma_mask;
12264
12265         if (tg3_version_printed++ == 0)
12266                 printk(KERN_INFO "%s", version);
12267
12268         err = pci_enable_device(pdev);
12269         if (err) {
12270                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12271                        "aborting.\n");
12272                 return err;
12273         }
12274
12275         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12276                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12277                        "base address, aborting.\n");
12278                 err = -ENODEV;
12279                 goto err_out_disable_pdev;
12280         }
12281
12282         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12283         if (err) {
12284                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12285                        "aborting.\n");
12286                 goto err_out_disable_pdev;
12287         }
12288
12289         pci_set_master(pdev);
12290
12291         /* Find power-management capability. */
12292         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12293         if (pm_cap == 0) {
12294                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12295                        "aborting.\n");
12296                 err = -EIO;
12297                 goto err_out_free_res;
12298         }
12299
12300         tg3reg_base = pci_resource_start(pdev, 0);
12301         tg3reg_len = pci_resource_len(pdev, 0);
12302
12303         dev = alloc_etherdev(sizeof(*tp));
12304         if (!dev) {
12305                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12306                 err = -ENOMEM;
12307                 goto err_out_free_res;
12308         }
12309
12310         SET_NETDEV_DEV(dev, &pdev->dev);
12311
12312 #if TG3_VLAN_TAG_USED
12313         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12314         dev->vlan_rx_register = tg3_vlan_rx_register;
12315 #endif
12316
12317         tp = netdev_priv(dev);
12318         tp->pdev = pdev;
12319         tp->dev = dev;
12320         tp->pm_cap = pm_cap;
12321         tp->mac_mode = TG3_DEF_MAC_MODE;
12322         tp->rx_mode = TG3_DEF_RX_MODE;
12323         tp->tx_mode = TG3_DEF_TX_MODE;
12324         tp->mi_mode = MAC_MI_MODE_BASE;
12325         if (tg3_debug > 0)
12326                 tp->msg_enable = tg3_debug;
12327         else
12328                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12329
12330         /* The word/byte swap controls here control register access byte
12331          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12332          * setting below.
12333          */
12334         tp->misc_host_ctrl =
12335                 MISC_HOST_CTRL_MASK_PCI_INT |
12336                 MISC_HOST_CTRL_WORD_SWAP |
12337                 MISC_HOST_CTRL_INDIR_ACCESS |
12338                 MISC_HOST_CTRL_PCISTATE_RW;
12339
12340         /* The NONFRM (non-frame) byte/word swap controls take effect
12341          * on descriptor entries, anything which isn't packet data.
12342          *
12343          * The StrongARM chips on the board (one for tx, one for rx)
12344          * are running in big-endian mode.
12345          */
12346         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12347                         GRC_MODE_WSWAP_NONFRM_DATA);
12348 #ifdef __BIG_ENDIAN
12349         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12350 #endif
12351         spin_lock_init(&tp->lock);
12352         spin_lock_init(&tp->indirect_lock);
12353         INIT_WORK(&tp->reset_task, tg3_reset_task);
12354
12355         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12356         if (!tp->regs) {
12357                 printk(KERN_ERR PFX "Cannot map device registers, "
12358                        "aborting.\n");
12359                 err = -ENOMEM;
12360                 goto err_out_free_dev;
12361         }
12362
12363         tg3_init_link_config(tp);
12364
12365         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12366         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12367         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12368
12369         dev->open = tg3_open;
12370         dev->stop = tg3_close;
12371         dev->get_stats = tg3_get_stats;
12372         dev->set_multicast_list = tg3_set_rx_mode;
12373         dev->set_mac_address = tg3_set_mac_addr;
12374         dev->do_ioctl = tg3_ioctl;
12375         dev->tx_timeout = tg3_tx_timeout;
12376         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12377         dev->ethtool_ops = &tg3_ethtool_ops;
12378         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12379         dev->change_mtu = tg3_change_mtu;
12380         dev->irq = pdev->irq;
12381 #ifdef CONFIG_NET_POLL_CONTROLLER
12382         dev->poll_controller = tg3_poll_controller;
12383 #endif
12384
12385         err = tg3_get_invariants(tp);
12386         if (err) {
12387                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12388                        "aborting.\n");
12389                 goto err_out_iounmap;
12390         }
12391
12392         /* The EPB bridge inside 5714, 5715, and 5780 and any
12393          * device behind the EPB cannot support DMA addresses > 40-bit.
12394          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12395          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12396          * do DMA address check in tg3_start_xmit().
12397          */
12398         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12399                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12400         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12401                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12402 #ifdef CONFIG_HIGHMEM
12403                 dma_mask = DMA_64BIT_MASK;
12404 #endif
12405         } else
12406                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12407
12408         /* Configure DMA attributes. */
12409         if (dma_mask > DMA_32BIT_MASK) {
12410                 err = pci_set_dma_mask(pdev, dma_mask);
12411                 if (!err) {
12412                         dev->features |= NETIF_F_HIGHDMA;
12413                         err = pci_set_consistent_dma_mask(pdev,
12414                                                           persist_dma_mask);
12415                         if (err < 0) {
12416                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12417                                        "DMA for consistent allocations\n");
12418                                 goto err_out_iounmap;
12419                         }
12420                 }
12421         }
12422         if (err || dma_mask == DMA_32BIT_MASK) {
12423                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12424                 if (err) {
12425                         printk(KERN_ERR PFX "No usable DMA configuration, "
12426                                "aborting.\n");
12427                         goto err_out_iounmap;
12428                 }
12429         }
12430
12431         tg3_init_bufmgr_config(tp);
12432
12433         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12434                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12435         }
12436         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12438             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12439             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12440             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12441                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12442         } else {
12443                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12444         }
12445
12446         /* TSO is on by default on chips that support hardware TSO.
12447          * Firmware TSO on older chips gives lower performance, so it
12448          * is off by default, but can be enabled using ethtool.
12449          */
12450         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12451                 dev->features |= NETIF_F_TSO;
12452                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12453                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12454                         dev->features |= NETIF_F_TSO6;
12455                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12456                         dev->features |= NETIF_F_TSO_ECN;
12457         }
12458
12459
12460         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12461             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12462             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12463                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12464                 tp->rx_pending = 63;
12465         }
12466
12467         err = tg3_get_device_address(tp);
12468         if (err) {
12469                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12470                        "aborting.\n");
12471                 goto err_out_iounmap;
12472         }
12473
12474         /*
12475          * Reset chip in case UNDI or EFI driver did not shutdown
12476          * DMA self test will enable WDMAC and we'll see (spurious)
12477          * pending DMA on the PCI bus at that point.
12478          */
12479         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12480             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12481                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12482                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12483         }
12484
12485         err = tg3_test_dma(tp);
12486         if (err) {
12487                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12488                 goto err_out_iounmap;
12489         }
12490
12491         /* Tigon3 can do ipv4 only... and some chips have buggy
12492          * checksumming.
12493          */
12494         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12495                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12496                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12497                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12498                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12499                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12500                         dev->features |= NETIF_F_IPV6_CSUM;
12501
12502                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12503         } else
12504                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12505
12506         /* flow control autonegotiation is default behavior */
12507         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12508
12509         tg3_init_coal(tp);
12510
12511         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12512                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12513                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12514                                "base address for APE, aborting.\n");
12515                         err = -ENODEV;
12516                         goto err_out_iounmap;
12517                 }
12518
12519                 tg3reg_base = pci_resource_start(pdev, 2);
12520                 tg3reg_len = pci_resource_len(pdev, 2);
12521
12522                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12523                 if (tp->aperegs == 0UL) {
12524                         printk(KERN_ERR PFX "Cannot map APE registers, "
12525                                "aborting.\n");
12526                         err = -ENOMEM;
12527                         goto err_out_iounmap;
12528                 }
12529
12530                 tg3_ape_lock_init(tp);
12531         }
12532
12533         pci_set_drvdata(pdev, dev);
12534
12535         err = register_netdev(dev);
12536         if (err) {
12537                 printk(KERN_ERR PFX "Cannot register net device, "
12538                        "aborting.\n");
12539                 goto err_out_apeunmap;
12540         }
12541
12542         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12543                dev->name,
12544                tp->board_part_number,
12545                tp->pci_chip_rev_id,
12546                tg3_phy_string(tp),
12547                tg3_bus_string(tp, str),
12548                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12549                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12550                  "10/100/1000Base-T")));
12551
12552         for (i = 0; i < 6; i++)
12553                 printk("%2.2x%c", dev->dev_addr[i],
12554                        i == 5 ? '\n' : ':');
12555
12556         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12557                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12558                dev->name,
12559                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12560                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12561                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12562                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12563                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12564                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12565         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12566                dev->name, tp->dma_rwctrl,
12567                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12568                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12569
12570         return 0;
12571
12572 err_out_apeunmap:
12573         if (tp->aperegs) {
12574                 iounmap(tp->aperegs);
12575                 tp->aperegs = NULL;
12576         }
12577
12578 err_out_iounmap:
12579         if (tp->regs) {
12580                 iounmap(tp->regs);
12581                 tp->regs = NULL;
12582         }
12583
12584 err_out_free_dev:
12585         free_netdev(dev);
12586
12587 err_out_free_res:
12588         pci_release_regions(pdev);
12589
12590 err_out_disable_pdev:
12591         pci_disable_device(pdev);
12592         pci_set_drvdata(pdev, NULL);
12593         return err;
12594 }
12595
12596 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12597 {
12598         struct net_device *dev = pci_get_drvdata(pdev);
12599
12600         if (dev) {
12601                 struct tg3 *tp = netdev_priv(dev);
12602
12603                 flush_scheduled_work();
12604                 unregister_netdev(dev);
12605                 if (tp->aperegs) {
12606                         iounmap(tp->aperegs);
12607                         tp->aperegs = NULL;
12608                 }
12609                 if (tp->regs) {
12610                         iounmap(tp->regs);
12611                         tp->regs = NULL;
12612                 }
12613                 free_netdev(dev);
12614                 pci_release_regions(pdev);
12615                 pci_disable_device(pdev);
12616                 pci_set_drvdata(pdev, NULL);
12617         }
12618 }
12619
/* tg3_suspend - legacy PCI power-management suspend callback.
 *
 * Saves PCI config state, and if the interface is up: stops the
 * datapath, disables interrupts, halts the chip and drops it into the
 * requested low-power state.  On failure to enter that state, the
 * chip is restarted and the interface reattached so the device is
 * left usable.  Returns 0 on success or the error from
 * tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Wait out any in-flight reset task before quiescing the chip. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Second arg 1: take the lock with interrupts disabled. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Entering the low-power state failed: bring the chip
		 * back up and reattach the interface so the device
		 * remains usable, then propagate the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12671
/* tg3_resume - legacy PCI power-management resume callback.
 *
 * Restores PCI config state, and if the interface was up at suspend
 * time: returns the chip to full power (D0), reattaches the interface
 * and restarts the hardware and driver timer.  Returns 0 on success
 * or a negative error from the power-state change / hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped by tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12711
/* PCI driver descriptor: binds the tg3 probe/remove and legacy
 * suspend/resume callbacks to the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12720
/* Module entry point: register the PCI driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12725
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12730
/* Hook the init/exit functions into the kernel module machinery. */
module_init(tg3_init);
module_exit(tg3_cleanup);