/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.3.20-k2" DRIVERNAPI
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
        INTEL_E1000_ETHERNET_DEVICE(0x1009),
        INTEL_E1000_ETHERNET_DEVICE(0x100C),
        INTEL_E1000_ETHERNET_DEVICE(0x100D),
        INTEL_E1000_ETHERNET_DEVICE(0x100E),
        INTEL_E1000_ETHERNET_DEVICE(0x100F),
        INTEL_E1000_ETHERNET_DEVICE(0x1010),
        INTEL_E1000_ETHERNET_DEVICE(0x1011),
        INTEL_E1000_ETHERNET_DEVICE(0x1012),
        INTEL_E1000_ETHERNET_DEVICE(0x1013),
        INTEL_E1000_ETHERNET_DEVICE(0x1014),
        INTEL_E1000_ETHERNET_DEVICE(0x1015),
        INTEL_E1000_ETHERNET_DEVICE(0x1016),
        INTEL_E1000_ETHERNET_DEVICE(0x1017),
        INTEL_E1000_ETHERNET_DEVICE(0x1018),
        INTEL_E1000_ETHERNET_DEVICE(0x1019),
        INTEL_E1000_ETHERNET_DEVICE(0x101A),
        INTEL_E1000_ETHERNET_DEVICE(0x101D),
        INTEL_E1000_ETHERNET_DEVICE(0x101E),
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
        INTEL_E1000_ETHERNET_DEVICE(0x1078),
        INTEL_E1000_ETHERNET_DEVICE(0x1079),
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static irqreturn_t e1000_intr_msi(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                               struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring,
                               int *work_done, int work_to_do);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  struct e1000_rx_ring *rx_ring,
                                  int *work_done, int work_to_do);
#else
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                      struct e1000_rx_ring *rx_ring,
                                      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
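
/* Note (illustrative, not part of the original driver text): because
 * copybreak is registered with mode 0644, it can also be tuned at runtime
 * through sysfs, e.g.:
 *
 *   # echo 0   > /sys/module/e1000/parameters/copybreak   (disable copying)
 *   # echo 512 > /sys/module/e1000/parameters/copybreak   (copy <= 512 bytes)
 *
 * The receive path uses it as a simple heuristic: packets no longer than
 * copybreak bytes are copied into a fresh small skb so the original DMA
 * buffer can be recycled, while larger packets hand the mapped buffer
 * itself up the stack.
 */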

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                     pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
        .error_detected = e1000_io_error_detected,
        .slot_reset = e1000_io_slot_reset,
        .resume = e1000_io_resume,
};
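
/* Note (explanatory, not from the original source): these three callbacks
 * implement the standard PCI error-recovery sequence.  On a reported PCI
 * channel error the core calls .error_detected (the driver quiesces the
 * device), then .slot_reset after the slot/link has been reset (the driver
 * restores PCI state), and finally .resume once traffic may restart. */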

static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = e1000_suspend,
        .resume   = e1000_resume,
#endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               e1000_driver_string, e1000_driver_version);

        printk(KERN_INFO "%s\n", e1000_copyright);

        ret = pci_register_driver(&e1000_driver);
        if (copybreak != COPYBREAK_DEFAULT) {
                if (copybreak == 0)
                        printk(KERN_INFO "e1000: copybreak disabled\n");
                else
                        printk(KERN_INFO "e1000: copybreak enabled for "
                               "packets <= %u bytes\n", copybreak);
        }
        return ret;
}
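
/* Usage sketch (illustrative): module parameters can also be given at
 * load time, e.g.:
 *
 *   # modprobe e1000 copybreak=128 debug=3
 *
 * which would print the "copybreak enabled for packets <= 128 bytes"
 * message above and set the NETIF_MSG_* verbosity derived from "debug". */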

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        irq_handler_t handler = e1000_intr;
        int irq_flags = IRQF_SHARED;
        int err;

        if (hw->mac_type >= e1000_82571) {
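                /* Note (added comment): pci_enable_msi() returns 0 on
                 * success, so the negation below records "MSI is in use".
                 * With MSI the vector is exclusive, hence IRQF_SHARED is
                 * dropped; on failure we quietly fall back to the legacy
                 * shared INTx handler. */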
                adapter->have_msi = !pci_enable_msi(adapter->pdev);
                if (adapter->have_msi) {
                        handler = e1000_intr_msi;
                        irq_flags = 0;
                }
        }

        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate interrupt. Error: %d\n", err);
        }

        return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

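        /* Note (added comment): writing all-ones to IMC masks every
         * interrupt cause; the register read behind E1000_WRITE_FLUSH()
         * forces the posted write out to the NIC, and synchronize_irq()
         * then waits for any handler already running to finish. */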
        ew32(IMC, ~0);
        E1000_WRITE_FLUSH();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        ew32(IMS, IMS_ENABLE_MASK);
        E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u16 vid = hw->mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (hw->mng_cookie.status &
                                E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

                        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
                                        (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/

static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
        u32 ctrl_ext;
        u32 swsm;
        struct e1000_hw *hw = &adapter->hw;

        /* Let firmware take over control of h/w */
        switch (hw->mac_type) {
        case e1000_82573:
                swsm = er32(SWSM);
                ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
        case e1000_ich8lan:
                ctrl_ext = er32(CTRL_EXT);
                ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
                break;
        default:
                break;
        }
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/

static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
        u32 ctrl_ext;
        u32 swsm;
        struct e1000_hw *hw = &adapter->hw;

        /* Let firmware know the driver has taken over */
        switch (hw->mac_type) {
        case e1000_82573:
                swsm = er32(SWSM);
                ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
        case e1000_ich8lan:
                ctrl_ext = er32(CTRL_EXT);
                ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                break;
        default:
                break;
        }
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host;
                 * this will probably generate destination unreachable
                 * messages from the host OS, but the packets will be
                 * handled on the SMBus */
                if (hw->has_manc2h) {
                        u32 manc2h = er32(MANC2H);

                        manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
                        manc2h |= E1000_MNG2HOST_PORT_623;
                        manc2h |= E1000_MNG2HOST_PORT_664;
                        ew32(MANC2H, manc2h);
                }
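
                /* Note (explanatory, not from the original source): the
                 * PORT_623/PORT_664 bits select filters for UDP ports 623
                 * and 664, the ports used by ASF/RMCP remote-management
                 * traffic, so those packets are also routed through to the
                 * host interface. */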

                ew32(MANC, manc);
        }
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;

                if (hw->has_manc2h)
                        manc &= ~E1000_MANC_EN_MNG2HOST;

                /* we don't have to touch MANC2H explicitly, since
                 * MANC has an enable/disable bit that gates MANC2H */

                ew32(MANC, manc);
        }
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        e1000_set_rx_mode(netdev);

        e1000_restore_vlan(adapter);
        e1000_init_manageability(adapter);

        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
        /* call E1000_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
                adapter->alloc_rx_buf(adapter, ring,
                                      E1000_DESC_UNUSED(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}

int e1000_up(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */
        e1000_configure(adapter);

        clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
        napi_enable(&adapter->napi);
#endif
        e1000_irq_enable(adapter);

        /* fire a link change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
        return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 mii_reg = 0;

        /* Just clear the power down bit to wake the phy back up */
        if (hw->media_type == e1000_media_type_copper) {
                /* according to the manual, the phy will retain its
                 * settings across a power-down/up cycle */
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg &= ~MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
        }
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* Power down the PHY so no link is implied when interface is down.
         * The PHY cannot be powered down if any of the following is true:
         * (a) WoL is enabled
         * (b) AMT is active
         * (c) SoL/IDER session is active */
        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
           hw->media_type == e1000_media_type_copper) {
                u16 mii_reg = 0;

                switch (hw->mac_type) {
                case e1000_82540:
                case e1000_82545:
                case e1000_82545_rev_3:
                case e1000_82546:
                case e1000_82546_rev_3:
                case e1000_82541:
                case e1000_82541_rev_2:
                case e1000_82547:
                case e1000_82547_rev_2:
                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
                                goto out;
                        break;
                case e1000_82571:
                case e1000_82572:
                case e1000_82573:
                case e1000_80003es2lan:
                case e1000_ich8lan:
                        if (e1000_check_mng_mode(hw) ||
                            e1000_check_phy_reset_block(hw))
                                goto out;
                        break;
                default:
                        goto out;
                }
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
                mdelay(1);
        }
out:
        return;
}

void e1000_down(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
        napi_disable(&adapter->napi);
#endif
        e1000_irq_disable(adapter);

        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
        e1000_down(adapter);
        e1000_up(adapter);
        clear_bit(__E1000_RESETTING, &adapter->flags);
}
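
/* Note (added comment): the test_and_set_bit()/msleep() loop above is a
 * simple sleeping lock built on the __E1000_RESETTING flag; it serializes
 * concurrent reset requests (e.g. from the reset task and ethtool paths)
 * without needing a dedicated mutex. */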

void e1000_reset(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
        bool legacy_pba_adjust = false;

        /* Repartition the PBA for an MTU greater than 9k.
         * CTRL.RST is required for the change to take effect.
         */

        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
        case e1000_82544:
        case e1000_82540:
        case e1000_82541:
        case e1000_82541_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_48K;
                break;
        case e1000_82545:
        case e1000_82545_rev_3:
        case e1000_82546:
        case e1000_82546_rev_3:
                pba = E1000_PBA_48K;
                break;
        case e1000_82547:
        case e1000_82547_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_30K;
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
                pba = E1000_PBA_38K;
                break;
        case e1000_82573:
                pba = E1000_PBA_20K;
                break;
        case e1000_ich8lan:
                pba = E1000_PBA_8K;
                break;
        case e1000_undefined:
        case e1000_num_macs:
                break;
        }

        if (legacy_pba_adjust) {
                if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
                        pba -= 8; /* allocate more FIFO for Tx */

                if (hw->mac_type == e1000_82547) {
                        adapter->tx_fifo_head = 0;
                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                        adapter->tx_fifo_size =
                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
                        atomic_set(&adapter->tx_fifo_stall, 0);
                }
        } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
                /* adjust PBA for jumbo frames */
                ew32(PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = er32(PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* don't include ethernet FCS because hardware appends/strips */
                min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
                               VLAN_TAG_SIZE;
                min_tx_space = min_rx_space;
                min_tx_space *= 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* PCI/PCIx hardware has PBA alignment constraints */
                        switch (hw->mac_type) {
                        case e1000_82545 ... e1000_82546_rev_3:
                                pba &= ~(E1000_PBA_8K - 1);
                                break;
                        default:
                                break;
                        }

                        /* if short on Rx space, Rx wins and must trump Tx
                         * adjustment or use Early Receive if available */
                        if (pba < min_rx_space) {
                                switch (hw->mac_type) {
                                case e1000_82573:
                                        /* ERT enabled in e1000_configure_rx */
                                        break;
                                default:
                                        pba = min_rx_space;
                                        break;
                                }
                        }
                }
        }

        ew32(PBA, pba);

        /* flow control settings */
        /* Set the FC high water mark to 90% of the FIFO size.
         * The watermark must have its low 3 bits clear. */
        fc_high_water_mark = ((pba * 9216) / 10) & 0xFFF8;
        /* We can't use 90% on small FIFOs because the remainder
         * would be less than one full frame.  In this case, we size
         * it to allow at least one full frame above the high water
         * mark. */
        if (pba < E1000_PBA_16K)
                fc_high_water_mark = (pba * 1024) - 1600;

        hw->fc_high_water = fc_high_water_mark;
        hw->fc_low_water = fc_high_water_mark - 8;
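
        /* Worked example (illustrative, not from the original source):
         * pba is in KB, so with pba = 48 the FIFO is 49152 bytes.
         * 90% of that is 48 * 9216 / 10 = 44236; masking with 0xFFF8
         * clears the low 3 bits, giving fc_high_water = 44232 and
         * fc_low_water = 44224 (one 8-byte granule below). */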
        if (hw->mac_type == e1000_80003es2lan)
                hw->fc_pause_time = 0xFFFF;
        else
                hw->fc_pause_time = E1000_FC_PAUSE_TIME;
        hw->fc_send_xon = 1;
        hw->fc = hw->original_fc;

        /* Allow time for pending master requests to run */
        e1000_reset_hw(hw);
        if (hw->mac_type >= e1000_82544)
                ew32(WUC, 0);

        if (e1000_init_hw(hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);

        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
        if (hw->mac_type >= e1000_82544 &&
            hw->mac_type <= e1000_82547_rev_2 &&
            hw->autoneg == 1 &&
            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
                u32 ctrl = er32(CTRL);
                /* clear the phy power management bit if we are in gig only
                 * mode, which if enabled will attempt negotiation to 100Mb,
                 * and that can cause a loss of link at power off or driver
                 * unload */
                ctrl &= ~E1000_CTRL_SWDPIN3;
                ew32(CTRL, ctrl);
        }

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

        e1000_reset_adaptive(hw);
        e1000_phy_get_info(hw, &adapter->phy_info);

        if (!adapter->smart_power_down &&
            (hw->mac_type == e1000_82571 ||
             hw->mac_type == e1000_82572)) {
                u16 phy_data = 0;
                /* speed up time to link by disabling smart power down;
                 * ignore the return value of this function because there
                 * is nothing different we would do if it failed */
                e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                   &phy_data);
                phy_data &= ~IGP02E1000_PM_SPD;
                e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                    phy_data);
        }

        e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ethtool_eeprom eeprom;
        const struct ethtool_ops *ops = netdev->ethtool_ops;
        u8 *data;
        int i;
        u16 csum_old, csum_new = 0;

        eeprom.len = ops->get_eeprom_len(netdev);
        eeprom.offset = 0;

        data = kmalloc(eeprom.len, GFP_KERNEL);
        if (!data) {
                printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
                       " data\n");
                return;
        }

        ops->get_eeprom(netdev, &eeprom, data);

        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
                csum_new += data[i] + (data[i + 1] << 8);
        csum_new = EEPROM_SUM - csum_new;
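
        /* Note (explanatory, not from the original source): the EEPROM is
         * checksummed so that the 16-bit words from offset 0 up to and
         * including the checksum word sum to the constant EEPROM_SUM
         * (0xBABA on these parts).  csum_old is the checksum word stored
         * in the image; csum_new is what that word would have to be for
         * the rest of the image to validate. */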

        printk(KERN_ERR "/*********************/\n");
        printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
        printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);

        printk(KERN_ERR "Offset    Values\n");
        printk(KERN_ERR "========  ======\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

        printk(KERN_ERR "Include this output when contacting your support "
               "provider.\n");
        printk(KERN_ERR "This is not a software error! Something bad "
               "happened to your hardware or\n");
        printk(KERN_ERR "EEPROM image. Ignoring this "
               "problem could result in further problems,\n");
        printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
        printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
               "which is invalid\n");
        printk(KERN_ERR "and requires you to set the proper MAC "
               "address manually before continuing\n");
        printk(KERN_ERR "to enable this network device.\n");
        printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
               "to your hardware vendor\n");
        printk(KERN_ERR "or Intel Customer Support: linux-nics@intel.com\n");
        printk(KERN_ERR "/*********************/\n");

        kfree(data);
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit e1000_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        struct e1000_hw *hw;

        static int cards_found = 0;
        static int global_quad_port_a = 0; /* global ksp3 port a indication */
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        DECLARE_MAC_BUF(mac);

        if ((err = pci_enable_device(pdev)))
                return err;

        if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
            !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }
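
        /* Note (explanatory, not from the original source): this is the
         * classic "try 64-bit DMA, fall back to 32-bit" probe pattern;
         * pci_using_dac records whether the device may be handed highmem
         * (64-bit) addresses.  On later kernels the same intent is usually
         * written as a single dma_set_mask_and_coherent(&pdev->dev,
         * DMA_BIT_MASK(64)) call with a DMA_BIT_MASK(32) fallback. */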

        if ((err = pci_request_regions(pdev, e1000_driver_name)))
                goto err_pci_reg;

        pci_set_master(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->msg_enable = (1 << debug) - 1;

        hw = &adapter->hw;
        hw->back = adapter;

        err = -EIO;
        hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
                              pci_resource_len(pdev, BAR_0));
        if (!hw->hw_addr)
                goto err_ioremap;

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        hw->io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->open = &e1000_open;
        netdev->stop = &e1000_close;
        netdev->hard_start_xmit = &e1000_xmit_frame;
        netdev->get_stats = &e1000_get_stats;
        netdev->set_rx_mode = &e1000_set_rx_mode;
        netdev->set_mac_address = &e1000_set_mac;
        netdev->change_mtu = &e1000_change_mtu;
        netdev->do_ioctl = &e1000_ioctl;
        e1000_set_ethtool_ops(netdev);
        netdev->tx_timeout = &e1000_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
#endif
        netdev->vlan_rx_register = e1000_vlan_rx_register;
        netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = e1000_netpoll;
#endif
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;

        /* setup the private structure */

        if ((err = e1000_sw_init(adapter)))
                goto err_sw_init;

        err = -EIO;
        /* Flash BAR mapping must happen after e1000_sw_init
         * because it depends on mac_type */
        if ((hw->mac_type == e1000_ich8lan) &&
           (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                hw->flash_address =
                        ioremap(pci_resource_start(pdev, 1),
                                pci_resource_len(pdev, 1));
                if (!hw->flash_address)
                        goto err_flashmap;
        }

        if (e1000_check_phy_reset_block(hw))
                DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

        if (hw->mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
                if (hw->mac_type == e1000_ich8lan)
                        netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
        }

        if ((hw->mac_type >= e1000_82544) &&
           (hw->mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;

        if (hw->mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO6;
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->features |= NETIF_F_LLTX;

        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

        /* initialize eeprom parameters */
        if (e1000_init_eeprom_params(hw)) {
                E1000_ERR("EEPROM initialization failed\n");
                goto err_eeprom;
        }

        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */

        e1000_reset_hw(hw);

        /* make sure the EEPROM is good */
        if (e1000_validate_eeprom_checksum(hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                e1000_dump_eeprom(adapter);
                /*
                 * set the MAC address to all zeroes to invalidate it and
                 * temporarily disable this device for the user. This blocks
                 * regular traffic while still permitting ethtool ioctls from
                 * reaching the hardware, and allows the user to run the
                 * interface after manually setting a hw addr using
                 * `ip link set address`
                 */
                memset(hw->mac_addr, 0, netdev->addr_len);
        } else {
                /* copy the MAC address out of the EEPROM */
                if (e1000_read_mac_addr(hw))
                        DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        }
        /* don't block initialization here due to a bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr))
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");

        e1000_get_bus_info(hw);

        init_timer(&adapter->tx_fifo_stall_timer);
        adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
        adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;
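
        /* Note (illustrative, not from the original source): each of the
         * three blocks above is the open-coded form of what setup_timer()
         * expresses in one line, e.g.:
         *
         *   setup_timer(&adapter->watchdog_timer, e1000_watchdog,
         *               (unsigned long)adapter);
         *
         * (and what much later kernels spell timer_setup()). */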

        INIT_WORK(&adapter->reset_task, e1000_reset_task);

        e1000_check_options(adapter);

        /* Initial Wake on LAN setting
         * If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */

        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
                break;
        case e1000_82544:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
        case e1000_ich8lan:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
        case e1000_80003es2lan:
                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                }
                /* Fall Through */
        default:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases
         * where the eeprom may be wrong or the board simply won't support
         * wake on lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
        case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (er32(STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
        case E1000_DEV_ID_82571EB_QUAD_COPPER:
        case E1000_DEV_ID_82571EB_QUAD_FIBER:
        case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
        case E1000_DEV_ID_82571PT_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->quad_port_a = 1;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;

        /* print bus type/speed/width info */
        DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
                 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
                ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
                 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
                 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
                 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
                 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
                ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
                 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
                 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
                 "32-bit"));

        printk("%s\n", print_mac(mac, netdev->dev_addr));

        if (hw->bus_type == e1000_bus_type_pci_express) {
                DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
                        "longer be supported by this driver in the future.\n",
                        pdev->vendor, pdev->device);
                DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
                        "driver instead.\n");
        }

        /* reset the hardware with the new settings */
        e1000_reset(adapter);

        /* If the controller is 82573 and f/w is AMT, do not set
         * DRV_LOAD until the interface is up.  For all other cases,
         * let the f/w know that the h/w is now under the control
         * of the driver. */
        if (hw->mac_type != e1000_82573 ||
            !e1000_check_mng_mode(hw))
                e1000_get_hw_control(adapter);

        /* tell the stack to leave us alone until e1000_open() is called */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        strcpy(netdev->name, "eth%d");
        if ((err = register_netdev(netdev)))
                goto err_register;

        DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

        cards_found++;
        return 0;

err_register:
        e1000_release_hw_control(adapter);
err_eeprom:
        if (!e1000_check_phy_reset_block(hw))
                e1000_phy_hw_reset(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++)
                dev_put(&adapter->polling_netdev[i]);
#endif

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
        kfree(adapter->polling_netdev);
#endif
err_sw_init:
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        cancel_work_sync(&adapter->reset_task);

        e1000_release_manageability(adapter);

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        e1000_release_hw_control(adapter);

#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++)
                dev_put(&adapter->polling_netdev[i]);
#endif

        unregister_netdev(netdev);

        if (!e1000_check_phy_reset_block(hw))
                e1000_phy_hw_reset(hw);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
        kfree(adapter->polling_netdev);
#endif

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_regions(pdev);

        free_netdev(netdev);

        pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
        hw->revision_id = pdev->revision;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        /* identify the MAC */

        if (e1000_set_mac_type(hw)) {
                DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
                return -EIO;
        }

        switch (hw->mac_type) {
        default:
                break;
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
        case e1000_82547_rev_2:
                hw->phy_init_script = 1;
                break;
        }

        e1000_set_media_type(hw);

        hw->wait_autoneg_complete = false;
        hw->tbi_compatibility_en = true;
        hw->adaptive_ifs = true;

        /* Copper options */

        if (hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = false;
                hw->master_slave = E1000_MASTER_SLAVE;
        }

        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;

        if (e1000_alloc_queues(adapter)) {
                DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->polling_netdev[i].priv = adapter;
                dev_hold(&adapter->polling_netdev[i]);
                set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
        }
        spin_lock_init(&adapter->tx_queue_lock);
#endif

        /* Explicitly disable IRQ since the NIC can be in any state. */
        e1000_irq_disable(adapter);

        spin_lock_init(&adapter->stats_lock);

        set_bit(__E1000_DOWN, &adapter->flags);

        return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

#ifdef CONFIG_E1000_NAPI
        adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
                                          sizeof(struct net_device),
                                          GFP_KERNEL);
        if (!adapter->polling_netdev) {
                kfree(adapter->tx_ring);
                kfree(adapter->rx_ring);
                return -ENOMEM;
        }
#endif

        return E1000_SUCCESS;
}
1406
1407 /**
1408  * e1000_open - Called when a network interface is made active
1409  * @netdev: network interface device structure
1410  *
1411  * Returns 0 on success, negative value on failure
1412  *
1413  * The open entry point is called when a network interface is made
1414  * active by the system (IFF_UP).  At this point all resources needed
1415  * for transmit and receive operations are allocated, the interrupt
1416  * handler is registered with the OS, the watchdog timer is started,
1417  * and the stack is notified that the interface is ready.
1418  **/
1419
1420 static int e1000_open(struct net_device *netdev)
1421 {
1422         struct e1000_adapter *adapter = netdev_priv(netdev);
1423         struct e1000_hw *hw = &adapter->hw;
1424         int err;
1425
1426         /* disallow open during test */
1427         if (test_bit(__E1000_TESTING, &adapter->flags))
1428                 return -EBUSY;
1429
1430         /* allocate transmit descriptors */
1431         err = e1000_setup_all_tx_resources(adapter);
1432         if (err)
1433                 goto err_setup_tx;
1434
1435         /* allocate receive descriptors */
1436         err = e1000_setup_all_rx_resources(adapter);
1437         if (err)
1438                 goto err_setup_rx;
1439
1440         e1000_power_up_phy(adapter);
1441
1442         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1443         if ((hw->mng_cookie.status &
1444                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1445                 e1000_update_mng_vlan(adapter);
1446         }
1447
1448         /* If AMT is enabled, let the firmware know that the network
1449          * interface is now open */
1450         if (hw->mac_type == e1000_82573 &&
1451             e1000_check_mng_mode(hw))
1452                 e1000_get_hw_control(adapter);
1453
1454         /* before we allocate an interrupt, we must be ready to handle it.
1455          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1456          * as soon as we call request_irq, so we have to set up our
1457          * clean_rx handler before we do so. */
1458         e1000_configure(adapter);
1459
1460         err = e1000_request_irq(adapter);
1461         if (err)
1462                 goto err_req_irq;
1463
1464         /* From here on the code is the same as e1000_up() */
1465         clear_bit(__E1000_DOWN, &adapter->flags);
1466
1467 #ifdef CONFIG_E1000_NAPI
1468         napi_enable(&adapter->napi);
1469 #endif
1470
1471         e1000_irq_enable(adapter);
1472
1473         netif_start_queue(netdev);
1474
1475         /* fire a link status change interrupt to start the watchdog */
1476         ew32(ICS, E1000_ICS_LSC);
1477
1478         return E1000_SUCCESS;
1479
1480 err_req_irq:
1481         e1000_release_hw_control(adapter);
1482         e1000_power_down_phy(adapter);
1483         e1000_free_all_rx_resources(adapter);
1484 err_setup_rx:
1485         e1000_free_all_tx_resources(adapter);
1486 err_setup_tx:
1487         e1000_reset(adapter);
1488
1489         return err;
1490 }
1491
1492 /**
1493  * e1000_close - Disables a network interface
1494  * @netdev: network interface device structure
1495  *
1496  * Returns 0; this is not allowed to fail
1497  *
1498  * The close entry point is called when an interface is de-activated
1499  * by the OS.  The hardware is still under the driver's control, but
1500  * needs to be disabled.  A global MAC reset is issued to stop the
1501  * hardware, and all transmit and receive resources are freed.
1502  **/
1503
1504 static int e1000_close(struct net_device *netdev)
1505 {
1506         struct e1000_adapter *adapter = netdev_priv(netdev);
1507         struct e1000_hw *hw = &adapter->hw;
1508
1509         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1510         e1000_down(adapter);
1511         e1000_power_down_phy(adapter);
1512         e1000_free_irq(adapter);
1513
1514         e1000_free_all_tx_resources(adapter);
1515         e1000_free_all_rx_resources(adapter);
1516
1517         /* kill the manageability VLAN ID if supported, but not if a VLAN
1518          * with the same ID is registered on the host OS (let 8021q kill it) */
1519         if ((hw->mng_cookie.status &
1520                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1521              !(adapter->vlgrp &&
1522                vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
1523                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1524         }
1525
1526         /* If AMT is enabled, let the firmware know that the network
1527          * interface is now closed */
1528         if (hw->mac_type == e1000_82573 &&
1529             e1000_check_mng_mode(hw))
1530                 e1000_release_hw_control(adapter);
1531
1532         return 0;
1533 }
1534
1535 /**
1536  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1537  * @adapter: address of board private structure
1538  * @start: address of beginning of memory
1539  * @len: length of memory
1540  **/
1541 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1542                                   unsigned long len)
1543 {
1544         struct e1000_hw *hw = &adapter->hw;
1545         unsigned long begin = (unsigned long) start;
1546         unsigned long end = begin + len;
1547
1548         /* First-rev 82545 and 82546 parts must not allow any memory write
1549          * location to cross a 64KB boundary, due to errata 23 */
1550         if (hw->mac_type == e1000_82545 ||
1551             hw->mac_type == e1000_82546) {
1552                 return ((begin ^ (end - 1)) >> 16) == 0;
1553         }
1554
1555         return true;
1556 }
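
/* Illustrative note, not part of the original driver: the XOR test above
 * is equivalent to comparing the 64KB page indices of the first and last
 * byte of the buffer.  For a hypothetical buffer at 0x2FF00 with length
 * 0x200, end - 1 == 0x300FF and (0x2FF00 ^ 0x300FF) >> 16 == 1, so the
 * buffer straddles a boundary and must be reallocated on first-rev
 * 82545/82546 parts.  The same check written as a page-index compare:
 */
static inline bool e1000_crosses_64k_sketch(unsigned long begin,
                                            unsigned long len)
{
        /* 64KB page of the first byte differs from that of the last byte */
        return (begin >> 16) != ((begin + len - 1) >> 16);
}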
1557
1558 /**
1559  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1560  * @adapter: board private structure
1561  * @txdr:    tx descriptor ring (for a specific queue) to setup
1562  *
1563  * Return 0 on success, negative on failure
1564  **/
1565
1566 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1567                                     struct e1000_tx_ring *txdr)
1568 {
1569         struct pci_dev *pdev = adapter->pdev;
1570         int size;
1571
1572         size = sizeof(struct e1000_buffer) * txdr->count;
1573         txdr->buffer_info = vmalloc(size);
1574         if (!txdr->buffer_info) {
1575                 DPRINTK(PROBE, ERR,
1576                 "Unable to allocate memory for the transmit descriptor ring\n");
1577                 return -ENOMEM;
1578         }
1579         memset(txdr->buffer_info, 0, size);
1580
1581         /* round up to nearest 4K */
1582
1583         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1584         txdr->size = ALIGN(txdr->size, 4096);
1585
1586         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1587         if (!txdr->desc) {
1588 setup_tx_desc_die:
1589                 vfree(txdr->buffer_info);
1590                 DPRINTK(PROBE, ERR,
1591                 "Unable to allocate memory for the transmit descriptor ring\n");
1592                 return -ENOMEM;
1593         }
1594
1595         /* Fix for errata 23, can't cross 64kB boundary */
1596         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1597                 void *olddesc = txdr->desc;
1598                 dma_addr_t olddma = txdr->dma;
1599                 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
1600                                      "at %p\n", txdr->size, txdr->desc);
1601                 /* Try again, without freeing the previous */
1602                 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1603                 /* Failed allocation, critical failure */
1604                 if (!txdr->desc) {
1605                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1606                         goto setup_tx_desc_die;
1607                 }
1608
1609                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1610                         /* give up */
1611                         pci_free_consistent(pdev, txdr->size, txdr->desc,
1612                                             txdr->dma);
1613                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1614                         DPRINTK(PROBE, ERR,
1615                                 "Unable to allocate aligned memory "
1616                                 "for the transmit descriptor ring\n");
1617                         vfree(txdr->buffer_info);
1618                         return -ENOMEM;
1619                 } else {
1620                         /* Free old allocation, new allocation was successful */
1621                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1622                 }
1623         }
1624         memset(txdr->desc, 0, txdr->size);
1625
1626         txdr->next_to_use = 0;
1627         txdr->next_to_clean = 0;
1628         spin_lock_init(&txdr->tx_lock);
1629
1630         return 0;
1631 }
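
/* Worked example (illustrative, not driver code): ALIGN(x, 4096) rounds x
 * up to the next 4KB multiple, i.e. (x + 4095) & ~4095.  A hypothetical
 * 80-descriptor ring needs 80 * 16 == 1280 bytes and is padded to 4096,
 * while a 256-descriptor ring needs exactly 4096 and is left unchanged.
 */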
1632
1633 /**
1634  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1635  *                                (Descriptors) for all queues
1636  * @adapter: board private structure
1637  *
1638  * Return 0 on success, negative on failure
1639  **/
1640
1641 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1642 {
1643         int i, err = 0;
1644
1645         for (i = 0; i < adapter->num_tx_queues; i++) {
1646                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1647                 if (err) {
1648                         DPRINTK(PROBE, ERR,
1649                                 "Allocation for Tx Queue %u failed\n", i);
1650                         for (i--; i >= 0; i--)
1651                                 e1000_free_tx_resources(adapter,
1652                                                         &adapter->tx_ring[i]);
1653                         break;
1654                 }
1655         }
1656
1657         return err;
1658 }
1659
1660 /**
1661  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1662  * @adapter: board private structure
1663  *
1664  * Configure the Tx unit of the MAC after a reset.
1665  **/
1666
1667 static void e1000_configure_tx(struct e1000_adapter *adapter)
1668 {
1669         u64 tdba;
1670         struct e1000_hw *hw = &adapter->hw;
1671         u32 tdlen, tctl, tipg, tarc;
1672         u32 ipgr1, ipgr2;
1673
1674         /* Setup the HW Tx Head and Tail descriptor pointers */
1675
1676         switch (adapter->num_tx_queues) {
1677         case 1:
1678         default:
1679                 tdba = adapter->tx_ring[0].dma;
1680                 tdlen = adapter->tx_ring[0].count *
1681                         sizeof(struct e1000_tx_desc);
1682                 ew32(TDLEN, tdlen);
1683                 ew32(TDBAH, (tdba >> 32));
1684                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1685                 ew32(TDT, 0);
1686                 ew32(TDH, 0);
1687                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1688                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1689                 break;
1690         }
1691
1692         /* Set the default values for the Tx Inter Packet Gap timer */
1693         if (hw->mac_type <= e1000_82547_rev_2 &&
1694             (hw->media_type == e1000_media_type_fiber ||
1695              hw->media_type == e1000_media_type_internal_serdes))
1696                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1697         else
1698                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1699
1700         switch (hw->mac_type) {
1701         case e1000_82542_rev2_0:
1702         case e1000_82542_rev2_1:
1703                 tipg = DEFAULT_82542_TIPG_IPGT;
1704                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1705                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1706                 break;
1707         case e1000_80003es2lan:
1708                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1709                 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1710                 break;
1711         default:
1712                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1713                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1714                 break;
1715         }
1716         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1717         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1718         ew32(TIPG, tipg);
1719
1720         /* Set the Tx Interrupt Delay register */
1721
1722         ew32(TIDV, adapter->tx_int_delay);
1723         if (hw->mac_type >= e1000_82540)
1724                 ew32(TADV, adapter->tx_abs_int_delay);
1725
1726         /* Program the Transmit Control Register */
1727
1728         tctl = er32(TCTL);
1729         tctl &= ~E1000_TCTL_CT;
1730         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1731                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1732
1733         if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1734                 tarc = er32(TARC0);
1735                 /* set the speed mode bit; we'll clear it later if we're
1736                  * not at gigabit link */
1737                 tarc |= (1 << 21);
1738                 ew32(TARC0, tarc);
1739         } else if (hw->mac_type == e1000_80003es2lan) {
1740                 tarc = er32(TARC0);
1741                 tarc |= 1;
1742                 ew32(TARC0, tarc);
1743                 tarc = er32(TARC1);
1744                 tarc |= 1;
1745                 ew32(TARC1, tarc);
1746         }
1747
1748         e1000_config_collision_dist(hw);
1749
1750         /* Setup Transmit Descriptor Settings for eop descriptor */
1751         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1752
1753         /* only set IDE if we are delaying interrupts using the timers */
1754         if (adapter->tx_int_delay)
1755                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1756
1757         if (hw->mac_type < e1000_82543)
1758                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1759         else
1760                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1761
1762         /* Cache if we're 82544 running in PCI-X because we'll
1763          * need this to apply a workaround later in the send path. */
1764         if (hw->mac_type == e1000_82544 &&
1765             hw->bus_type == e1000_bus_type_pcix)
1766                 adapter->pcix_82544 = 1;
1767
1768         ew32(TCTL, tctl);
1770 }
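
/* Illustrative note: TIPG packs three fields -- IPGT in bits 9:0, IPGR1
 * at E1000_TIPG_IPGR1_SHIFT (10) and IPGR2 at E1000_TIPG_IPGR2_SHIFT (20).
 * Assuming the usual 82543 copper defaults of IPGT=8, IPGR1=8 and IPGR2=6,
 * the value written above would be 8 | (8 << 10) | (6 << 20) == 0x00602008.
 */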
1771
1772 /**
1773  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1774  * @adapter: board private structure
1775  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1776  *
1777  * Returns 0 on success, negative on failure
1778  **/
1779
1780 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1781                                     struct e1000_rx_ring *rxdr)
1782 {
1783         struct e1000_hw *hw = &adapter->hw;
1784         struct pci_dev *pdev = adapter->pdev;
1785         int size, desc_len;
1786
1787         size = sizeof(struct e1000_buffer) * rxdr->count;
1788         rxdr->buffer_info = vmalloc(size);
1789         if (!rxdr->buffer_info) {
1790                 DPRINTK(PROBE, ERR,
1791                 "Unable to allocate memory for the receive descriptor ring\n");
1792                 return -ENOMEM;
1793         }
1794         memset(rxdr->buffer_info, 0, size);
1795
1796         rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
1797                                 GFP_KERNEL);
1798         if (!rxdr->ps_page) {
1799                 vfree(rxdr->buffer_info);
1800                 DPRINTK(PROBE, ERR,
1801                 "Unable to allocate memory for the receive descriptor ring\n");
1802                 return -ENOMEM;
1803         }
1804
1805         rxdr->ps_page_dma = kcalloc(rxdr->count,
1806                                     sizeof(struct e1000_ps_page_dma),
1807                                     GFP_KERNEL);
1808         if (!rxdr->ps_page_dma) {
1809                 vfree(rxdr->buffer_info);
1810                 kfree(rxdr->ps_page);
1811                 DPRINTK(PROBE, ERR,
1812                 "Unable to allocate memory for the receive descriptor ring\n");
1813                 return -ENOMEM;
1814         }
1815
1816         if (hw->mac_type <= e1000_82547_rev_2)
1817                 desc_len = sizeof(struct e1000_rx_desc);
1818         else
1819                 desc_len = sizeof(union e1000_rx_desc_packet_split);
1820
1821         /* Round up to nearest 4K */
1822
1823         rxdr->size = rxdr->count * desc_len;
1824         rxdr->size = ALIGN(rxdr->size, 4096);
1825
1826         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1827
1828         if (!rxdr->desc) {
1829                 DPRINTK(PROBE, ERR,
1830                 "Unable to allocate memory for the receive descriptor ring\n");
1831 setup_rx_desc_die:
1832                 vfree(rxdr->buffer_info);
1833                 kfree(rxdr->ps_page);
1834                 kfree(rxdr->ps_page_dma);
1835                 return -ENOMEM;
1836         }
1837
1838         /* Fix for errata 23, can't cross 64kB boundary */
1839         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1840                 void *olddesc = rxdr->desc;
1841                 dma_addr_t olddma = rxdr->dma;
1842                 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1843                                      "at %p\n", rxdr->size, rxdr->desc);
1844                 /* Try again, without freeing the previous */
1845                 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1846                 /* Failed allocation, critical failure */
1847                 if (!rxdr->desc) {
1848                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1849                         DPRINTK(PROBE, ERR,
1850                                 "Unable to allocate memory "
1851                                 "for the receive descriptor ring\n");
1852                         goto setup_rx_desc_die;
1853                 }
1854
1855                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1856                         /* give up */
1857                         pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1858                                             rxdr->dma);
1859                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1860                         DPRINTK(PROBE, ERR,
1861                                 "Unable to allocate aligned memory "
1862                                 "for the receive descriptor ring\n");
1863                         goto setup_rx_desc_die;
1864                 } else {
1865                         /* Free old allocation, new allocation was successful */
1866                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1867                 }
1868         }
1869         memset(rxdr->desc, 0, rxdr->size);
1870
1871         rxdr->next_to_clean = 0;
1872         rxdr->next_to_use = 0;
1873
1874         return 0;
1875 }
1876
1877 /**
1878  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1879  *                                (Descriptors) for all queues
1880  * @adapter: board private structure
1881  *
1882  * Return 0 on success, negative on failure
1883  **/
1884
1885 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1886 {
1887         int i, err = 0;
1888
1889         for (i = 0; i < adapter->num_rx_queues; i++) {
1890                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1891                 if (err) {
1892                         DPRINTK(PROBE, ERR,
1893                                 "Allocation for Rx Queue %u failed\n", i);
1894                         for (i--; i >= 0; i--)
1895                                 e1000_free_rx_resources(adapter,
1896                                                         &adapter->rx_ring[i]);
1897                         break;
1898                 }
1899         }
1900
1901         return err;
1902 }
1903
1904 /**
1905  * e1000_setup_rctl - configure the receive control registers
1906  * @adapter: Board private structure
1907  **/
1908 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1909                         (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1910 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1911 {
1912         struct e1000_hw *hw = &adapter->hw;
1913         u32 rctl, rfctl;
1914         u32 psrctl = 0;
1915 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1916         u32 pages = 0;
1917 #endif
1918
1919         rctl = er32(RCTL);
1920
1921         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1922
1923         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1924                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1925                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1926
1927         if (hw->tbi_compatibility_on == 1)
1928                 rctl |= E1000_RCTL_SBP;
1929         else
1930                 rctl &= ~E1000_RCTL_SBP;
1931
1932         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1933                 rctl &= ~E1000_RCTL_LPE;
1934         else
1935                 rctl |= E1000_RCTL_LPE;
1936
1937         /* Setup buffer sizes */
1938         rctl &= ~E1000_RCTL_SZ_4096;
1939         rctl |= E1000_RCTL_BSEX;
1940         switch (adapter->rx_buffer_len) {
1941         case E1000_RXBUFFER_256:
1942                 rctl |= E1000_RCTL_SZ_256;
1943                 rctl &= ~E1000_RCTL_BSEX;
1944                 break;
1945         case E1000_RXBUFFER_512:
1946                 rctl |= E1000_RCTL_SZ_512;
1947                 rctl &= ~E1000_RCTL_BSEX;
1948                 break;
1949         case E1000_RXBUFFER_1024:
1950                 rctl |= E1000_RCTL_SZ_1024;
1951                 rctl &= ~E1000_RCTL_BSEX;
1952                 break;
1953         case E1000_RXBUFFER_2048:
1954         default:
1955                 rctl |= E1000_RCTL_SZ_2048;
1956                 rctl &= ~E1000_RCTL_BSEX;
1957                 break;
1958         case E1000_RXBUFFER_4096:
1959                 rctl |= E1000_RCTL_SZ_4096;
1960                 break;
1961         case E1000_RXBUFFER_8192:
1962                 rctl |= E1000_RCTL_SZ_8192;
1963                 break;
1964         case E1000_RXBUFFER_16384:
1965                 rctl |= E1000_RCTL_SZ_16384;
1966                 break;
1967         }
1968
1969 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1970         /* 82571 and greater support packet-split where the protocol
1971          * header is placed in skb->data and the packet data is
1972          * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1973          * In the case of a non-split, skb->data is linearly filled,
1974          * followed by the page buffers.  Therefore, skb->data is
1975          * sized to hold the largest protocol header.
1976          */
1977         /* allocations using alloc_page take too long for regular MTU,
1978          * so only enable packet split for jumbo frames */
1979         pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1980         if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
1981             PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
1982                 adapter->rx_ps_pages = pages;
1983         else
1984                 adapter->rx_ps_pages = 0;
1985 #endif
1986         if (adapter->rx_ps_pages) {
1987                 /* Configure extra packet-split registers */
1988                 rfctl = er32(RFCTL);
1989                 rfctl |= E1000_RFCTL_EXTEN;
1990                 /* disable packet split support for IPv6 extension headers,
1991                  * because some malformed IPv6 headers can hang the RX */
1992                 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1993                           E1000_RFCTL_NEW_IPV6_EXT_DIS);
1994
1995                 ew32(RFCTL, rfctl);
1996
1997                 rctl |= E1000_RCTL_DTYP_PS;
1998
1999                 psrctl |= adapter->rx_ps_bsize0 >>
2000                         E1000_PSRCTL_BSIZE0_SHIFT;
2001
2002                 switch (adapter->rx_ps_pages) {
2003                 case 3:
2004                         psrctl |= PAGE_SIZE <<
2005                                 E1000_PSRCTL_BSIZE3_SHIFT;
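                        /* fall through */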
2006                 case 2:
2007                         psrctl |= PAGE_SIZE <<
2008                                 E1000_PSRCTL_BSIZE2_SHIFT;
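                        /* fall through */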
2009                 case 1:
2010                         psrctl |= PAGE_SIZE >>
2011                                 E1000_PSRCTL_BSIZE1_SHIFT;
2012                         break;
2013                 }
2014
2015                 ew32(PSRCTL, psrctl);
2016         }
2017
2018         ew32(RCTL, rctl);
2019 }
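
/* Worked example (illustrative): with 4KB pages, PAGE_USE_COUNT(9000) is
 * (9000 >> 12) + 1 == 3, so a 9000-byte jumbo MTU qualifies for packet
 * split on 82571 and newer parts (pages <= 3), while PAGE_USE_COUNT(1500)
 * is 1 but LPE is clear at the default MTU, so rx_ps_pages stays 0 there.
 */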
2020
2021 /**
2022  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
2023  * @adapter: board private structure
2024  *
2025  * Configure the Rx unit of the MAC after a reset.
2026  **/
2027
2028 static void e1000_configure_rx(struct e1000_adapter *adapter)
2029 {
2030         u64 rdba;
2031         struct e1000_hw *hw = &adapter->hw;
2032         u32 rdlen, rctl, rxcsum, ctrl_ext;
2033
2034         if (adapter->rx_ps_pages) {
2035                 /* this is a 32 byte descriptor */
2036                 rdlen = adapter->rx_ring[0].count *
2037                         sizeof(union e1000_rx_desc_packet_split);
2038                 adapter->clean_rx = e1000_clean_rx_irq_ps;
2039                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2040         } else {
2041                 rdlen = adapter->rx_ring[0].count *
2042                         sizeof(struct e1000_rx_desc);
2043                 adapter->clean_rx = e1000_clean_rx_irq;
2044                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2045         }
2046
2047         /* disable receives while setting up the descriptors */
2048         rctl = er32(RCTL);
2049         ew32(RCTL, rctl & ~E1000_RCTL_EN);
2050
2051         /* set the Receive Delay Timer Register */
2052         ew32(RDTR, adapter->rx_int_delay);
2053
2054         if (hw->mac_type >= e1000_82540) {
2055                 ew32(RADV, adapter->rx_abs_int_delay);
2056                 if (adapter->itr_setting != 0)
2057                         ew32(ITR, 1000000000 / (adapter->itr * 256));
2058         }
2059
2060         if (hw->mac_type >= e1000_82571) {
2061                 ctrl_ext = er32(CTRL_EXT);
2062                 /* Reset delay timers after every interrupt */
2063                 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2064 #ifdef CONFIG_E1000_NAPI
2065                 /* Auto-Mask interrupts upon ICR access */
2066                 ctrl_ext |= E1000_CTRL_EXT_IAME;
2067                 ew32(IAM, 0xffffffff);
2068 #endif
2069                 ew32(CTRL_EXT, ctrl_ext);
2070                 E1000_WRITE_FLUSH();
2071         }
2072
2073         /* Setup the HW Rx Head and Tail Descriptor Pointers and
2074          * the Base and Length of the Rx Descriptor Ring */
2075         switch (adapter->num_rx_queues) {
2076         case 1:
2077         default:
2078                 rdba = adapter->rx_ring[0].dma;
2079                 ew32(RDLEN, rdlen);
2080                 ew32(RDBAH, (rdba >> 32));
2081                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
2082                 ew32(RDT, 0);
2083                 ew32(RDH, 0);
2084                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2085                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
2086                 break;
2087         }
2088
2089         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2090         if (hw->mac_type >= e1000_82543) {
2091                 rxcsum = er32(RXCSUM);
2092                 if (adapter->rx_csum) {
2093                         rxcsum |= E1000_RXCSUM_TUOFL;
2094
2095                         /* Enable 82571 IPv4 payload checksum for UDP fragments.
2096                          * Must be used in conjunction with packet-split. */
2097                         if ((hw->mac_type >= e1000_82571) &&
2098                             (adapter->rx_ps_pages)) {
2099                                 rxcsum |= E1000_RXCSUM_IPPCSE;
2100                         }
2101                 } else {
2102                         rxcsum &= ~E1000_RXCSUM_TUOFL;
2103                         /* don't need to clear IPPCSE as it defaults to 0 */
2104                 }
2105                 ew32(RXCSUM, rxcsum);
2106         }
2107
2108         /* enable early receives on 82573; this only takes effect when the
2109          * total frame size exceeds 2048 bytes, i.e. with jumbo frames */
2110 #define E1000_ERT_2048 0x100
2111         if (hw->mac_type == e1000_82573)
2112                 ew32(ERT, E1000_ERT_2048);
2113
2114         /* Enable Receives */
2115         ew32(RCTL, rctl);
2116 }
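
/* Illustrative note: the ITR register counts in units of 256ns, so the
 * write in e1000_configure_rx() above converts an interrupts-per-second
 * target into an interval; e.g. adapter->itr == 8000 gives
 * 1000000000 / (8000 * 256) == 488 units, roughly 125us between
 * interrupts (~8000 ints/s).
 */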
2117
2118 /**
2119  * e1000_free_tx_resources - Free Tx Resources per Queue
2120  * @adapter: board private structure
2121  * @tx_ring: Tx descriptor ring for a specific queue
2122  *
2123  * Free all transmit software resources
2124  **/
2125
2126 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
2127                                     struct e1000_tx_ring *tx_ring)
2128 {
2129         struct pci_dev *pdev = adapter->pdev;
2130
2131         e1000_clean_tx_ring(adapter, tx_ring);
2132
2133         vfree(tx_ring->buffer_info);
2134         tx_ring->buffer_info = NULL;
2135
2136         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2137
2138         tx_ring->desc = NULL;
2139 }
2140
2141 /**
2142  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2143  * @adapter: board private structure
2144  *
2145  * Free all transmit software resources
2146  **/
2147
2148 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2149 {
2150         int i;
2151
2152         for (i = 0; i < adapter->num_tx_queues; i++)
2153                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
2154 }
2155
2156 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2157                                              struct e1000_buffer *buffer_info)
2158 {
2159         if (buffer_info->dma) {
2160                 pci_unmap_page(adapter->pdev,
2161                                 buffer_info->dma,
2162                                 buffer_info->length,
2163                                 PCI_DMA_TODEVICE);
2164                 buffer_info->dma = 0;
2165         }
2166         if (buffer_info->skb) {
2167                 dev_kfree_skb_any(buffer_info->skb);
2168                 buffer_info->skb = NULL;
2169         }
2170         /* buffer_info must be completely set up in the transmit path */
2171 }
2172
2173 /**
2174  * e1000_clean_tx_ring - Free Tx Buffers
2175  * @adapter: board private structure
2176  * @tx_ring: ring to be cleaned
2177  **/
2178
2179 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2180                                 struct e1000_tx_ring *tx_ring)
2181 {
2182         struct e1000_hw *hw = &adapter->hw;
2183         struct e1000_buffer *buffer_info;
2184         unsigned long size;
2185         unsigned int i;
2186
2187         /* Free all the Tx ring sk_buffs */
2188
2189         for (i = 0; i < tx_ring->count; i++) {
2190                 buffer_info = &tx_ring->buffer_info[i];
2191                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2192         }
2193
2194         size = sizeof(struct e1000_buffer) * tx_ring->count;
2195         memset(tx_ring->buffer_info, 0, size);
2196
2197         /* Zero out the descriptor ring */
2198
2199         memset(tx_ring->desc, 0, tx_ring->size);
2200
2201         tx_ring->next_to_use = 0;
2202         tx_ring->next_to_clean = 0;
2203         tx_ring->last_tx_tso = 0;
2204
2205         writel(0, hw->hw_addr + tx_ring->tdh);
2206         writel(0, hw->hw_addr + tx_ring->tdt);
2207 }
2208
2209 /**
2210  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2211  * @adapter: board private structure
2212  **/
2213
2214 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2215 {
2216         int i;
2217
2218         for (i = 0; i < adapter->num_tx_queues; i++)
2219                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2220 }
2221
2222 /**
2223  * e1000_free_rx_resources - Free Rx Resources
2224  * @adapter: board private structure
2225  * @rx_ring: ring to clean the resources from
2226  *
2227  * Free all receive software resources
2228  **/
2229
2230 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2231                                     struct e1000_rx_ring *rx_ring)
2232 {
2233         struct pci_dev *pdev = adapter->pdev;
2234
2235         e1000_clean_rx_ring(adapter, rx_ring);
2236
2237         vfree(rx_ring->buffer_info);
2238         rx_ring->buffer_info = NULL;
2239         kfree(rx_ring->ps_page);
2240         rx_ring->ps_page = NULL;
2241         kfree(rx_ring->ps_page_dma);
2242         rx_ring->ps_page_dma = NULL;
2243
2244         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2245
2246         rx_ring->desc = NULL;
2247 }
2248
2249 /**
2250  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2251  * @adapter: board private structure
2252  *
2253  * Free all receive software resources
2254  **/
2255
2256 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2257 {
2258         int i;
2259
2260         for (i = 0; i < adapter->num_rx_queues; i++)
2261                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2262 }
2263
2264 /**
2265  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2266  * @adapter: board private structure
2267  * @rx_ring: ring to free buffers from
2268  **/
2269
2270 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2271                                 struct e1000_rx_ring *rx_ring)
2272 {
2273         struct e1000_hw *hw = &adapter->hw;
2274         struct e1000_buffer *buffer_info;
2275         struct e1000_ps_page *ps_page;
2276         struct e1000_ps_page_dma *ps_page_dma;
2277         struct pci_dev *pdev = adapter->pdev;
2278         unsigned long size;
2279         unsigned int i, j;
2280
2281         /* Free all the Rx ring sk_buffs */
2282         for (i = 0; i < rx_ring->count; i++) {
2283                 buffer_info = &rx_ring->buffer_info[i];
2284                 if (buffer_info->skb) {
2285                         pci_unmap_single(pdev,
2286                                          buffer_info->dma,
2287                                          buffer_info->length,
2288                                          PCI_DMA_FROMDEVICE);
2289
2290                         dev_kfree_skb(buffer_info->skb);
2291                         buffer_info->skb = NULL;
2292                 }
2293                 ps_page = &rx_ring->ps_page[i];
2294                 ps_page_dma = &rx_ring->ps_page_dma[i];
2295                 for (j = 0; j < adapter->rx_ps_pages; j++) {
2296                         if (!ps_page->ps_page[j]) break;
2297                         pci_unmap_page(pdev,
2298                                        ps_page_dma->ps_page_dma[j],
2299                                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2300                         ps_page_dma->ps_page_dma[j] = 0;
2301                         put_page(ps_page->ps_page[j]);
2302                         ps_page->ps_page[j] = NULL;
2303                 }
2304         }
2305
2306         size = sizeof(struct e1000_buffer) * rx_ring->count;
2307         memset(rx_ring->buffer_info, 0, size);
2308         size = sizeof(struct e1000_ps_page) * rx_ring->count;
2309         memset(rx_ring->ps_page, 0, size);
2310         size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2311         memset(rx_ring->ps_page_dma, 0, size);
2312
2313         /* Zero out the descriptor ring */
2314
2315         memset(rx_ring->desc, 0, rx_ring->size);
2316
2317         rx_ring->next_to_clean = 0;
2318         rx_ring->next_to_use = 0;
2319
2320         writel(0, hw->hw_addr + rx_ring->rdh);
2321         writel(0, hw->hw_addr + rx_ring->rdt);
2322 }
2323
2324 /**
2325  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2326  * @adapter: board private structure
2327  **/
2328
2329 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2330 {
2331         int i;
2332
2333         for (i = 0; i < adapter->num_rx_queues; i++)
2334                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2335 }
2336
2337 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2338  * and memory write and invalidate disabled for certain operations
2339  */
2340 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2341 {
2342         struct e1000_hw *hw = &adapter->hw;
2343         struct net_device *netdev = adapter->netdev;
2344         u32 rctl;
2345
2346         e1000_pci_clear_mwi(hw);
2347
2348         rctl = er32(RCTL);
2349         rctl |= E1000_RCTL_RST;
2350         ew32(RCTL, rctl);
2351         E1000_WRITE_FLUSH();
2352         mdelay(5);
2353
2354         if (netif_running(netdev))
2355                 e1000_clean_all_rx_rings(adapter);
2356 }
2357
2358 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2359 {
2360         struct e1000_hw *hw = &adapter->hw;
2361         struct net_device *netdev = adapter->netdev;
2362         u32 rctl;
2363
2364         rctl = er32(RCTL);
2365         rctl &= ~E1000_RCTL_RST;
2366         ew32(RCTL, rctl);
2367         E1000_WRITE_FLUSH();
2368         mdelay(5);
2369
2370         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2371                 e1000_pci_set_mwi(hw);
2372
2373         if (netif_running(netdev)) {
2374                 /* No need to loop, because 82542 supports only 1 queue */
2375                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2376                 e1000_configure_rx(adapter);
2377                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2378         }
2379 }
2380
2381 /**
2382  * e1000_set_mac - Change the Ethernet Address of the NIC
2383  * @netdev: network interface device structure
2384  * @p: pointer to an address structure
2385  *
2386  * Returns 0 on success, negative on failure
2387  **/
2388
2389 static int e1000_set_mac(struct net_device *netdev, void *p)
2390 {
2391         struct e1000_adapter *adapter = netdev_priv(netdev);
2392         struct e1000_hw *hw = &adapter->hw;
2393         struct sockaddr *addr = p;
2394
2395         if (!is_valid_ether_addr(addr->sa_data))
2396                 return -EADDRNOTAVAIL;
2397
2398         /* 82542 2.0 needs to be in reset to write receive address registers */
2399
2400         if (hw->mac_type == e1000_82542_rev2_0)
2401                 e1000_enter_82542_rst(adapter);
2402
2403         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2404         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2405
2406         e1000_rar_set(hw, hw->mac_addr, 0);
2407
2408         /* With 82571 controllers, LAA may be overwritten (with the default)
2409          * due to controller reset from the other port. */
2410         if (hw->mac_type == e1000_82571) {
2411                 /* activate the work around */
2412                 hw->laa_is_present = 1;
2413
2414                 /* Hold a copy of the LAA in RAR[14].  This is done so that
2415                  * between the time RAR[0] gets clobbered and the time it
2416                  * gets fixed (in e1000_watchdog), the actual LAA is in one
2417                  * of the RARs and no incoming packets directed to this port
2418                  * are dropped.  Eventually the LAA will be in RAR[0] and
2419                  * RAR[14] */
2420                 e1000_rar_set(hw, hw->mac_addr,
2421                                         E1000_RAR_ENTRIES - 1);
2422         }
2423
2424         if (hw->mac_type == e1000_82542_rev2_0)
2425                 e1000_leave_82542_rst(adapter);
2426
2427         return 0;
2428 }
2429
2430 /**
2431  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2432  * @netdev: network interface device structure
2433  *
2434  * The set_rx_mode entry point is called whenever the unicast or multicast
2435  * address lists or the network interface flags are updated. This routine is
2436  * responsible for configuring the hardware for proper unicast, multicast,
2437  * promiscuous mode, and all-multi behavior.
2438  **/
2439
2440 static void e1000_set_rx_mode(struct net_device *netdev)
2441 {
2442         struct e1000_adapter *adapter = netdev_priv(netdev);
2443         struct e1000_hw *hw = &adapter->hw;
2444         struct dev_addr_list *uc_ptr;
2445         struct dev_addr_list *mc_ptr;
2446         u32 rctl;
2447         u32 hash_value;
2448         int i, rar_entries = E1000_RAR_ENTRIES;
2449         int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2450                                 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2451                                 E1000_NUM_MTA_REGISTERS;
2452
2453         if (hw->mac_type == e1000_ich8lan)
2454                 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2455
2456         /* reserve RAR[14] for LAA over-write work-around */
2457         if (hw->mac_type == e1000_82571)
2458                 rar_entries--;
2459
2460         /* Check for Promiscuous and All Multicast modes */
2461
2462         rctl = er32(RCTL);
2463
2464         if (netdev->flags & IFF_PROMISC) {
2465                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2466                 rctl &= ~E1000_RCTL_VFE;
2467         } else {
2468                 if (netdev->flags & IFF_ALLMULTI) {
2469                         rctl |= E1000_RCTL_MPE;
2470                 } else {
2471                         rctl &= ~E1000_RCTL_MPE;
2472                 }
2473                 if (adapter->hw.mac_type != e1000_ich8lan)
2474                         rctl |= E1000_RCTL_VFE;
2475         }
2476
2477         uc_ptr = NULL;
2478         if (netdev->uc_count > rar_entries - 1) {
2479                 rctl |= E1000_RCTL_UPE;
2480         } else if (!(netdev->flags & IFF_PROMISC)) {
2481                 rctl &= ~E1000_RCTL_UPE;
2482                 uc_ptr = netdev->uc_list;
2483         }
2484
2485         ew32(RCTL, rctl);
2486
2487         /* 82542 2.0 needs to be in reset to write receive address registers */
2488
2489         if (hw->mac_type == e1000_82542_rev2_0)
2490                 e1000_enter_82542_rst(adapter);
2491
2492         /* load the first 14 addresses into the exact filters 1-14.  Unicast
2493          * addresses take precedence to avoid disabling unicast filtering
2494          * when possible.
2495          *
2496          * RAR 0 is used for the station MAC address; if there are not
2497          * 14 addresses, go ahead and clear the filters.
2498          * -- with 82571 controllers only entries 0-13 are filled here
2499          */
2500         mc_ptr = netdev->mc_list;
2501
2502         for (i = 1; i < rar_entries; i++) {
2503                 if (uc_ptr) {
2504                         e1000_rar_set(hw, uc_ptr->da_addr, i);
2505                         uc_ptr = uc_ptr->next;
2506                 } else if (mc_ptr) {
2507                         e1000_rar_set(hw, mc_ptr->da_addr, i);
2508                         mc_ptr = mc_ptr->next;
2509                 } else {
2510                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2511                         E1000_WRITE_FLUSH();
2512                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2513                         E1000_WRITE_FLUSH();
2514                 }
2515         }
2516         WARN_ON(uc_ptr != NULL);
2517
2518         /* clear the old settings from the multicast hash table */
2519
2520         for (i = 0; i < mta_reg_count; i++) {
2521                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2522                 E1000_WRITE_FLUSH();
2523         }
2524
2525         /* load any remaining addresses into the hash table */
2526
2527         for (; mc_ptr; mc_ptr = mc_ptr->next) {
2528                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
2529                 e1000_mta_set(hw, hash_value);
2530         }
2531
2532         if (hw->mac_type == e1000_82542_rev2_0)
2533                 e1000_leave_82542_rst(adapter);
2534 }
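
/* Sketch of the hash path above (assumed behavior of e1000_hash_mc_addr()
 * and e1000_mta_set(), which live in e1000_hw.c): the 12-bit hash selects
 * a single bit in the multicast table array, roughly
 *
 *      hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
 *      hash_bit = hash_value & 0x1f;
 *      mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
 *      E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta | (1 << hash_bit));
 *
 * Distinct multicast addresses can collide on the same bit, which is why
 * the hash table only supplements the exact RAR filters loaded above.
 */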
2535
2536 /* Need to wait a few seconds after link up to get diagnostic information from
2537  * the phy */
2538
2539 static void e1000_update_phy_info(unsigned long data)
2540 {
2541         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2542         struct e1000_hw *hw = &adapter->hw;
2543         e1000_phy_get_info(hw, &adapter->phy_info);
2544 }
2545
2546 /**
2547  * e1000_82547_tx_fifo_stall - Timer Call-back
2548  * @data: pointer to adapter cast into an unsigned long
2549  **/
2550
2551 static void e1000_82547_tx_fifo_stall(unsigned long data)
2552 {
2553         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2554         struct e1000_hw *hw = &adapter->hw;
2555         struct net_device *netdev = adapter->netdev;
2556         u32 tctl;
2557
2558         if (atomic_read(&adapter->tx_fifo_stall)) {
2559                 if ((er32(TDT) == er32(TDH)) &&
2560                    (er32(TDFT) == er32(TDFH)) &&
2561                    (er32(TDFTS) == er32(TDFHS))) {
2562                         tctl = er32(TCTL);
2563                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2564                         ew32(TDFT, adapter->tx_head_addr);
2565                         ew32(TDFH, adapter->tx_head_addr);
2566                         ew32(TDFTS, adapter->tx_head_addr);
2567                         ew32(TDFHS, adapter->tx_head_addr);
2568                         ew32(TCTL, tctl);
2569                         E1000_WRITE_FLUSH();
2570
2571                         adapter->tx_fifo_head = 0;
2572                         atomic_set(&adapter->tx_fifo_stall, 0);
2573                         netif_wake_queue(netdev);
2574                 } else {
2575                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2576                 }
2577         }
2578 }
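
/* Illustrative note: the test above only resets the FIFO once both the
 * descriptor ring (TDT == TDH) and the on-chip FIFO (TDFT == TDFH,
 * TDFTS == TDFHS) have fully drained; otherwise the timer re-arms one
 * jiffy later and tries again.
 */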
2579
2580 /**
2581  * e1000_watchdog - Timer Call-back
2582  * @data: pointer to adapter cast into an unsigned long
2583  **/
2584 static void e1000_watchdog(unsigned long data)
2585 {
2586         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2587         struct e1000_hw *hw = &adapter->hw;
2588         struct net_device *netdev = adapter->netdev;
2589         struct e1000_tx_ring *txdr = adapter->tx_ring;
2590         u32 link, tctl;
2591         s32 ret_val;
2592
2593         ret_val = e1000_check_for_link(hw);
2594         if ((ret_val == E1000_ERR_PHY) &&
2595             (hw->phy_type == e1000_phy_igp_3) &&
2596             (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2597                 /* See e1000_kumeran_lock_loss_workaround() */
2598                 DPRINTK(LINK, INFO,
2599                         "Gigabit has been disabled, downgrading speed\n");
2600         }
2601
2602         if (hw->mac_type == e1000_82573) {
2603                 e1000_enable_tx_pkt_filtering(hw);
2604                 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
2605                         e1000_update_mng_vlan(adapter);
2606         }
2607
2608         if ((hw->media_type == e1000_media_type_internal_serdes) &&
2609            !(er32(TXCW) & E1000_TXCW_ANE))
2610                 link = !hw->serdes_link_down;
2611         else
2612                 link = er32(STATUS) & E1000_STATUS_LU;
2613
2614         if (link) {
2615                 if (!netif_carrier_ok(netdev)) {
2616                         u32 ctrl;
2617                         bool txb2b = true;
2618                         e1000_get_speed_and_duplex(hw,
2619                                                    &adapter->link_speed,
2620                                                    &adapter->link_duplex);
2621
2622                         ctrl = er32(CTRL);
2623                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2624                                 "Flow Control: %s\n",
2625                                 adapter->link_speed,
2626                                 adapter->link_duplex == FULL_DUPLEX ?
2627                                 "Full Duplex" : "Half Duplex",
2628                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2629                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2630                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2631                                 E1000_CTRL_TFCE) ? "TX" : "None" )));
2632
2633                         /* tweak tx_queue_len according to speed/duplex
2634                          * and adjust the timeout factor */
2635                         netdev->tx_queue_len = adapter->tx_queue_len;
2636                         adapter->tx_timeout_factor = 1;
2637                         switch (adapter->link_speed) {
2638                         case SPEED_10:
2639                                 txb2b = false;
2640                                 netdev->tx_queue_len = 10;
2641                                 adapter->tx_timeout_factor = 8;
2642                                 break;
2643                         case SPEED_100:
2644                                 txb2b = false;
2645                                 netdev->tx_queue_len = 100;
2646                                 /* maybe add some timeout factor? */
2647                                 break;
2648                         }
2649
2650                         if ((hw->mac_type == e1000_82571 ||
2651                              hw->mac_type == e1000_82572) &&
2652                             !txb2b) {
2653                                 u32 tarc0;
2654                                 tarc0 = er32(TARC0);
2655                                 tarc0 &= ~(1 << 21);
2656                                 ew32(TARC0, tarc0);
2657                         }
2658
2659                         /* disable TSO for PCIe and 10/100 speeds, to avoid
2660                          * some hardware issues */
2661                         if (!adapter->tso_force &&
2662                             hw->bus_type == e1000_bus_type_pci_express) {
2663                                 switch (adapter->link_speed) {
2664                                 case SPEED_10:
2665                                 case SPEED_100:
2666                                         DPRINTK(PROBE, INFO,
2667                                                 "10/100 speed: disabling TSO\n");
2668                                         netdev->features &= ~NETIF_F_TSO;
2669                                         netdev->features &= ~NETIF_F_TSO6;
2670                                         break;
2671                                 case SPEED_1000:
2672                                         netdev->features |= NETIF_F_TSO;
2673                                         netdev->features |= NETIF_F_TSO6;
2674                                         break;
2675                                 default:
2676                                         /* oops */
2677                                         break;
2678                                 }
2679                         }
2680
2681                         /* enable transmits in the hardware, need to do this
2682                          * after setting TARC0 */
2683                         tctl = er32(TCTL);
2684                         tctl |= E1000_TCTL_EN;
2685                         ew32(TCTL, tctl);
2686
2687                         netif_carrier_on(netdev);
2688                         netif_wake_queue(netdev);
2689                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2690                         adapter->smartspeed = 0;
2691                 } else {
2692                         /* make sure the receive unit is started */
2693                         if (hw->rx_needs_kicking) {
2694                                 u32 rctl = er32(RCTL);
2695                                 ew32(RCTL, rctl | E1000_RCTL_EN);
2696                         }
2697                 }
2698         } else {
2699                 if (netif_carrier_ok(netdev)) {
2700                         adapter->link_speed = 0;
2701                         adapter->link_duplex = 0;
2702                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
2703                         netif_carrier_off(netdev);
2704                         netif_stop_queue(netdev);
2705                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2706
2707                         /* 80003ES2LAN workaround:
2708                          * as part of the packet buffer work-around on a
2709                          * link-down event, disable receives in the ISR and
2710                          * reset the device here in the watchdog
2711                          */
2712                         if (hw->mac_type == e1000_80003es2lan)
2713                                 /* reset device */
2714                                 schedule_work(&adapter->reset_task);
2715                 }
2716
2717                 e1000_smartspeed(adapter);
2718         }
2719
2720         e1000_update_stats(adapter);
2721
2722         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2723         adapter->tpt_old = adapter->stats.tpt;
2724         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2725         adapter->colc_old = adapter->stats.colc;
2726
2727         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2728         adapter->gorcl_old = adapter->stats.gorcl;
2729         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2730         adapter->gotcl_old = adapter->stats.gotcl;
2731
2732         e1000_update_adaptive(hw);
2733
2734         if (!netif_carrier_ok(netdev)) {
2735                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2736                         /* We've lost link, so the controller stops DMA,
2737                          * but we've got queued Tx work that's never going
2738                          * to get done, so reset controller to flush Tx.
2739                          * (Do the reset outside of interrupt context). */
2740                         adapter->tx_timeout_count++;
2741                         schedule_work(&adapter->reset_task);
2742                 }
2743         }
2744
2745         /* Cause software interrupt to ensure rx ring is cleaned */
2746         ew32(ICS, E1000_ICS_RXDMT0);
2747
2748         /* Force detection of hung controller every watchdog period */
2749         adapter->detect_tx_hung = true;
2750
2751         /* With 82571 controllers, LAA may be overwritten due to controller
2752          * reset from the other port. Set the appropriate LAA in RAR[0] */
2753         if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2754                 e1000_rar_set(hw, hw->mac_addr, 0);
2755
2756         /* Reset the timer */
2757         mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
2758 }
2759
2760 enum latency_range {
2761         lowest_latency = 0,
2762         low_latency = 1,
2763         bulk_latency = 2,
2764         latency_invalid = 255
2765 };
2766
2767 /**
2768  * e1000_update_itr - update the dynamic ITR value based on statistics
2769  * @adapter: pointer to adapter
2770  * @itr_setting: current adapter->itr
2771  * @packets: the number of packets during this measurement interval
2772  * @bytes: the number of bytes during this measurement interval
2773  *
2774  *      Stores a new ITR value based on packet and byte counts during the
2775  *      last interrupt.  The advantage of per-interrupt computation is
2776  *      faster updates and a more accurate ITR for the current traffic
2777  *      pattern.  Constants in this function were computed based on
2778  *      theoretical maximum wire speed; thresholds were set based on
2779  *      testing data, minimizing response time while increasing bulk
2780  *      throughput.  This functionality is controlled by the
2781  *      InterruptThrottleRate module parameter (see e1000_param.c).
2782  **/
2783 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2784                                      u16 itr_setting, int packets, int bytes)
2785 {
2786         unsigned int retval = itr_setting;
2787         struct e1000_hw *hw = &adapter->hw;
2788
2789         if (unlikely(hw->mac_type < e1000_82540))
2790                 goto update_itr_done;
2791
2792         if (packets == 0)
2793                 goto update_itr_done;
2794
2795         switch (itr_setting) {
2796         case lowest_latency:
2797                 /* jumbo frames get bulk treatment */
2798                 if (bytes/packets > 8000)
2799                         retval = bulk_latency;
2800                 else if ((packets < 5) && (bytes > 512))
2801                         retval = low_latency;
2802                 break;
2803         case low_latency:  /* 50 usec aka 20000 ints/s */
2804                 if (bytes > 10000) {
2805                         /* jumbo frames need bulk latency setting */
2806                         if (bytes/packets > 8000)
2807                                 retval = bulk_latency;
2808                         else if ((packets < 10) || ((bytes/packets) > 1200))
2809                                 retval = bulk_latency;
2810                         else if ((packets > 35))
2811                                 retval = lowest_latency;
2812                 } else if (bytes/packets > 2000)
2813                         retval = bulk_latency;
2814                 else if (packets <= 2 && bytes < 512)
2815                         retval = lowest_latency;
2816                 break;
2817         case bulk_latency: /* 250 usec aka 4000 ints/s */
2818                 if (bytes > 25000) {
2819                         if (packets > 35)
2820                                 retval = low_latency;
2821                 } else if (bytes < 6000) {
2822                         retval = low_latency;
2823                 }
2824                 break;
2825         }
2826
2827 update_itr_done:
2828         return retval;
2829 }
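/* Illustrative sketch only (not part of the driver): how the low_latency
 * case above classifies a few hypothetical measurement intervals; the
 * packet/byte counts are made up for illustration.
 *
 *   e1000_update_itr(adapter, low_latency, 2, 400);
 *       -> lowest_latency (<= 2 packets and < 512 bytes: tiny interval)
 *   e1000_update_itr(adapter, low_latency, 5, 15000);
 *       -> bulk_latency   (> 10000 bytes but fewer than 10 packets)
 *   e1000_update_itr(adapter, low_latency, 40, 30000);
 *       -> lowest_latency (> 10000 bytes and > 35 smallish packets)
 */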
2830
2831 static void e1000_set_itr(struct e1000_adapter *adapter)
2832 {
2833         struct e1000_hw *hw = &adapter->hw;
2834         u16 current_itr;
2835         u32 new_itr = adapter->itr;
2836
2837         if (unlikely(hw->mac_type < e1000_82540))
2838                 return;
2839
2840         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2841         if (unlikely(adapter->link_speed != SPEED_1000)) {
2842                 current_itr = 0;
2843                 new_itr = 4000;
2844                 goto set_itr_now;
2845         }
2846
2847         adapter->tx_itr = e1000_update_itr(adapter,
2848                                     adapter->tx_itr,
2849                                     adapter->total_tx_packets,
2850                                     adapter->total_tx_bytes);
2851         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2852         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2853                 adapter->tx_itr = low_latency;
2854
2855         adapter->rx_itr = e1000_update_itr(adapter,
2856                                     adapter->rx_itr,
2857                                     adapter->total_rx_packets,
2858                                     adapter->total_rx_bytes);
2859         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2860         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2861                 adapter->rx_itr = low_latency;
2862
2863         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2864
2865         switch (current_itr) {
2866         /* counts and packets in update_itr are dependent on these numbers */
2867         case lowest_latency:
2868                 new_itr = 70000;
2869                 break;
2870         case low_latency:
2871                 new_itr = 20000; /* aka hwitr = ~200 */
2872                 break;
2873         case bulk_latency:
2874                 new_itr = 4000;
2875                 break;
2876         default:
2877                 break;
2878         }
2879
2880 set_itr_now:
2881         if (new_itr != adapter->itr) {
2882                 /* This attempts to bias the interrupt rate towards bulk
2883                  * by adding intermediate steps when the interrupt rate is
2884                  * increasing */
2885                 new_itr = new_itr > adapter->itr ?
2886                              min(adapter->itr + (new_itr >> 2), new_itr) :
2887                              new_itr;
2888                 adapter->itr = new_itr;
2889                 ew32(ITR, 1000000000 / (new_itr * 256));
2890         }
2893 }
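/* The ITR register counts in 256 ns increments, hence the write above:
 * a target of new_itr interrupts/s becomes 1000000000 / (new_itr * 256).
 * For example (integer division rounds down):
 *   new_itr = 70000 -> ITR =  55  (~70000 ints/s)
 *   new_itr = 20000 -> ITR = 195  (~20000 ints/s)
 *   new_itr =  4000 -> ITR = 976  (~4000 ints/s)
 */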
2894
2895 #define E1000_TX_FLAGS_CSUM             0x00000001
2896 #define E1000_TX_FLAGS_VLAN             0x00000002
2897 #define E1000_TX_FLAGS_TSO              0x00000004
2898 #define E1000_TX_FLAGS_IPV4             0x00000008
2899 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2900 #define E1000_TX_FLAGS_VLAN_SHIFT       16
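/* Sketch of how a VLAN tag travels in the upper 16 bits of tx_flags
 * (hypothetical tag value, see e1000_xmit_frame() below):
 *
 *      tx_flags |= E1000_TX_FLAGS_VLAN;
 *      tx_flags |= (vlan_tag << E1000_TX_FLAGS_VLAN_SHIFT);
 *
 * e1000_tx_queue() later extracts it with
 * (tx_flags & E1000_TX_FLAGS_VLAN_MASK) when filling txd_upper. */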
2901
2902 static int e1000_tso(struct e1000_adapter *adapter,
2903                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2904 {
2905         struct e1000_context_desc *context_desc;
2906         struct e1000_buffer *buffer_info;
2907         unsigned int i;
2908         u32 cmd_length = 0;
2909         u16 ipcse = 0, tucse, mss;
2910         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2911         int err;
2912
2913         if (skb_is_gso(skb)) {
2914                 if (skb_header_cloned(skb)) {
2915                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2916                         if (err)
2917                                 return err;
2918                 }
2919
2920                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2921                 mss = skb_shinfo(skb)->gso_size;
2922                 if (skb->protocol == htons(ETH_P_IP)) {
2923                         struct iphdr *iph = ip_hdr(skb);
2924                         iph->tot_len = 0;
2925                         iph->check = 0;
2926                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2927                                                                  iph->daddr, 0,
2928                                                                  IPPROTO_TCP,
2929                                                                  0);
2930                         cmd_length = E1000_TXD_CMD_IP;
2931                         ipcse = skb_transport_offset(skb) - 1;
2932                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2933                         ipv6_hdr(skb)->payload_len = 0;
2934                         tcp_hdr(skb)->check =
2935                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2936                                                  &ipv6_hdr(skb)->daddr,
2937                                                  0, IPPROTO_TCP, 0);
2938                         ipcse = 0;
2939                 }
2940                 ipcss = skb_network_offset(skb);
2941                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2942                 tucss = skb_transport_offset(skb);
2943                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2944                 tucse = 0;
2945
2946                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2947                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2948
2949                 i = tx_ring->next_to_use;
2950                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2951                 buffer_info = &tx_ring->buffer_info[i];
2952
2953                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2954                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2955                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2956                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2957                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2958                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2959                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2960                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2961                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2962
2963                 buffer_info->time_stamp = jiffies;
2964                 buffer_info->next_to_watch = i;
2965
2966                 if (++i == tx_ring->count) i = 0;
2967                 tx_ring->next_to_use = i;
2968
2969                 return true;
2970         }
2971         return false;
2972 }
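/* A sketch of the resulting context-descriptor offsets for a plain
 * untagged Ethernet + IPv4 (no options) + TCP frame:
 *
 *   ipcss = 14  (IP header starts after the 14-byte Ethernet header)
 *   ipcso = 24  (IP checksum sits at offset 10 within the IP header)
 *   ipcse = 33  (last byte of the 20-byte IP header)
 *   tucss = 34  (TCP header start)
 *   tucso = 50  (TCP checksum at offset 16 within the TCP header)
 *   hdr_len = 34 + tcp_hdrlen(skb), e.g. 54 for a 20-byte TCP header
 */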
2973
2974 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2975                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2976 {
2977         struct e1000_context_desc *context_desc;
2978         struct e1000_buffer *buffer_info;
2979         unsigned int i;
2980         u8 css;
2981
2982         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2983                 css = skb_transport_offset(skb);
2984
2985                 i = tx_ring->next_to_use;
2986                 buffer_info = &tx_ring->buffer_info[i];
2987                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2988
2989                 context_desc->lower_setup.ip_config = 0;
2990                 context_desc->upper_setup.tcp_fields.tucss = css;
2991                 context_desc->upper_setup.tcp_fields.tucso =
2992                         css + skb->csum_offset;
2993                 context_desc->upper_setup.tcp_fields.tucse = 0;
2994                 context_desc->tcp_seg_setup.data = 0;
2995                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2996
2997                 buffer_info->time_stamp = jiffies;
2998                 buffer_info->next_to_watch = i;
2999
3000                 if (unlikely(++i == tx_ring->count)) i = 0;
3001                 tx_ring->next_to_use = i;
3002
3003                 return true;
3004         }
3005
3006         return false;
3007 }
3008
3009 #define E1000_MAX_TXD_PWR       12
3010 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
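/* i.e. at most 4096 (1 << 12) data bytes per descriptor by default;
 * e1000_xmit_frame() raises this cap to 8192 for 82571 and newer parts */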
3011
3012 static int e1000_tx_map(struct e1000_adapter *adapter,
3013                         struct e1000_tx_ring *tx_ring,
3014                         struct sk_buff *skb, unsigned int first,
3015                         unsigned int max_per_txd, unsigned int nr_frags,
3016                         unsigned int mss)
3017 {
3018         struct e1000_hw *hw = &adapter->hw;
3019         struct e1000_buffer *buffer_info;
3020         unsigned int len = skb->len;
3021         unsigned int offset = 0, size, count = 0, i;
3022         unsigned int f;
3023         len -= skb->data_len;
3024
3025         i = tx_ring->next_to_use;
3026
3027         while (len) {
3028                 buffer_info = &tx_ring->buffer_info[i];
3029                 size = min(len, max_per_txd);
3030                 /* Workaround for a controller erratum -- the
3031                  * descriptor for a non-TSO packet in a linear SKB that follows
3032                  * a TSO packet gets written back prematurely, before the data
3033                  * is fully DMA'd to the controller */
3034                 if (!skb->data_len && tx_ring->last_tx_tso &&
3035                     !skb_is_gso(skb)) {
3036                         tx_ring->last_tx_tso = 0;
3037                         size -= 4;
3038                 }
3039
3040                 /* Workaround for premature desc write-backs
3041                  * in TSO mode.  Append 4-byte sentinel desc */
3042                 if (unlikely(mss && !nr_frags && size == len && size > 8))
3043                         size -= 4;
3044                 /* Work-around for errata 10; it applies
3045                  * to all controllers in PCI-X mode.
3046                  * The fix is to make sure that the first descriptor of a
3047                  * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes.
3048                  */
3049                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3050                                 (size > 2015) && count == 0))
3051                         size = 2015;
3052
3053                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
3054                  * terminating buffers within evenly-aligned dwords. */
3055                 if (unlikely(adapter->pcix_82544 &&
3056                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
3057                    size > 4))
3058                         size -= 4;
3059
3060                 buffer_info->length = size;
3061                 buffer_info->dma =
3062                         pci_map_single(adapter->pdev,
3063                                 skb->data + offset,
3064                                 size,
3065                                 PCI_DMA_TODEVICE);
3066                 buffer_info->time_stamp = jiffies;
3067                 buffer_info->next_to_watch = i;
3068
3069                 len -= size;
3070                 offset += size;
3071                 count++;
3072                 if (unlikely(++i == tx_ring->count)) i = 0;
3073         }
3074
3075         for (f = 0; f < nr_frags; f++) {
3076                 struct skb_frag_struct *frag;
3077
3078                 frag = &skb_shinfo(skb)->frags[f];
3079                 len = frag->size;
3080                 offset = frag->page_offset;
3081
3082                 while (len) {
3083                         buffer_info = &tx_ring->buffer_info[i];
3084                         size = min(len, max_per_txd);
3085                         /* Workaround for premature desc write-backs
3086                          * in TSO mode.  Append 4-byte sentinel desc */
3087                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3088                                 size -= 4;
3089                         /* Workaround for potential 82544 hang in PCI-X.
3090                          * Avoid terminating buffers within evenly-aligned
3091                          * dwords. */
3092                         if (unlikely(adapter->pcix_82544 &&
3093                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
3094                            size > 4))
3095                                 size -= 4;
3096
3097                         buffer_info->length = size;
3098                         buffer_info->dma =
3099                                 pci_map_page(adapter->pdev,
3100                                         frag->page,
3101                                         offset,
3102                                         size,
3103                                         PCI_DMA_TODEVICE);
3104                         buffer_info->time_stamp = jiffies;
3105                         buffer_info->next_to_watch = i;
3106
3107                         len -= size;
3108                         offset += size;
3109                         count++;
3110                         if (unlikely(++i == tx_ring->count)) i = 0;
3111                 }
3112         }
3113
3114         i = (i == 0) ? tx_ring->count - 1 : i - 1;
3115         tx_ring->buffer_info[i].skb = skb;
3116         tx_ring->buffer_info[first].next_to_watch = i;
3117
3118         return count;
3119 }
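/* Illustrative only, ignoring the 82544 alignment and TSO sentinel
 * workarounds above: a 6000-byte linear skb with max_per_txd = 4096 maps
 * to two descriptors (4096 + 1904 bytes); on a PCI-X bus the errata-10
 * clamp on the first descriptor instead yields 2015 + 3985 bytes across
 * the same two descriptors. */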
3120
3121 static void e1000_tx_queue(struct e1000_adapter *adapter,
3122                            struct e1000_tx_ring *tx_ring, int tx_flags,
3123                            int count)
3124 {
3125         struct e1000_hw *hw = &adapter->hw;
3126         struct e1000_tx_desc *tx_desc = NULL;
3127         struct e1000_buffer *buffer_info;
3128         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3129         unsigned int i;
3130
3131         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3132                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3133                              E1000_TXD_CMD_TSE;
3134                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3135
3136                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3137                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3138         }
3139
3140         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3141                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3142                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3143         }
3144
3145         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3146                 txd_lower |= E1000_TXD_CMD_VLE;
3147                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3148         }
3149
3150         i = tx_ring->next_to_use;
3151
3152         while (count--) {
3153                 buffer_info = &tx_ring->buffer_info[i];
3154                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3155                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3156                 tx_desc->lower.data =
3157                         cpu_to_le32(txd_lower | buffer_info->length);
3158                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3159                 if (unlikely(++i == tx_ring->count)) i = 0;
3160         }
3161
3162         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3163
3164         /* Force memory writes to complete before letting h/w
3165          * know there are new descriptors to fetch.  (Only
3166          * applicable for weak-ordered memory model archs,
3167          * such as IA-64). */
3168         wmb();
3169
3170         tx_ring->next_to_use = i;
3171         writel(i, hw->hw_addr + tx_ring->tdt);
3172         /* We need this if more than one processor can write to our tail
3173          * at a time; it synchronizes IO on IA64/Altix systems */
3174         mmiowb();
3175 }
3176
3177 /**
3178  * 82547 workaround to avoid controller hang in half-duplex environment.
3179  * The workaround is to avoid queuing a large packet that would span
3180  * the internal Tx FIFO ring boundary by notifying the stack to resend
3181  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3182  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3183  * to the beginning of the Tx FIFO.
3184  **/
3185
3186 #define E1000_FIFO_HDR                  0x10
3187 #define E1000_82547_PAD_LEN             0x3E0
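/* Each frame is charged a 16-byte (0x10) FIFO header and rounded up to a
 * 16-byte boundary; 0x3E0 is 992 bytes of extra headroom demanded by the
 * stall check below. */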
3188
3189 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3190                                        struct sk_buff *skb)
3191 {
3192         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3193         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3194
3195         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3196
3197         if (adapter->link_duplex != HALF_DUPLEX)
3198                 goto no_fifo_stall_required;
3199
3200         if (atomic_read(&adapter->tx_fifo_stall))
3201                 return 1;
3202
3203         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3204                 atomic_set(&adapter->tx_fifo_stall, 1);
3205                 return 1;
3206         }
3207
3208 no_fifo_stall_required:
3209         adapter->tx_fifo_head += skb_fifo_len;
3210         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3211                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3212         return 0;
3213 }
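/* Worked example with hypothetical numbers, assuming a half-duplex link
 * and no stall already pending: with tx_fifo_size = 4096 and
 * tx_fifo_head = 3000, fifo_space = 1096.  A 1514-byte frame gives
 * skb_fifo_len = ALIGN(1514 + 16, 16) = 1536; since 1536 < 992 + 1096,
 * no stall is signalled and the head wraps to 3000 + 1536 - 4096 = 440. */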
3214
3215 #define MINIMUM_DHCP_PACKET_SIZE 282
3216 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3217                                     struct sk_buff *skb)
3218 {
3219         struct e1000_hw *hw =  &adapter->hw;
3220         u16 length, offset;
3221         if (vlan_tx_tag_present(skb)) {
3222                 if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
3223                       (hw->mng_cookie.status &
3224                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
3225                         return 0;
3226         }
3227         if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3228                 struct ethhdr *eth = (struct ethhdr *) skb->data;
3229                 if (htons(ETH_P_IP) == eth->h_proto) {
3230                         const struct iphdr *ip =
3231                                 (struct iphdr *)((u8 *)skb->data + 14);
3232                         if (IPPROTO_UDP == ip->protocol) {
3233                                 struct udphdr *udp =
3234                                         (struct udphdr *)((u8 *)ip +
3235                                                 (ip->ihl << 2));
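                                /* 67 is the BOOTP/DHCP server port */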
3236                                 if (ntohs(udp->dest) == 67) {
3237                                         offset = (u8 *)udp + 8 - skb->data;
3238                                         length = skb->len - offset;
3239
3240                                         return e1000_mng_write_dhcp_info(hw,
3241                                                         (u8 *)udp + 8,
3242                                                         length);
3243                                 }
3244                         }
3245                 }
3246         }
3247         return 0;
3248 }
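/* The (u8 *)udp + 8 arithmetic above skips the 8-byte UDP header, so
 * 'offset' covers the Ethernet (14 bytes), IP (ihl << 2 bytes) and UDP
 * headers, and 'length' is the size of the DHCP payload handed to
 * e1000_mng_write_dhcp_info(). */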
3249
3250 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3251 {
3252         struct e1000_adapter *adapter = netdev_priv(netdev);
3253         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3254
3255         netif_stop_queue(netdev);
3256         /* Herbert's original patch had:
3257          *  smp_mb__after_netif_stop_queue();
3258          * but since that doesn't exist yet, just open code it. */
3259         smp_mb();
3260
3261         /* We need to check again in a case another CPU has just
3262          * made room available. */
3263         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3264                 return -EBUSY;
3265
3266         /* A reprieve! */
3267         netif_start_queue(netdev);
3268         ++adapter->restart_queue;
3269         return 0;
3270 }
3271
3272 static int e1000_maybe_stop_tx(struct net_device *netdev,
3273                                struct e1000_tx_ring *tx_ring, int size)
3274 {
3275         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3276                 return 0;
3277         return __e1000_maybe_stop_tx(netdev, size);
3278 }
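/* e1000_xmit_frame() calls this twice: once before queueing, asking for
 * count + 2 free descriptors, and once after queueing with
 * MAX_SKB_FRAGS + 2, so the queue is stopped early for the next frame
 * rather than failing it mid-transmit. */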
3279
3280 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
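/* e.g. with max_txd_pwr = 12 (4096-byte descriptors):
 *   TXD_USE_COUNT(1514, 12) = (1514 >> 12) + 1 = 1
 *   TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2
 * Note the macro rounds up, and charges one extra descriptor even when
 * the length is an exact multiple of the descriptor size -- a
 * deliberately conservative estimate. */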
3281 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3282 {
3283         struct e1000_adapter *adapter = netdev_priv(netdev);
3284         struct e1000_hw *hw = &adapter->hw;
3285         struct e1000_tx_ring *tx_ring;
3286         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3287         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3288         unsigned int tx_flags = 0;
3289         unsigned int len = skb->len - skb->data_len;
3290         unsigned long flags;
3291         unsigned int nr_frags;
3292         unsigned int mss;
3293         int count = 0;
3294         int tso;
3295         unsigned int f;
3296
3297         /* This goes back to the question of how to logically map a tx queue
3298          * to a flow.  Right now, performance is slightly degraded when using
3299          * multiple tx queues.  If the stack breaks away from a
3300          * single qdisc implementation, we can look at this again. */
3301         tx_ring = adapter->tx_ring;
3302
3303         if (unlikely(skb->len <= 0)) {
3304                 dev_kfree_skb_any(skb);
3305                 return NETDEV_TX_OK;
3306         }
3307
3308         /* 82571 and newer don't need the workaround that limited descriptor
3309          * length to 4 kB */
3310         if (hw->mac_type >= e1000_82571)
3311                 max_per_txd = 8192;
3312
3313         mss = skb_shinfo(skb)->gso_size;
3314         /* The controller does a simple calculation to
3315          * make sure there is enough room in the FIFO before
3316          * initiating the DMA for each buffer:
3317          * ceil(buffer len / mss) must not exceed 4.  To make sure
3318          * we don't overrun the FIFO, cap the max buffer len at
3319          * 4 * mss whenever mss drops. */
3320         if (mss) {
3321                 u8 hdr_len;
3322                 max_per_txd = min(mss << 2, max_per_txd);
3323                 max_txd_pwr = fls(max_per_txd) - 1;
3324
3325                 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3326                  * points to just header, pull a few bytes of payload from
3327                  * frags into skb->data */
3328                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3329                 if (skb->data_len && hdr_len == len) {
3330                         switch (hw->mac_type) {
3331                                 unsigned int pull_size;
3332                         case e1000_82544:
3333                                 /* Make sure we have room to chop off 4 bytes,
3334                                  * and that the end alignment will work out to
3335                                  * this hardware's requirements.
3336                                  * NOTE: this is a TSO-only workaround;
3337                                  * if the end byte alignment is not correct,
3338                                  * the pull would move us into the next dword */
3339                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3340                                         break;
3341                                 /* fall through */
3342                         case e1000_82571:
3343                         case e1000_82572:
3344                         case e1000_82573:
3345                         case e1000_ich8lan:
3346                                 pull_size = min((unsigned int)4, skb->data_len);
3347                                 if (!__pskb_pull_tail(skb, pull_size)) {
3348                                         DPRINTK(DRV, ERR,
3349                                                 "__pskb_pull_tail failed.\n");
3350                                         dev_kfree_skb_any(skb);
3351                                         return NETDEV_TX_OK;
3352                                 }
3353                                 len = skb->len - skb->data_len;
3354                                 break;
3355                         default:
3356                                 /* do nothing */
3357                                 break;
3358                         }
3359                 }
3360         }
3361
3362         /* reserve a descriptor for the offload context */
3363         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3364                 count++;
3365         count++;
3366
3367         /* Controller Erratum workaround */
3368         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3369                 count++;
3370
3371         count += TXD_USE_COUNT(len, max_txd_pwr);
3372
3373         if (adapter->pcix_82544)
3374                 count++;
3375
3376         /* work-around for errata 10 and it applies to all controllers
3377          * in PCI-X mode, so add one more descriptor to the count
3378          */
3379         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3380                         (len > 2015)))
3381                 count++;
3382
3383         nr_frags = skb_shinfo(skb)->nr_frags;
3384         for (f = 0; f < nr_frags; f++)
3385                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3386                                        max_txd_pwr);
3387         if (adapter->pcix_82544)
3388                 count += nr_frags;
3389
3391         if (hw->tx_pkt_filtering &&
3392             (hw->mac_type == e1000_82573))
3393                 e1000_transfer_dhcp_info(adapter, skb);
3394
3395         if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3396                 /* Collision - tell upper layer to requeue */
3397                 return NETDEV_TX_LOCKED;
3398
3399         /* need: count + 2 desc gap to keep tail from touching
3400          * head, otherwise try next time */
3401         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
3402                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3403                 return NETDEV_TX_BUSY;
3404         }
3405
3406         if (unlikely(hw->mac_type == e1000_82547)) {
3407                 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3408                         netif_stop_queue(netdev);
3409                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3410                         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3411                         return NETDEV_TX_BUSY;
3412                 }
3413         }
3414
3415         if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3416                 tx_flags |= E1000_TX_FLAGS_VLAN;
3417                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3418         }
3419
3420         first = tx_ring->next_to_use;
3421
3422         tso = e1000_tso(adapter, tx_ring, skb);
3423         if (tso < 0) {
3424                 dev_kfree_skb_any(skb);
3425                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3426                 return NETDEV_TX_OK;
3427         }
3428
3429         if (likely(tso)) {
3430                 tx_ring->last_tx_tso = 1;
3431                 tx_flags |= E1000_TX_FLAGS_TSO;
3432         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3433                 tx_flags |= E1000_TX_FLAGS_CSUM;
3434
3435         /* The old method was to assume an IPv4 packet by default if TSO was
3436          * enabled.  82571 hardware supports TSO for IPv6 as well, so we can
3437          * no longer assume -- we must check the protocol. */
3438         if (likely(skb->protocol == htons(ETH_P_IP)))
3439                 tx_flags |= E1000_TX_FLAGS_IPV4;
3440
3441         e1000_tx_queue(adapter, tx_ring, tx_flags,
3442                        e1000_tx_map(adapter, tx_ring, skb, first,
3443                                     max_per_txd, nr_frags, mss));
3444
3445         netdev->trans_start = jiffies;
3446
3447         /* Make sure there is space in the ring for the next send. */
3448         e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3449
3450         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3451         return NETDEV_TX_OK;
3452 }
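/* Worked example of the descriptor budgeting above, for a hypothetical
 * 1514-byte linear skb with CHECKSUM_PARTIAL on a non-PCI-X, pre-82571
 * part: 1 context descriptor, plus the unconditional count++ reserving
 * one more, plus TXD_USE_COUNT(1514, 12) = 1 data descriptor, so
 * count = 3 and e1000_maybe_stop_tx() must see at least count + 2 = 5
 * unused descriptors before the frame is accepted. */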
3453
3454 /**
3455  * e1000_tx_timeout - Respond to a Tx Hang
3456  * @netdev: network interface device structure
3457  **/
3458
3459 static void e1000_tx_timeout(struct net_device *netdev)
3460 {
3461         struct e1000_adapter *adapter = netdev_priv(netdev);
3462
3463         /* Do the reset outside of interrupt context */
3464         adapter->tx_timeout_count++;
3465         schedule_work(&adapter->reset_task);
3466 }
3467
3468 static void e1000_reset_task(struct work_struct *work)
3469 {
3470         struct e1000_adapter *adapter =
3471                 container_of(work, struct e1000_adapter, reset_task);
3472
3473         e1000_reinit_locked(adapter);
3474 }
3475
3476 /**
3477  * e1000_get_stats - Get System Network Statistics
3478  * @netdev: network interface device structure
3479  *
3480  * Returns the address of the device statistics structure.
3481  * The statistics are actually updated from the timer callback.
3482  **/
3483
3484 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3485 {
3486         struct e1000_adapter *adapter = netdev_priv(netdev);
3487
3488         /* only return the current stats */
3489         return &adapter->net_stats;
3490 }
3491
3492 /**
3493  * e1000_change_mtu - Change the Maximum Transfer Unit
3494  * @netdev: network interface device structure
3495  * @new_mtu: new value for maximum frame size
3496  *
3497  * Returns 0 on success, negative on failure
3498  **/
3499
3500 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3501 {
3502         struct e1000_adapter *adapter = netdev_priv(netdev);
3503         struct e1000_hw *hw = &adapter->hw;
3504         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3505         u16 eeprom_data = 0;
3506
3507         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3508             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3509                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3510                 return -EINVAL;
3511         }
3512
3513         /* Adapter-specific max frame size limits. */
3514         switch (hw->mac_type) {
3515         case e1000_undefined ... e1000_82542_rev2_1:
3516         case e1000_ich8lan:
3517                 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3518                         DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3519                         return -EINVAL;
3520                 }
3521                 break;
3522         case e1000_82573:
3523                 /* Jumbo Frames are not supported if either:
3524                  * - this is not an 82573L device, or
3525                  * - ASPM is enabled in any way (0x1A bits 3:2) */
3526                 e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
3527                                   &eeprom_data);
3528                 if ((hw->device_id != E1000_DEV_ID_82573L) ||
3529                     (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3530                         if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3531                                 DPRINTK(PROBE, ERR,
3532                                         "Jumbo Frames not supported.\n");
3533                                 return -EINVAL;
3534                         }
3535                         break;
3536                 }
3537                 /* ERT will be enabled later to enable wire speed receives */
3538
3539                 /* fall through to get support */
3540         case e1000_82571:
3541         case e1000_82572:
3542         case e1000_80003es2lan:
3543 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3544                 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3545                         DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3546                         return -EINVAL;
3547                 }
3548                 break;
3549         default:
3550                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3551                 break;
3552         }
3553
3554         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3555          * means we reserve 2 more; this pushes us to allocate from the next
3556          * larger slab size,
3557          * i.e. RXBUFFER_2048 --> size-4096 slab */
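        /* e.g. the default MTU of 1500 gives max_frame = 1500 + 14 + 4
         * = 1518, which selects E1000_RXBUFFER_2048 below */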
3558
3559         if (max_frame <= E1000_RXBUFFER_256)
3560                 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3561         else if (max_frame <= E1000_RXBUFFER_512)
3562                 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3563         else if (max_frame <= E1000_RXBUFFER_1024)
3564                 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3565         else if (max_frame <= E1000_RXBUFFER_2048)
3566                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3567         else if (max_frame <= E1000_RXBUFFER_4096)
3568                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3569         else if (max_frame <= E1000_RXBUFFER_8192)
3570                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3571         else if (max_frame <= E1000_RXBUFFER_16384)