igb: cleanup code related to ring resource allocation and free
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2ffe0997b83802eb08edda978eab257adddff837..e67ff0ea2a560f705be92683f1a123e44b32ced0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -90,7 +91,6 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -99,11 +99,7 @@ static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-                                          struct net_device *,
-                                          struct igb_ring *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-                                     struct net_device *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
@@ -111,17 +107,14 @@ static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -132,43 +125,10 @@ static void igb_restore_vlan(struct igb_adapter *);
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
-static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
 static void igb_vmm_control(struct igb_adapter *);
-static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-       u32 reg_data;
-
-       reg_data = rd32(E1000_VMOLR(vfn));
-       reg_data |= E1000_VMOLR_BAM |    /* Accept broadcast */
-                   E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
-                   E1000_VMOLR_AUPE |   /* Accept untagged packets */
-                   E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-       wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                 int vfn)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       u32 vmolr;
-
-       /* if it isn't the PF check to see if VFs are enabled and
-        * increase the size to support vlan tags */
-       if (vfn < adapter->vfs_allocated_count &&
-           adapter->vf_data[vfn].vlans_enabled)
-               size += VLAN_TAG_SIZE;
-
-       vmolr = rd32(E1000_VMOLR(vfn));
-       vmolr &= ~E1000_VMOLR_RLPML_MASK;
-       vmolr |= size | E1000_VMOLR_LPE;
-       wr32(E1000_VMOLR(vfn), vmolr);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -219,45 +179,11 @@ static struct pci_driver igb_driver = {
        .err_handler = &igb_err_handler
 };
 
-static int global_quad_port_a; /* global quad port a indication */
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-/**
- * Scale the NIC clock cycle by a large factor so that
- * relatively small clock corrections can be added or
- * substracted at each clock tick. The drawbacks of a
- * large factor are a) that the clock register overflows
- * more quickly (not such a big deal) and b) that the
- * increment per tick has to fit into 24 bits.
- *
- * Note that
- *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
- *             IGB_TSYNC_SCALE
- *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
- *
- * The base scale factor is intentionally a power of two
- * so that the division in %struct timecounter can be done with
- * a shift.
- */
-#define IGB_TSYNC_SHIFT (19)
-#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
-
-/**
- * The duration of one clock cycle of the NIC.
- *
- * @todo This hard-coded value is part of the specification and might change
- * in future hardware revisions. Add revision check.
- */
-#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
-
-#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
-# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
-#endif
-
 /**
  * igb_read_clock - read raw cycle counter (to be used by time counter)
  */
@@ -266,11 +192,11 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
-       u64 stamp;
-
-       stamp =  rd32(E1000_SYSTIML);
-       stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
+       u64 stamp = 0;
+       int shift = 0;
 
+       stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+       stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
 }
 
@@ -310,17 +236,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 }
 #endif
 
-/**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-       if (ring->next_to_clean > ring->next_to_use)
-               return ring->next_to_clean - ring->next_to_use - 1;
-
-       return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
 /**
  * igb_init_module - Driver Registration Routine
  *
@@ -335,12 +250,9 @@ static int __init igb_init_module(void)
 
        printk(KERN_INFO "%s\n", igb_copyright);
 
-       global_quad_port_a = 0;
-
 #ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
 #endif
-
        ret = pci_register_driver(&igb_driver);
        return ret;
 }
@@ -374,7 +286,7 @@ module_exit(igb_exit_module);
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
        int i;
-       unsigned int rbase_offset = adapter->vfs_allocated_count;
+       u32 rbase_offset = adapter->vfs_allocated_count;
 
        switch (adapter->hw.mac.type) {
        case e1000_82576:
@@ -400,6 +312,18 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
        }
 }
 
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+
+       adapter->tx_ring = NULL;
+       adapter->rx_ring = NULL;
+
+       adapter->num_rx_queues = 0;
+       adapter->num_tx_queues = 0;
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -414,59 +338,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
-               return -ENOMEM;
+               goto err;
 
        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
-       if (!adapter->rx_ring) {
-               kfree(adapter->tx_ring);
-               return -ENOMEM;
-       }
-
-       adapter->rx_ring->buddy = adapter->tx_ring;
+       if (!adapter->rx_ring)
+               goto err;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
-               ring->adapter = adapter;
                ring->queue_index = i;
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               /* For 82575, context index must be unique per ring. */
+               if (adapter->hw.mac.type == e1000_82575)
+                       ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }
+
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
-               ring->adapter = adapter;
                ring->queue_index = i;
-               ring->itr_register = E1000_ITR;
-
-               /* set a default napi handler for each rx_ring */
-               netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+               ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+               /* set flag indicating ring supports SCTP checksum offload */
+               if (adapter->hw.mac.type >= e1000_82576)
+                       ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }
 
        igb_cache_ring_register(adapter);
-       return 0;
-}
 
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               netif_napi_del(&adapter->rx_ring[i].napi);
+       return 0;
 
-       adapter->num_rx_queues = 0;
-       adapter->num_tx_queues = 0;
+err:
+       igb_free_queues(adapter);
 
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-                             int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
        u32 msixbm = 0;
+       struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
+       int rx_queue = IGB_N0_QUEUE;
+       int tx_queue = IGB_N0_QUEUE;
+
+       if (q_vector->rx_ring)
+               rx_queue = q_vector->rx_ring->reg_idx;
+       if (q_vector->tx_ring)
+               tx_queue = q_vector->tx_ring->reg_idx;
 
        switch (hw->mac.type) {
        case e1000_82575:
@@ -474,16 +400,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
-               if (rx_queue > IGB_N0_QUEUE) {
+               if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-                       adapter->rx_ring[rx_queue].eims_value = msixbm;
-               }
-               if (tx_queue > IGB_N0_QUEUE) {
+               if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-                       adapter->tx_ring[tx_queue].eims_value =
-                                 E1000_EICR_TX_QUEUE0 << tx_queue;
-               }
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+               q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
@@ -491,35 +413,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
                if (rx_queue > IGB_N0_QUEUE) {
-                       index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+                       index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
-                       if (rx_queue & 0x1) {
-                               /* vector goes into third byte of register */
-                               ivar = ivar & 0xFF00FFFF;
-                               ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-                       } else {
+                       if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
+                       } else {
+                               /* vector goes into third byte of register */
+                               ivar = ivar & 0xFF00FFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
-                       adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
-                       index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+                       index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
-                       if (tx_queue & 0x1) {
-                               /* vector goes into high byte of register */
-                               ivar = ivar & 0x00FFFFFF;
-                               ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-                       } else {
+                       if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+                       } else {
+                               /* vector goes into high byte of register */
+                               ivar = ivar & 0x00FFFFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
-                       adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
+               q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
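
For reference, the index/shift arithmetic added above implies a fixed byte layout inside each IVAR0 entry on the 82576; the helper below only illustrates that layout and is not part of the patch (the name is made up):

static u32 igb_82576_ivar_shift(unsigned int queue, bool is_tx)
{
        /* IVAR0[queue & 0x7] packs four queue->vector mappings:
         *   byte 0: rx queues 0-7      byte 1: tx queues 0-7
         *   byte 2: rx queues 8-15     byte 3: tx queues 8-15
         */
        u32 shift = (queue < 8) ? 0 : 16;

        return shift + (is_tx ? 8 : 0);
}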
@@ -540,43 +461,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
 
        adapter->eims_enable_mask = 0;
-       if (hw->mac.type == e1000_82576)
-               /* Turn on MSI-X capability first, or our settings
-                * won't stick.  And it will take days to debug. */
-               wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-                                  E1000_GPIE_PBA | E1000_GPIE_EIAME |
-                                  E1000_GPIE_NSICR);
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *tx_ring = &adapter->tx_ring[i];
-               igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-               adapter->eims_enable_mask |= tx_ring->eims_value;
-               if (tx_ring->itr_val)
-                       writel(tx_ring->itr_val,
-                              hw->hw_addr + tx_ring->itr_register);
-               else
-                       writel(1, hw->hw_addr + tx_ring->itr_register);
-       }
-
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = &adapter->rx_ring[i];
-               rx_ring->buddy = NULL;
-               igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-               adapter->eims_enable_mask |= rx_ring->eims_value;
-               if (rx_ring->itr_val)
-                       writel(rx_ring->itr_val,
-                              hw->hw_addr + rx_ring->itr_register);
-               else
-                       writel(1, hw->hw_addr + rx_ring->itr_register);
-       }
-
 
        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
-               array_wr32(E1000_MSIXBM(0), vector++,
-                                     E1000_EIMS_OTHER);
-
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support*/
                tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -586,22 +474,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
                tmp |= E1000_CTRL_EXT_IRCA;
 
                wr32(E1000_CTRL_EXT, tmp);
-               adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+               /* enable msix_other interrupt */
+               array_wr32(E1000_MSIXBM(0), vector++,
+                                     E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;
 
                break;
 
        case e1000_82576:
+               /* Turn on MSI-X capability first, or our settings
+                * won't stick.  And it will take days to debug. */
+               wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+                               E1000_GPIE_PBA | E1000_GPIE_EIAME |
+                               E1000_GPIE_NSICR);
+
+               /* enable msix_other interrupt */
+               adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;
-               wr32(E1000_IVAR_MISC, tmp);
 
-               adapter->eims_enable_mask = (1 << (vector)) - 1;
-               adapter->eims_other = 1 << (vector - 1);
+               wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */
+
+       adapter->eims_enable_mask |= adapter->eims_other;
+
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               igb_assign_vector(q_vector, vector++);
+               adapter->eims_enable_mask |= q_vector->eims_value;
+       }
+
        wrfl();
 }
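
As a concrete illustration of how the mask is now built (example values only, not taken from the patch): on an 82576 with four q_vectors, MSI-X entry 0 handles the "other" causes and entries 1-4 handle the q_vectors, so the loop above produces

/*
 *   adapter->eims_other          = 1 << 0                    = 0x01
 *   q_vector[0..3]->eims_value   = 1 << 1 ... 1 << 4
 *   adapter->eims_enable_mask    = 0x01|0x02|0x04|0x08|0x10  = 0x1f
 *
 * On 82575 each eims_value is instead the MSIXBM queue bitmask set up in
 * igb_assign_vector().
 */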
 
@@ -614,43 +520,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;
 
-       vector = 0;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *ring = &(adapter->tx_ring[i]);
-               sprintf(ring->name, "%s-tx-%d", netdev->name, i);
-               err = request_irq(adapter->msix_entries[vector].vector,
-                                 &igb_msix_tx, 0, ring->name,
-                                 &(adapter->tx_ring[i]));
-               if (err)
-                       goto out;
-               ring->itr_register = E1000_EITR(0) + (vector << 2);
-               ring->itr_val = 976; /* ~4000 ints/sec */
-               vector++;
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &(adapter->rx_ring[i]);
-               if (strlen(netdev->name) < (IFNAMSIZ - 5))
-                       sprintf(ring->name, "%s-rx-%d", netdev->name, i);
+       err = request_irq(adapter->msix_entries[vector].vector,
+                         &igb_msix_other, 0, netdev->name, adapter);
+       if (err)
+               goto out;
+       vector++;
+
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+               q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+               if (q_vector->rx_ring && q_vector->tx_ring)
+                       sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+                               q_vector->rx_ring->queue_index);
+               else if (q_vector->tx_ring)
+                       sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+                               q_vector->tx_ring->queue_index);
+               else if (q_vector->rx_ring)
+                       sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+                               q_vector->rx_ring->queue_index);
                else
-                       memcpy(ring->name, netdev->name, IFNAMSIZ);
+                       sprintf(q_vector->name, "%s-unused", netdev->name);
+
                err = request_irq(adapter->msix_entries[vector].vector,
-                                 &igb_msix_rx, 0, ring->name,
-                                 &(adapter->rx_ring[i]));
+                                 &igb_msix_ring, 0, q_vector->name,
+                                 q_vector);
                if (err)
                        goto out;
-               ring->itr_register = E1000_EITR(0) + (vector << 2);
-               ring->itr_val = adapter->itr;
                vector++;
        }
 
-       err = request_irq(adapter->msix_entries[vector].vector,
-                         &igb_msix_other, 0, netdev->name, netdev);
-       if (err)
-               goto out;
-
        igb_configure_msix(adapter);
        return 0;
 out:
@@ -663,11 +566,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-       } else if (adapter->flags & IGB_FLAG_HAS_MSI)
+       } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
-       return;
+       }
 }
 
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+       int v_idx;
+
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+               adapter->q_vector[v_idx] = NULL;
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+       }
+       adapter->num_q_vectors = 0;
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+       igb_free_queues(adapter);
+       igb_free_q_vectors(adapter);
+       igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -681,11 +617,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
        int numvecs, i;
 
        /* Number of supported queues. */
-       /* Having more queues than CPUs doesn't make sense. */
        adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
        adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
 
-       numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+       /* start with one vector for every rx queue */
+       numvecs = adapter->num_rx_queues;
+
+       /* if tx handler is separate add 1 for every tx queue */
+       numvecs += adapter->num_tx_queues;
+
+       /* store the number of vectors reserved for queues */
+       adapter->num_q_vectors = numvecs;
+
+       /* add 1 vector for link status interrupts */
+       numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
@@ -719,8 +664,11 @@ msi_only:
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
 #endif
+       adapter->vfs_allocated_count = 0;
+       adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
+       adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -729,6 +677,143 @@ out:
        return;
 }
 
+/**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+       struct igb_q_vector *q_vector;
+       struct e1000_hw *hw = &adapter->hw;
+       int v_idx;
+
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+               if (!q_vector)
+                       goto err_out;
+               q_vector->adapter = adapter;
+               q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
+               q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+               q_vector->itr_val = IGB_START_ITR;
+               q_vector->set_itr = 1;
+               netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+               adapter->q_vector[v_idx] = q_vector;
+       }
+       return 0;
+
+err_out:
+       while (v_idx) {
+               v_idx--;
+               q_vector = adapter->q_vector[v_idx];
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+               adapter->q_vector[v_idx] = NULL;
+       }
+       return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+       struct igb_q_vector *q_vector;
+
+       q_vector = adapter->q_vector[v_idx];
+       q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+       q_vector->rx_ring->q_vector = q_vector;
+       q_vector->itr_val = adapter->rx_itr_setting;
+       if (q_vector->itr_val && q_vector->itr_val <= 3)
+               q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+       struct igb_q_vector *q_vector;
+
+       q_vector = adapter->q_vector[v_idx];
+       q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+       q_vector->tx_ring->q_vector = q_vector;
+       q_vector->itr_val = adapter->tx_itr_setting;
+       if (q_vector->itr_val && q_vector->itr_val <= 3)
+               q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+       int i;
+       int v_idx = 0;
+
+       if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+           (adapter->num_q_vectors < adapter->num_tx_queues))
+               return -ENOMEM;
+
+       if (adapter->num_q_vectors >=
+           (adapter->num_rx_queues + adapter->num_tx_queues)) {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+       } else {
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       if (i < adapter->num_tx_queues)
+                               igb_map_tx_ring_to_vector(adapter, i, v_idx);
+                       igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+               }
+               for (; i < adapter->num_tx_queues; i++)
+                       igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+       }
+       return 0;
+}
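
A worked example of the mapping above, assuming four Rx and four Tx queues (illustrative values only):

/*
 * num_q_vectors == 8: rx_ring[0..3] -> q_vector[0..3],
 *                     tx_ring[0..3] -> q_vector[4..7]   (one ring per vector)
 * num_q_vectors == 4: rx_ring[i] and tx_ring[i] share q_vector[i]
 *                     (Tx and Rx pair up when vectors are scarce)
 */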
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int err;
+
+       igb_set_interrupt_capability(adapter);
+
+       err = igb_alloc_q_vectors(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+               goto err_alloc_q_vectors;
+       }
+
+       err = igb_alloc_queues(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               goto err_alloc_queues;
+       }
+
+       err = igb_map_ring_to_vector(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+               goto err_map_queues;
+       }
+
+
+       return 0;
+err_map_queues:
+       igb_free_queues(adapter);
+err_alloc_queues:
+       igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+       igb_reset_interrupt_capability(adapter);
+       return err;
+}
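
The new entry points are meant to be used as a pair; a minimal usage sketch (error handling trimmed), matching how igb_sw_init() and igb_remove() call them later in this patch:

        err = igb_init_interrupt_scheme(adapter);  /* MSI-X, q_vectors, rings */
        if (err)
                return err;
        /* ... device runs ... */
        igb_clear_interrupt_scheme(adapter);       /* rings, q_vectors, MSI-X */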
+
 /**
  * igb_request_irq - initialize interrupts
  *
@@ -738,6 +823,7 @@ out:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;
 
@@ -746,18 +832,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
                if (!err)
                        goto request_done;
                /* fall back to MSI */
-               igb_reset_interrupt_capability(adapter);
+               igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
+               adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
-               igb_alloc_queues(adapter);
+               adapter->num_q_vectors = 1;
+               err = igb_alloc_q_vectors(adapter);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "Unable to allocate memory for vectors\n");
+                       goto request_done;
+               }
+               err = igb_alloc_queues(adapter);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "Unable to allocate memory for queues\n");
+                       igb_free_q_vectors(adapter);
+                       goto request_done;
+               }
+               igb_setup_all_tx_resources(adapter);
+               igb_setup_all_rx_resources(adapter);
        } else {
                switch (hw->mac.type) {
                case e1000_82575:
                        wr32(E1000_MSIXBM(0),
-                            (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+                            (E1000_EICR_RX_QUEUE0 |
+                             E1000_EICR_TX_QUEUE0 |
+                             E1000_EIMS_OTHER));
                        break;
                case e1000_82576:
                        wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -769,16 +873,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
 
        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-                                 netdev->name, netdev);
+                                 netdev->name, adapter);
                if (!err)
                        goto request_done;
+
                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }
 
        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-                         netdev->name, netdev);
+                         netdev->name, adapter);
 
        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -790,23 +895,19 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
-
        if (adapter->msix_entries) {
                int vector = 0, i;
 
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       free_irq(adapter->msix_entries[vector++].vector,
-                               &(adapter->tx_ring[i]));
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       free_irq(adapter->msix_entries[vector++].vector,
-                               &(adapter->rx_ring[i]));
+               free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-               free_irq(adapter->msix_entries[vector++].vector, netdev);
-               return;
+               for (i = 0; i < adapter->num_q_vectors; i++) {
+                       struct igb_q_vector *q_vector = adapter->q_vector[i];
+                       free_irq(adapter->msix_entries[vector++].vector,
+                                q_vector);
+               }
+       } else {
+               free_irq(adapter->pdev->irq, adapter);
        }
-
-       free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -857,24 +958,23 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
-       if (adapter->vlgrp) {
-               if (!vlan_group_get_device(adapter->vlgrp, vid)) {
-                       if (adapter->hw.mng_cookie.status &
-                               E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
-                               igb_vlan_rx_add_vid(netdev, vid);
-                               adapter->mng_vlan_id = vid;
-                       } else
-                               adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 
-                       if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
-                                       (vid != old_vid) &&
-                           !vlan_group_get_device(adapter->vlgrp, old_vid))
-                               igb_vlan_rx_kill_vid(netdev, old_vid);
-               } else
-                       adapter->mng_vlan_id = vid;
+       if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+               /* add VID to filter table */
+               igb_vfta_set(hw, vid, true);
+               adapter->mng_vlan_id = vid;
+       } else {
+               adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+       }
+
+       if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+           (vid != old_vid) &&
+           !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+               /* remove VID from filter table */
+               igb_vfta_set(hw, old_vid, false);
        }
 }
 
@@ -933,8 +1033,11 @@ static void igb_configure(struct igb_adapter *adapter)
 
        igb_restore_vlan(adapter);
 
-       igb_configure_tx(adapter);
+       igb_setup_tctl(adapter);
+       igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);
+
+       igb_configure_tx(adapter);
        igb_configure_rx(adapter);
 
        igb_rx_fifo_flush_82575(&adapter->hw);
@@ -967,18 +1070,24 @@ int igb_up(struct igb_adapter *adapter)
 
        clear_bit(__IGB_DOWN, &adapter->state);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_enable(&q_vector->napi);
+       }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
 
-       igb_vmm_control(adapter);
-       igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(adapter->netdev);
 
        /* Fire a link change interrupt to start the watchdog. */
@@ -1012,8 +1121,10 @@ void igb_down(struct igb_adapter *adapter)
        wrfl();
        msleep(10);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_disable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_disable(&q_vector->napi);
+       }
 
        igb_irq_disable(adapter);
 
@@ -1063,7 +1174,8 @@ void igb_reset(struct igb_adapter *adapter)
         */
        switch (mac->type) {
        case e1000_82576:
-               pba = E1000_PBA_64K;
+               pba = rd32(E1000_RXPBS);
+               pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
        default:
@@ -1138,10 +1250,10 @@ void igb_reset(struct igb_adapter *adapter)
        if (adapter->vfs_allocated_count) {
                int i;
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
-                       adapter->vf_data[i].clear_to_send = false;
+                       adapter->vf_data[i].flags = 0;
 
                /* ping all the active vfs to let them know we are going down */
-                       igb_ping_all_vfs(adapter);
+               igb_ping_all_vfs(adapter);
 
                /* disable transmits and receives */
                wr32(E1000_VFRE, 0);
@@ -1201,10 +1313,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
+       u16 eeprom_data = 0;
+       static int global_quad_port_a; /* global quad port a indication */
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        int err, pci_using_dac;
-       u16 eeprom_data = 0;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
 
@@ -1292,46 +1405,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
-#ifdef CONFIG_PCI_IOV
-       /* since iov functionality isn't critical to base device function we
-        * can accept failure.  If it fails we don't allow iov to be enabled */
-       if (hw->mac.type == e1000_82576) {
-               /* 82576 supports a maximum of 7 VFs in addition to the PF */
-               unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
-               int i;
-               unsigned char mac_addr[ETH_ALEN];
-
-               if (num_vfs) {
-                       adapter->vf_data = kcalloc(num_vfs,
-                                               sizeof(struct vf_data_storage),
-                                               GFP_KERNEL);
-                       if (!adapter->vf_data) {
-                               dev_err(&pdev->dev,
-                                       "Could not allocate VF private data - "
-                                       "IOV enable failed\n");
-                       } else {
-                               err = pci_enable_sriov(pdev, num_vfs);
-                               if (!err) {
-                                       adapter->vfs_allocated_count = num_vfs;
-                                       dev_info(&pdev->dev,
-                                                "%d vfs allocated\n",
-                                                num_vfs);
-                                       for (i = 0;
-                                            i < adapter->vfs_allocated_count;
-                                            i++) {
-                                               random_ether_addr(mac_addr);
-                                               igb_set_vf_mac(adapter, i,
-                                                              mac_addr);
-                                       }
-                               } else {
-                                       kfree(adapter->vf_data);
-                                       adapter->vf_data = NULL;
-                               }
-                       }
-               }
-       }
-
-#endif
        /* setup the private structure */
        err = igb_sw_init(adapter);
        if (err)
@@ -1339,16 +1412,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
        igb_get_bus_info_pcie(hw);
 
-       /* set flags */
-       switch (hw->mac.type) {
-       case e1000_82575:
-               adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-               break;
-       case e1000_82576:
-       default:
-               break;
-       }
-
        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;
 
@@ -1429,9 +1492,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        hw->fc.requested_mode = e1000_fc_default;
        hw->fc.current_mode = e1000_fc_default;
 
-       adapter->itr_setting = IGB_DEFAULT_ITR;
-       adapter->itr = IGB_START_ITR;
-
        igb_validate_mdi_setting(hw);
 
        /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1498,59 +1558,58 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                dev_info(&pdev->dev, "DCA enabled\n");
                igb_setup_dca(adapter);
        }
-#endif
 
-       /*
-        * Initialize hardware timer: we keep it running just in case
-        * that some program needs it later on.
-        */
-       memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-       adapter->cycles.read = igb_read_clock;
-       adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-       adapter->cycles.mult = 1;
-       adapter->cycles.shift = IGB_TSYNC_SHIFT;
-       wr32(E1000_TIMINCA,
-            (1<<24) |
-            IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
-#if 0
-       /*
-        * Avoid rollover while we initialize by resetting the time counter.
-        */
-       wr32(E1000_SYSTIML, 0x00000000);
-       wr32(E1000_SYSTIMH, 0x00000000);
-#else
-       /*
-        * Set registers so that rollover occurs soon to test this.
-        */
-       wr32(E1000_SYSTIML, 0x00000000);
-       wr32(E1000_SYSTIMH, 0xFF800000);
 #endif
-       wrfl();
-       timecounter_init(&adapter->clock,
-                        &adapter->cycles,
-                        ktime_to_ns(ktime_get_real()));
-
-       /*
-        * Synchronize our NIC clock against system wall clock. NIC
-        * time stamp reading requires ~3us per sample, each sample
-        * was pretty stable even under load => only require 10
-        * samples for each offset comparison.
-        */
-       memset(&adapter->compare, 0, sizeof(adapter->compare));
-       adapter->compare.source = &adapter->clock;
-       adapter->compare.target = ktime_get_real;
-       adapter->compare.num_samples = 10;
-       timecompare_update(&adapter->compare, 0);
 
-#ifdef DEBUG
-       {
-               char buffer[160];
-               printk(KERN_DEBUG
-                       "igb: %s: hw %p initialized timer\n",
-                       igb_get_time_str(adapter, buffer),
-                       &adapter->hw);
+       switch (hw->mac.type) {
+       case e1000_82576:
+               /*
+                * Initialize hardware timer: we keep it running just in case
+                * that some program needs it later on.
+                */
+               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+               adapter->cycles.read = igb_read_clock;
+               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+               adapter->cycles.mult = 1;
+               /**
+                * Scale the NIC clock cycle by a large factor so that
+                * relatively small clock corrections can be added or
+                * subtracted at each clock tick. The drawbacks of a large
+                * factor are a) that the clock register overflows more quickly
+                * (not such a big deal) and b) that the increment per tick has
+                * to fit into 24 bits.  As a result we need to use a shift of
+                * 19 so we can fit a value of 16 into the TIMINCA register.
+                */
+               adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+               wr32(E1000_TIMINCA,
+                               (1 << E1000_TIMINCA_16NS_SHIFT) |
+                               (16 << IGB_82576_TSYNC_SHIFT));
+
+               /* Set registers so that rollover occurs soon to test this. */
+               wr32(E1000_SYSTIML, 0x00000000);
+               wr32(E1000_SYSTIMH, 0xFF800000);
+               wrfl();
+
+               timecounter_init(&adapter->clock,
+                                &adapter->cycles,
+                                ktime_to_ns(ktime_get_real()));
+               /*
+                * Synchronize our NIC clock against system wall clock. NIC
+                * time stamp reading requires ~3us per sample, each sample
+                * was pretty stable even under load => only require 10
+                * samples for each offset comparison.
+                */
+               memset(&adapter->compare, 0, sizeof(adapter->compare));
+               adapter->compare.source = &adapter->clock;
+               adapter->compare.target = ktime_get_real;
+               adapter->compare.num_samples = 10;
+               timecompare_update(&adapter->compare, 0);
+               break;
+       case e1000_82575:
+               /* 82575 does not support timesync */
+       default:
+               break;
        }
-#endif
 
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
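
The compile-time check removed near the top of this patch enforced that the per-tick increment fits in TIMINCA's 24-bit field; the new constants still satisfy it. A quick sketch of the same check, assuming IGB_82576_TSYNC_SHIFT is 19 as the comment above states:

#if (16 << 19) >= (1 << 24)
# error "82576 SYSTIM increment must fit in TIMINCA's 24-bit field"
#endif
/* 16 << 19 == 0x800000, comfortably below the 0x1000000 limit */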
@@ -1584,9 +1643,8 @@ err_eeprom:
 
        if (hw->flash_address)
                iounmap(hw->flash_address);
-
-       igb_free_queues(adapter);
 err_sw_init:
+       igb_clear_interrupt_scheme(adapter);
        iounmap(hw->hw_addr);
 err_ioremap:
        free_netdev(netdev);
@@ -1640,9 +1698,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        if (!igb_check_reset_block(&adapter->hw))
                igb_reset_phy(&adapter->hw);
 
-       igb_reset_interrupt_capability(adapter);
-
-       igb_free_queues(adapter);
+       igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PCI_IOV
        /* reclaim resources allocated to VFs */
@@ -1671,6 +1727,54 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+/**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs.  The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+       struct pci_dev *pdev = adapter->pdev;
+
+       if (adapter->vfs_allocated_count > 7)
+               adapter->vfs_allocated_count = 7;
+
+       if (adapter->vfs_allocated_count) {
+               adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+                                          sizeof(struct vf_data_storage),
+                                          GFP_KERNEL);
+               /* if allocation failed then we do not support SR-IOV */
+               if (!adapter->vf_data) {
+                       adapter->vfs_allocated_count = 0;
+                       dev_err(&pdev->dev, "Unable to allocate memory for VF "
+                               "Data Storage\n");
+               }
+       }
+
+       if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+               kfree(adapter->vf_data);
+               adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+               adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+       } else {
+               unsigned char mac_addr[ETH_ALEN];
+               int i;
+               dev_info(&pdev->dev, "%d vfs allocated\n",
+                        adapter->vfs_allocated_count);
+               for (i = 0; i < adapter->vfs_allocated_count; i++) {
+                       random_ether_addr(mac_addr);
+                       igb_set_vf_mac(adapter, i, mac_addr);
+               }
+       }
+#endif /* CONFIG_PCI_IOV */
+}
+
 /**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
@@ -1689,20 +1793,25 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
        adapter->tx_ring_count = IGB_DEFAULT_TXD;
        adapter->rx_ring_count = IGB_DEFAULT_RXD;
-       adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       adapter->rx_ps_hdr_size = 0; /* disable packet split */
+       adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+       adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-       /* This call may decrease the number of queues depending on
-        * interrupt mode. */
-       igb_set_interrupt_capability(adapter);
+#ifdef CONFIG_PCI_IOV
+       if (hw->mac.type == e1000_82576)
+               adapter->vfs_allocated_count = max_vfs;
 
-       if (igb_alloc_queues(adapter)) {
+#endif /* CONFIG_PCI_IOV */
+       /* This call may decrease the number of queues */
+       if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
 
+       igb_probe_vfs(adapter);
+
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
 
@@ -1747,20 +1856,12 @@ static int igb_open(struct net_device *netdev)
 
        /* e1000_power_up_phy(adapter); */
 
-       adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-       if ((adapter->hw.mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
-               igb_update_mng_vlan(adapter);
-
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so.  */
        igb_configure(adapter);
 
-       igb_vmm_control(adapter);
-       igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
        err = igb_request_irq(adapter);
        if (err)
                goto err_req_irq;
@@ -1768,14 +1869,23 @@ static int igb_open(struct net_device *netdev)
        /* From here on the code is the same as igb_up() */
        clear_bit(__IGB_DOWN, &adapter->state);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_enable(&q_vector->napi);
+       }
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
 
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(netdev);
 
        /* Fire a link status change interrupt to start the watchdog. */
@@ -1818,28 +1928,18 @@ static int igb_close(struct net_device *netdev)
        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);
 
-       /* kill manageability vlan ID if supported, but not if a vlan with
-        * the same ID is registered on the host OS (let 8021q kill it) */
-       if ((adapter->hw.mng_cookie.status &
-                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-            !(adapter->vlgrp &&
-              vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
-               igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
        return 0;
 }
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = tx_ring->pdev;
        int size;
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1852,20 +1952,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+       tx_ring->desc = pci_alloc_consistent(pdev,
+                                            tx_ring->size,
                                             &tx_ring->dma);
 
        if (!tx_ring->desc)
                goto err;
 
-       tx_ring->adapter = adapter;
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;
 
 err:
        vfree(tx_ring->buffer_info);
-       dev_err(&adapter->pdev->dev,
+       dev_err(&pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
 }
@@ -1879,13 +1979,13 @@ err:
  **/
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        int i, err = 0;
-       int r_idx;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               err = igb_setup_tx_resources(&adapter->tx_ring[i]);
                if (err) {
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
                                igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1894,56 +1994,23 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
        }
 
        for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
-               r_idx = i % adapter->num_tx_queues;
+               int r_idx = i % adapter->num_tx_queues;
                adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
        }
        return err;
 }
 
 /**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
  **/
-static void igb_configure_tx(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
-       u64 tdba;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;
-       u32 txdctl, txctrl;
-       int i, j;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *ring = &adapter->tx_ring[i];
-               j = ring->reg_idx;
-               wr32(E1000_TDLEN(j),
-                    ring->count * sizeof(union e1000_adv_tx_desc));
-               tdba = ring->dma;
-               wr32(E1000_TDBAL(j),
-                    tdba & 0x00000000ffffffffULL);
-               wr32(E1000_TDBAH(j), tdba >> 32);
-
-               ring->head = E1000_TDH(j);
-               ring->tail = E1000_TDT(j);
-               writel(0, hw->hw_addr + ring->tail);
-               writel(0, hw->hw_addr + ring->head);
-               txdctl = rd32(E1000_TXDCTL(j));
-               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-               wr32(E1000_TXDCTL(j), txdctl);
-
-               /* Turn off Relaxed Ordering on head write-backs.  The
-                * writebacks MUST be delivered in order or it will
-                * completely screw up our bookeeping.
-                */
-               txctrl = rd32(E1000_DCA_TXCTRL(j));
-               txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-               wr32(E1000_DCA_TXCTRL(j), txctrl);
-       }
 
-       /* disable queue 0 to prevent tail bump w/o re-configuration */
-       if (adapter->vfs_allocated_count)
-               wr32(E1000_TXDCTL(0), 0);
+       /* disable queue 0 which is enabled by default on 82575 and 82576 */
+       wr32(E1000_TXDCTL(0), 0);
 
        /* Program the Transmit Control Register */
        tctl = rd32(E1000_TCTL);
@@ -1953,9 +2020,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
        igb_config_collision_dist(hw);
 
-       /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
        /* Enable transmits */
        tctl |= E1000_TCTL_EN;
 
@@ -1963,16 +2027,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 }
 
 /**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 txdctl;
+       u64 tdba = ring->dma;
+       int reg_idx = ring->reg_idx;
+
+       /* disable the queue */
+       txdctl = rd32(E1000_TXDCTL(reg_idx));
+       wr32(E1000_TXDCTL(reg_idx),
+                       txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+       wrfl();
+       mdelay(10);
+
+       wr32(E1000_TDLEN(reg_idx),
+                       ring->count * sizeof(union e1000_adv_tx_desc));
+       wr32(E1000_TDBAL(reg_idx),
+                       tdba & 0x00000000ffffffffULL);
+       wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+       ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
+
+       txdctl |= IGB_TX_PTHRESH;
+       txdctl |= IGB_TX_HTHRESH << 8;
+       txdctl |= IGB_TX_WTHRESH << 16;
+
+       txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+       wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
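
As an aside for review, here is a tiny stand-alone sketch of the TXDCTL threshold packing that igb_configure_tx_ring() performs above. The prefetch/host/write-back threshold values and the queue-enable bit are assumptions for illustration only; the driver takes the real values from its headers (IGB_TX_PTHRESH and friends).

/*
 * User-space sketch of the TXDCTL packing above; threshold values and the
 * queue-enable bit below are assumed for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define TX_PTHRESH          8            /* assumed prefetch threshold */
#define TX_HTHRESH          1            /* assumed host threshold */
#define TX_WTHRESH          1            /* assumed write-back threshold */
#define TXDCTL_QUEUE_ENABLE 0x02000000u  /* assumed enable bit */

int main(void)
{
	uint32_t txdctl = 0;

	txdctl |= TX_PTHRESH;        /* bits 0-5 */
	txdctl |= TX_HTHRESH << 8;   /* bits 8-13 */
	txdctl |= TX_WTHRESH << 16;  /* bits 16-21 */
	txdctl |= TXDCTL_QUEUE_ENABLE;

	printf("TXDCTL = 0x%08x\n", txdctl);
	return 0;
}
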
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
  * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        int size, desc_len;
 
        size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -1996,13 +2113,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       rx_ring->adapter = adapter;
-
        return 0;
 
 err:
        vfree(rx_ring->buffer_info);
-       dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+       rx_ring->buffer_info = NULL;
+       dev_err(&pdev->dev, "Unable to allocate memory for "
                "the receive descriptor ring\n");
        return -ENOMEM;
 }
@@ -2016,12 +2132,13 @@ err:
  **/
 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               err = igb_setup_rx_resources(&adapter->rx_ring[i]);
                if (err) {
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&pdev->dev,
                                "Allocation for Rx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
                                igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2032,16 +2149,119 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
        return err;
 }
 
+/**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 mrqc, rxcsum;
+       u32 j, num_rx_queues, shift = 0, shift2 = 0;
+       union e1000_reta {
+               u32 dword;
+               u8  bytes[4];
+       } reta;
+       static const u8 rsshash[40] = {
+               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+       /* Fill out hash function seeds */
+       for (j = 0; j < 10; j++) {
+               u32 rsskey = rsshash[(j * 4)];
+               rsskey |= rsshash[(j * 4) + 1] << 8;
+               rsskey |= rsshash[(j * 4) + 2] << 16;
+               rsskey |= rsshash[(j * 4) + 3] << 24;
+               array_wr32(E1000_RSSRK(0), j, rsskey);
+       }
+
+       num_rx_queues = adapter->num_rx_queues;
+
+       if (adapter->vfs_allocated_count) {
+               /* 82575 and 82576 support 2 RSS queues for VMDq */
+               switch (hw->mac.type) {
+               case e1000_82576:
+                       shift = 3;
+                       num_rx_queues = 2;
+                       break;
+               case e1000_82575:
+                       shift = 2;
+                       shift2 = 6;
+               default:
+                       break;
+               }
+       } else {
+               if (hw->mac.type == e1000_82575)
+                       shift = 6;
+       }
+
+       for (j = 0; j < (32 * 4); j++) {
+               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+               if (shift2)
+                       reta.bytes[j & 3] |= num_rx_queues << shift2;
+               if ((j & 3) == 3)
+                       wr32(E1000_RETA(j >> 2), reta.dword);
+       }
+
+       /*
+        * Disable raw packet checksumming so that RSS hash is placed in
+        * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+        * offloads as they are enabled by default
+        */
+       rxcsum = rd32(E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       if (adapter->hw.mac.type >= e1000_82576)
+               /* Enable Receive Checksum Offload for SCTP */
+               rxcsum |= E1000_RXCSUM_CRCOFL;
+
+       /* Don't need to set TUOFL or IPOFL, they default to 1 */
+       wr32(E1000_RXCSUM, rxcsum);
+
+       /* If VMDq is enabled then we set the appropriate mode for that, else
+        * we default to RSS so that an RSS hash is calculated per packet even
+        * if we are only using one queue */
+       if (adapter->vfs_allocated_count) {
+               if (hw->mac.type > e1000_82575) {
+                       /* Set the default pool for the PF's first queue */
+                       u32 vtctl = rd32(E1000_VT_CTL);
+                       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+                                  E1000_VT_CTL_DISABLE_DEF_POOL);
+                       vtctl |= adapter->vfs_allocated_count <<
+                               E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+                       wr32(E1000_VT_CTL, vtctl);
+               }
+               if (adapter->num_rx_queues > 1)
+                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+               else
+                       mrqc = E1000_MRQC_ENABLE_VMDQ;
+       } else {
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+       }
+       igb_vmm_control(adapter);
+
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                E1000_MRQC_RSS_FIELD_IPV4_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                E1000_MRQC_RSS_FIELD_IPV6_UDP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+       wr32(E1000_MRQC, mrqc);
+}
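
For reference, a user-space sketch of the two loops in igb_setup_mrqc() above: packing the 40-byte RSS key into ten RSSRK words (low byte first) and filling the 128-entry redirection table round-robin across the rx queues. Register writes are replaced by printf(); NUM_RX_QUEUES and SHIFT are illustrative stand-ins for the values the driver derives at runtime.

/*
 * Sketch only: mirrors the RSSRK and RETA loops above in user space.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_RX_QUEUES 4  /* assumed queue count */
#define SHIFT         0  /* assumed RETA entry shift */

static const uint8_t rsshash[40] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
	0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
	0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
	0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

int main(void)
{
	union { uint32_t dword; uint8_t bytes[4]; } reta;
	uint32_t j;

	/* pack the key four bytes at a time, low byte first */
	for (j = 0; j < 10; j++) {
		uint32_t rsskey = rsshash[j * 4];
		rsskey |= rsshash[j * 4 + 1] << 8;
		rsskey |= rsshash[j * 4 + 2] << 16;
		rsskey |= (uint32_t)rsshash[j * 4 + 3] << 24;
		printf("RSSRK[%u] = 0x%08x\n", j, rsskey);
	}

	/* spread 128 RETA entries round-robin across the rx queues */
	for (j = 0; j < 32 * 4; j++) {
		reta.bytes[j & 3] = (j % NUM_RX_QUEUES) << SHIFT;
		if ((j & 3) == 3)
			printf("RETA[%u] = 0x%08x\n", j >> 2, reta.dword);
	}
	return 0;
}
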
+
 /**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl;
-       u32 srrctl = 0;
-       int i;
 
        rctl = rd32(E1000_RCTL);
 
@@ -2063,70 +2283,42 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
         */
        rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-       /* enable LPE when to prevent packets larger than max_frame_size */
-               rctl |= E1000_RCTL_LPE;
-
-       /* Setup buffer sizes */
-       switch (adapter->rx_buffer_len) {
-       case IGB_RXBUFFER_256:
-               rctl |= E1000_RCTL_SZ_256;
-               break;
-       case IGB_RXBUFFER_512:
-               rctl |= E1000_RCTL_SZ_512;
-               break;
-       default:
-               srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-                        >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-               break;
-       }
+       /* enable LPE to prevent packets larger than max_frame_size */
+       rctl |= E1000_RCTL_LPE;
 
-       /* 82575 and greater support packet-split where the protocol
-        * header is placed in skb->data and the packet data is
-        * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-        * In the case of a non-split, skb->data is linearly filled,
-        * followed by the page buffers.  Therefore, skb->data is
-        * sized to hold the largest protocol header.
-        */
-       /* allocations using alloc_page take too long for regular MTU
-        * so only enable packet split for jumbo frames */
-       if (adapter->netdev->mtu > ETH_DATA_LEN) {
-               adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-               srrctl |= adapter->rx_ps_hdr_size <<
-                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-       } else {
-               adapter->rx_ps_hdr_size = 0;
-               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-       }
+       /* disable queue 0 to prevent tail write w/o re-config */
+       wr32(E1000_RXDCTL(0), 0);
 
        /* Attention!!!  For SR-IOV PF driver operations you must enable
         * queue drop for all VF and PF queues to prevent head of line blocking
         * if an un-trusted VF does not provide descriptors to hardware.
         */
        if (adapter->vfs_allocated_count) {
-               u32 vmolr;
-
                /* set all queue drop enable bits */
                wr32(E1000_QDE, ALL_QUEUES);
-               srrctl |= E1000_SRRCTL_DROP_EN;
+       }
 
-               /* disable queue 0 to prevent tail write w/o re-config */
-               wr32(E1000_RXDCTL(0), 0);
+       wr32(E1000_RCTL, rctl);
+}
 
-               vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
-               if (rctl & E1000_RCTL_LPE)
-                       vmolr |= E1000_VMOLR_LPE;
-               if (adapter->num_rx_queues > 1)
-                       vmolr |= E1000_VMOLR_RSSE;
-               wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
-       }
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vmolr;
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               int j = adapter->rx_ring[i].reg_idx;
-               wr32(E1000_SRRCTL(j), srrctl);
-       }
+       /* if it isn't the PF, check to see if VFs are enabled and
+        * increase the size to support VLAN tags */
+       if (vfn < adapter->vfs_allocated_count &&
+           adapter->vf_data[vfn].vlans_enabled)
+               size += VLAN_TAG_SIZE;
 
-       wr32(E1000_RCTL, rctl);
+       vmolr = rd32(E1000_VMOLR(vfn));
+       vmolr &= ~E1000_VMOLR_RLPML_MASK;
+       vmolr |= size | E1000_VMOLR_LPE;
+       wr32(E1000_VMOLR(vfn), vmolr);
+
+       return 0;
 }
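
A hedged sketch of the read-modify-write that igb_set_vf_rlpml() applies to a pool's VMOLR register above: clear the long-packet max-length field, then program the new size plus the LPE bit, growing the size by a VLAN tag when the pool has VLANs enabled. The mask and bit values below are assumptions made for the example, not taken from the hardware headers.

/*
 * Sketch only: the register constants here are assumed.
 */
#include <stdio.h>
#include <stdint.h>

#define VMOLR_RLPML_MASK 0x00003FFFu  /* assumed max-length field */
#define VMOLR_LPE        0x00010000u  /* assumed long-packet-enable bit */
#define VLAN_TAG_SIZE    4

static uint32_t set_rlpml(uint32_t vmolr, uint32_t size, int vlans_enabled)
{
	if (vlans_enabled)
		size += VLAN_TAG_SIZE;   /* leave room for the VLAN tag */

	vmolr &= ~VMOLR_RLPML_MASK;      /* clear the old length */
	vmolr |= size | VMOLR_LPE;
	return vmolr;
}

int main(void)
{
	uint32_t vmolr = 0x00200600;     /* arbitrary previous register value */

	printf("VMOLR = 0x%08x\n", set_rlpml(vmolr, 1522, 1));
	return 0;
}
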
 
 /**
@@ -2148,144 +2340,118 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
         * size and set the VMOLR RLPML to the size we need */
        if (pf_id) {
                igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
-               max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+               max_frame_size = MAX_JUMBO_FRAME_SIZE;
        }
 
        wr32(E1000_RLPML, max_frame_size);
 }
 
-/**
- * igb_configure_vt_default_pool - Configure VT default pool
- * @adapter: board private structure
- *
- * Configure the default pool
- **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u16 pf_id = adapter->vfs_allocated_count;
-       u32 vtctl;
+       u32 vmolr;
 
-       /* not in sr-iov mode - do nothing */
-       if (!pf_id)
+       /*
+        * This register exists only on 82576 and newer, so on older devices
+        * we should exit and do nothing
+        */
+       if (hw->mac.type < e1000_82576)
                return;
 
-       vtctl = rd32(E1000_VT_CTL);
-       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-                  E1000_VT_CTL_DISABLE_DEF_POOL);
-       vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-       wr32(E1000_VT_CTL, vtctl);
+       vmolr = rd32(E1000_VMOLR(vfn));
+       vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
+                E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
+
+       /* clear all bits that might not be set */
+       vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+       if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
+               vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+       /*
+        * for VMDq only allow the VFs and pool 0 to accept broadcast and
+        * multicast packets
+        */
+       if (vfn <= adapter->vfs_allocated_count)
+               vmolr |= E1000_VMOLR_BAM;          /* Accept broadcast */
+
+       wr32(E1000_VMOLR(vfn), vmolr);
 }
 
 /**
- * igb_configure_rx - Configure receive Unit after Reset
+ * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
+ * @ring: receive ring to be configured
  *
  * Configure the Rx unit of the MAC after a reset.
  **/
-static void igb_configure_rx(struct igb_adapter *adapter)
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
-       u64 rdba;
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl, rxcsum;
-       u32 rxdctl;
-       int i;
-
-       /* disable receives while setting up the descriptors */
-       rctl = rd32(E1000_RCTL);
-       wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-       wrfl();
-       mdelay(10);
-
-       if (adapter->itr_setting > 3)
-               wr32(E1000_ITR, adapter->itr);
-
-       /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &adapter->rx_ring[i];
-               int j = ring->reg_idx;
-               rdba = ring->dma;
-               wr32(E1000_RDBAL(j),
-                    rdba & 0x00000000ffffffffULL);
-               wr32(E1000_RDBAH(j), rdba >> 32);
-               wr32(E1000_RDLEN(j),
-                    ring->count * sizeof(union e1000_adv_rx_desc));
-
-               ring->head = E1000_RDH(j);
-               ring->tail = E1000_RDT(j);
-               writel(0, hw->hw_addr + ring->tail);
-               writel(0, hw->hw_addr + ring->head);
-
-               rxdctl = rd32(E1000_RXDCTL(j));
-               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-               rxdctl &= 0xFFF00000;
-               rxdctl |= IGB_RX_PTHRESH;
-               rxdctl |= IGB_RX_HTHRESH << 8;
-               rxdctl |= IGB_RX_WTHRESH << 16;
-               wr32(E1000_RXDCTL(j), rxdctl);
-       }
-
-       if (adapter->num_rx_queues > 1) {
-               u32 random[10];
-               u32 mrqc;
-               u32 j, shift;
-               union e1000_reta {
-                       u32 dword;
-                       u8  bytes[4];
-               } reta;
-
-               get_random_bytes(&random[0], 40);
-
-               if (hw->mac.type >= e1000_82576)
-                       shift = 0;
-               else
-                       shift = 6;
-               for (j = 0; j < (32 * 4); j++) {
-                       reta.bytes[j & 3] =
-                               adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-                       if ((j & 3) == 3)
-                               writel(reta.dword,
-                                      hw->hw_addr + E1000_RETA(0) + (j & ~3));
-               }
-               if (adapter->vfs_allocated_count)
-                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-               else
-                       mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-               /* Fill out hash function seeds */
-               for (j = 0; j < 10; j++)
-                       array_wr32(E1000_RSSRK(0), j, random[j]);
-
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-                        E1000_MRQC_RSS_FIELD_IPV4_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-                        E1000_MRQC_RSS_FIELD_IPV6_UDP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-               wr32(E1000_MRQC, mrqc);
-       } else if (adapter->vfs_allocated_count) {
-               /* Enable multi-queue for sr-iov */
-               wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
+       u64 rdba = ring->dma;
+       int reg_idx = ring->reg_idx;
+       u32 srrctl, rxdctl;
+
+       /* disable the queue */
+       rxdctl = rd32(E1000_RXDCTL(reg_idx));
+       wr32(E1000_RXDCTL(reg_idx),
+                       rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+       /* Set DMA base address registers */
+       wr32(E1000_RDBAL(reg_idx),
+            rdba & 0x00000000ffffffffULL);
+       wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+       wr32(E1000_RDLEN(reg_idx),
+                      ring->count * sizeof(union e1000_adv_rx_desc));
+
+       /* initialize head and tail */
+       ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
+
+       /* set descriptor configuration */
+       if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+               srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+               srrctl |= IGB_RXBUFFER_16384 >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+               srrctl |= (PAGE_SIZE / 2) >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+       } else {
+               srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+                        E1000_SRRCTL_BSIZEPKT_SHIFT;
+               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 
-       /* Enable Receive Checksum Offload for TCP and UDP */
-       rxcsum = rd32(E1000_RXCSUM);
-       /* Disable raw packet checksumming */
-       rxcsum |= E1000_RXCSUM_PCSD;
+       wr32(E1000_SRRCTL(reg_idx), srrctl);
 
-       if (adapter->hw.mac.type == e1000_82576)
-               /* Enable Receive Checksum Offload for SCTP */
-               rxcsum |= E1000_RXCSUM_CRCOFL;
+       /* set filtering for VMDQ pools */
+       igb_set_vmolr(adapter, reg_idx & 0x7);
 
-       /* Don't need to set TUOFL or IPOFL, they default to 1 */
-       wr32(E1000_RXCSUM, rxcsum);
+       /* enable receive descriptor fetching */
+       rxdctl = rd32(E1000_RXDCTL(reg_idx));
+       rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+       rxdctl &= 0xFFF00000;
+       rxdctl |= IGB_RX_PTHRESH;
+       rxdctl |= IGB_RX_HTHRESH << 8;
+       rxdctl |= IGB_RX_WTHRESH << 16;
+       wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
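
For illustration, a stand-alone sketch of the SRRCTL buffer-size arithmetic in igb_configure_rx_ring() above: small per-ring buffers get the header-split layout (header size in 64-byte units plus a half-page packet buffer), larger buffers get the one-buffer layout (size in 1 KB units). The field shifts and descriptor-type encodings below are assumptions made for this example; the driver takes them from the hardware headers.

/*
 * Sketch only: shift and descriptor-type values are assumed.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define BSIZEPKT_SHIFT     10           /* assumed: packet size in 1 KB units */
#define BSIZEHDRSIZE_SHIFT 2            /* assumed: header size in 64 B units */
#define DESCTYPE_HDR_SPLIT (5u << 25)   /* assumed encoding */
#define DESCTYPE_ONEBUF    (1u << 25)   /* assumed encoding */
#define RXBUFFER_1024      1024
#define HALF_PAGE          2048

static uint32_t srrctl_for(uint32_t rx_buffer_len)
{
	uint32_t srrctl;

	if (rx_buffer_len < RXBUFFER_1024) {
		/* small buffer: headers go to the buffer, data to half pages */
		srrctl  = ALIGN_UP(rx_buffer_len, 64) << BSIZEHDRSIZE_SHIFT;
		srrctl |= HALF_PAGE >> BSIZEPKT_SHIFT;
		srrctl |= DESCTYPE_HDR_SPLIT;
	} else {
		/* large buffer: the whole packet lands in one buffer */
		srrctl  = ALIGN_UP(rx_buffer_len, 1024) >> BSIZEPKT_SHIFT;
		srrctl |= DESCTYPE_ONEBUF;
	}
	return srrctl;
}

int main(void)
{
	printf("len  128 -> SRRCTL 0x%08x\n", srrctl_for(128));
	printf("len 2048 -> SRRCTL 0x%08x\n", srrctl_for(2048));
	return 0;
}
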
 
-       /* Set the default pool for the PF's first queue */
-       igb_configure_vt_default_pool(adapter);
+/**
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+       int i;
 
        /* set UTA to appropriate mode */
        igb_set_uta(adapter);
@@ -2294,10 +2460,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
        igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
                         adapter->vfs_allocated_count);
 
-       igb_rlpml_set(adapter);
-
-       /* Enable Receives */
-       wr32(E1000_RCTL, rctl);
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -2308,14 +2474,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = tx_ring->adapter->pdev;
-
        igb_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;
 
-       pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
+       pci_free_consistent(tx_ring->pdev, tx_ring->size,
+                           tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -2334,12 +2503,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
                igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
-                                          struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
+                                   struct igb_buffer *buffer_info)
 {
        buffer_info->dma = 0;
        if (buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+               skb_dma_unmap(&tx_ring->pdev->dev,
+                             buffer_info->skb,
                              DMA_TO_DEVICE);
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
@@ -2354,7 +2524,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = tx_ring->adapter;
        struct igb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
@@ -2365,21 +2534,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
-               igb_unmap_and_free_tx_resource(adapter, buffer_info);
+               igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
        }
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
        /* Zero out the descriptor ring */
-
        memset(tx_ring->desc, 0, tx_ring->size);
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       writel(0, adapter->hw.hw_addr + tx_ring->head);
-       writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2402,14 +2567,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = rx_ring->adapter->pdev;
-
        igb_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
 
-       pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+       /* if not set, then don't free */
+       if (!rx_ring->desc)
+               return;
+
+       pci_free_consistent(rx_ring->pdev, rx_ring->size,
+                           rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -2434,26 +2602,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
        struct igb_buffer *buffer_info;
-       struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;
 
        if (!rx_ring->buffer_info)
                return;
+
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
-                       if (adapter->rx_ps_hdr_size)
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size,
-                                                PCI_DMA_FROMDEVICE);
-                       else
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_buffer_len,
-                                                PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(rx_ring->pdev,
+                                        buffer_info->dma,
+                                        rx_ring->rx_buffer_len,
+                                        PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
 
@@ -2461,14 +2624,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
+               if (buffer_info->page_dma) {
+                       pci_unmap_page(rx_ring->pdev,
+                                      buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      PCI_DMA_FROMDEVICE);
+                       buffer_info->page_dma = 0;
+               }
                if (buffer_info->page) {
-                       if (buffer_info->page_dma)
-                               pci_unmap_page(pdev, buffer_info->page_dma,
-                                              PAGE_SIZE / 2,
-                                              PCI_DMA_FROMDEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
-                       buffer_info->page_dma = 0;
                        buffer_info->page_offset = 0;
                }
        }
@@ -2481,9 +2646,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       writel(0, adapter->hw.hw_addr + rx_ring->head);
-       writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -2749,7 +2911,6 @@ static void igb_watchdog_task(struct work_struct *work)
        struct net_device *netdev = adapter->netdev;
        struct igb_ring *tx_ring = adapter->tx_ring;
        u32 link;
-       u32 eics = 0;
        int i;
 
        link = igb_has_link(adapter);
@@ -2846,18 +3007,22 @@ link_up:
                }
        }
 
+       /* Force detection of hung controller every watchdog period */
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i].detect_tx_hung = true;
+
        /* Cause software interrupt to ensure rx ring is cleaned */
        if (adapter->msix_entries) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       eics |= adapter->rx_ring[i].eims_value;
+               u32 eics = 0;
+               for (i = 0; i < adapter->num_q_vectors; i++) {
+                       struct igb_q_vector *q_vector = adapter->q_vector[i];
+                       eics |= q_vector->eims_value;
+               }
                wr32(E1000_EICS, eics);
        } else {
                wr32(E1000_ICS, E1000_ICS_RXDMT0);
        }
 
-       /* Force detection of hung controller every watchdog period */
-       tx_ring->detect_tx_hung = true;
-
        /* Reset the timer */
        if (!test_bit(__IGB_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
@@ -2871,7 +3036,6 @@ enum latency_range {
        latency_invalid = 255
 };
 
-
 /**
  * igb_update_ring_itr - update the dynamic ITR value based on packet size
  *
@@ -2886,25 +3050,37 @@ enum latency_range {
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-       int new_val = rx_ring->itr_val;
+       int new_val = q_vector->itr_val;
        int avg_wire_size = 0;
-       struct igb_adapter *adapter = rx_ring->adapter;
-
-       if (!rx_ring->total_packets)
-               goto clear_counts; /* no packets, so don't do anything */
+       struct igb_adapter *adapter = q_vector->adapter;
 
        /* For non-gigabit speeds, just fix the interrupt rate at 4000
         * ints/sec - ITR timer value of 120 ticks.
         */
        if (adapter->link_speed != SPEED_1000) {
-               new_val = 120;
+               new_val = 976;
                goto set_itr_val;
        }
-       avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
+
+       if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+               struct igb_ring *ring = q_vector->rx_ring;
+               avg_wire_size = ring->total_bytes / ring->total_packets;
+       }
+
+       if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+               struct igb_ring *ring = q_vector->tx_ring;
+               avg_wire_size = max_t(u32, avg_wire_size,
+                                     (ring->total_bytes /
+                                      ring->total_packets));
+       }
+
+       /* if avg_wire_size isn't set no work was done */
+       if (!avg_wire_size)
+               goto clear_counts;
 
        /* Add 24 bytes to size to account for CRC, preamble, and gap */
        avg_wire_size += 24;
@@ -2919,13 +3095,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
                new_val = avg_wire_size / 2;
 
 set_itr_val:
-       if (new_val != rx_ring->itr_val) {
-               rx_ring->itr_val = new_val;
-               rx_ring->set_itr = 1;
+       if (new_val != q_vector->itr_val) {
+               q_vector->itr_val = new_val;
+               q_vector->set_itr = 1;
        }
 clear_counts:
-       rx_ring->total_bytes = 0;
-       rx_ring->total_packets = 0;
+       if (q_vector->rx_ring) {
+               q_vector->rx_ring->total_bytes = 0;
+               q_vector->rx_ring->total_packets = 0;
+       }
+       if (q_vector->tx_ring) {
+               q_vector->tx_ring->total_bytes = 0;
+               q_vector->tx_ring->total_packets = 0;
+       }
 }
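
A minimal user-space sketch of the averaging step in igb_update_ring_itr() above: take the per-ring average wire size from whichever rings the q_vector owns, keep the larger of the two, and add 24 bytes for CRC, preamble and inter-frame gap. The final bucketing into an ITR value is not reproduced here; all names are illustrative.

/*
 * Sketch only: reproduces just the averaging logic visible in the hunk.
 */
#include <stdio.h>
#include <stdint.h>

struct ring_stats {
	uint64_t total_bytes;
	uint64_t total_packets;
};

static uint32_t avg_wire_size(const struct ring_stats *rx,
			      const struct ring_stats *tx)
{
	uint32_t avg = 0;

	if (rx && rx->total_packets)
		avg = rx->total_bytes / rx->total_packets;

	if (tx && tx->total_packets) {
		uint32_t tx_avg = tx->total_bytes / tx->total_packets;
		if (tx_avg > avg)   /* keep the larger of the two averages */
			avg = tx_avg;
	}

	if (avg)
		avg += 24;  /* account for CRC, preamble and inter-frame gap */
	return avg;         /* 0 means no work was done this interval */
}

int main(void)
{
	struct ring_stats rx = { .total_bytes = 150000, .total_packets = 100 };
	struct ring_stats tx = { .total_bytes =  64000, .total_packets = 1000 };

	printf("avg wire size: %u bytes\n", avg_wire_size(&rx, &tx));
	return 0;
}
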
 
 /**
@@ -2942,7 +3124,7 @@ clear_counts:
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  **/
@@ -2994,8 +3176,9 @@ update_itr_done:
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        u16 current_itr;
-       u32 new_itr = adapter->itr;
+       u32 new_itr = q_vector->itr_val;
 
        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
        if (adapter->link_speed != SPEED_1000) {
@@ -3009,18 +3192,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
                                    adapter->rx_ring->total_packets,
                                    adapter->rx_ring->total_bytes);
 
-       if (adapter->rx_ring->buddy) {
-               adapter->tx_itr = igb_update_itr(adapter,
-                                           adapter->tx_itr,
-                                           adapter->tx_ring->total_packets,
-                                           adapter->tx_ring->total_bytes);
-               current_itr = max(adapter->rx_itr, adapter->tx_itr);
-       } else {
-               current_itr = adapter->rx_itr;
-       }
+       adapter->tx_itr = igb_update_itr(adapter,
+                                   adapter->tx_itr,
+                                   adapter->tx_ring->total_packets,
+                                   adapter->tx_ring->total_bytes);
+       current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
        /* conservative mode (itr 3) eliminates the lowest_latency setting */
-       if (adapter->itr_setting == 3 && current_itr == lowest_latency)
+       if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
                current_itr = low_latency;
 
        switch (current_itr) {
@@ -3041,18 +3220,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
 set_itr_now:
        adapter->rx_ring->total_bytes = 0;
        adapter->rx_ring->total_packets = 0;
-       if (adapter->rx_ring->buddy) {
-               adapter->rx_ring->buddy->total_bytes = 0;
-               adapter->rx_ring->buddy->total_packets = 0;
-       }
+       adapter->tx_ring->total_bytes = 0;
+       adapter->tx_ring->total_packets = 0;
 
-       if (new_itr != adapter->itr) {
+       if (new_itr != q_vector->itr_val) {
                /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing */
-               new_itr = new_itr > adapter->itr ?
-                            max((new_itr * adapter->itr) /
-                                (new_itr + (adapter->itr >> 2)), new_itr) :
+               new_itr = new_itr > q_vector->itr_val ?
+                            max((new_itr * q_vector->itr_val) /
+                                (new_itr + (q_vector->itr_val >> 2)),
+                                new_itr) :
                             new_itr;
                /* Don't write the value here; it resets the adapter's
                 * internal timer, and causes us to delay far longer than
@@ -3060,15 +3238,13 @@ set_itr_now:
                 * value at the beginning of the next interrupt so the timing
                 * ends up being correct.
                 */
-               adapter->itr = new_itr;
-               adapter->rx_ring->itr_val = new_itr;
-               adapter->rx_ring->set_itr = 1;
+               q_vector->itr_val = new_itr;
+               q_vector->set_itr = 1;
        }
 
        return;
 }
 
-
 #define IGB_TX_FLAGS_CSUM              0x00000001
 #define IGB_TX_FLAGS_VLAN              0x00000002
 #define IGB_TX_FLAGS_TSO               0x00000004
@@ -3077,8 +3253,7 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT        16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-                             struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
        struct e1000_adv_tx_context_desc *context_desc;
@@ -3140,8 +3315,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
        /* For 82575, context index must be unique per ring. */
-       if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-               mss_l4len_idx |= tx_ring->queue_index << 4;
+       if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+               mss_l4len_idx |= tx_ring->reg_idx << 4;
 
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
        context_desc->seqnum_seed = 0;
@@ -3158,14 +3333,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-                                       struct igb_ring *tx_ring,
-                                       struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+                                  struct sk_buff *skb, u32 tx_flags)
 {
        struct e1000_adv_tx_context_desc *context_desc;
-       unsigned int i;
+       struct pci_dev *pdev = tx_ring->pdev;
        struct igb_buffer *buffer_info;
        u32 info = 0, tu_cmd = 0;
+       unsigned int i;
 
        if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
            (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3212,7 +3387,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                                break;
                        default:
                                if (unlikely(net_ratelimit()))
-                                       dev_warn(&adapter->pdev->dev,
+                                       dev_warn(&pdev->dev,
                                            "partial checksum but proto=%x!\n",
                                            skb->protocol);
                                break;
@@ -3221,11 +3396,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
                context_desc->seqnum_seed = 0;
-               if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+               if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
                        context_desc->mss_l4len_idx =
-                               cpu_to_le32(tx_ring->queue_index << 4);
-               else
-                       context_desc->mss_l4len_idx = 0;
+                               cpu_to_le32(tx_ring->reg_idx << 4);
 
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
@@ -3244,11 +3417,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_TXD_PWR        16
 #define IGB_MAX_DATA_PER_TXD   (1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-                                struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                 unsigned int first)
 {
        struct igb_buffer *buffer_info;
+       struct pci_dev *pdev = tx_ring->pdev;
        unsigned int len = skb_headlen(skb);
        unsigned int count = 0, i;
        unsigned int f;
@@ -3256,8 +3429,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+       if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
+               dev_err(&pdev->dev, "TX DMA map failed\n");
                return 0;
        }
 
@@ -3296,8 +3469,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
        return count + 1;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-                                   struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
 {
@@ -3329,10 +3501,11 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-           (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+       if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+           (tx_flags & (IGB_TX_FLAGS_CSUM |
+                        IGB_TX_FLAGS_TSO |
                         IGB_TX_FLAGS_VLAN)))
-               olinfo_status |= tx_ring->queue_index << 4;
+               olinfo_status |= tx_ring->reg_idx << 4;
 
        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
@@ -3349,7 +3522,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                        i = 0;
        }
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -3357,16 +3530,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
        /* we need this if more than one processor can write to our tail
         * at a time, it syncronizes IO on IA64/Altix systems */
        mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-                              struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct net_device *netdev = tx_ring->netdev;
 
        netif_stop_subqueue(netdev, tx_ring->queue_index);
 
@@ -3382,60 +3554,38 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       tx_ring->tx_stats.restart_queue++;
        return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-                            struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
        if (igb_desc_unused(tx_ring) >= size)
                return 0;
-       return __igb_maybe_stop_tx(netdev, tx_ring, size);
+       return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-                                          struct net_device *netdev,
-                                          struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
+                                   struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
        int count = 0;
        int tso = 0;
-       union skb_shared_tx *shtx;
-
-       if (test_bit(__IGB_DOWN, &adapter->state)) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
-       if (skb->len <= 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
+       union skb_shared_tx *shtx = skb_tx(skb);
 
        /* need: 1 descriptor per page,
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
         * otherwise try next time */
-       if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+       if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }
 
-       /*
-        * TODO: check that there currently is no other packet with
-        * time stamping in the queue
-        *
-        * When doing time stamping, keep the connection to the socket
-        * a while longer: it is still needed by skb_hwtstamp_tx(),
-        * called either in igb_tx_hwtstamp() or by our caller when
-        * doing software time stamping.
-        */
-       shtx = skb_tx(skb);
        if (unlikely(shtx->hardware)) {
                shtx->in_progress = 1;
                tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -3450,17 +3600,17 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                tx_flags |= IGB_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
-       tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-                                             &hdr_len) : 0;
-
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+       if (skb_is_gso(skb)) {
+               tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
        }
 
        if (tso)
                tx_flags |= IGB_TX_FLAGS_TSO;
-       else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+       else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -3468,19 +3618,20 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
         * count reflects descriptors mapped, if 0 then mapping error
         * has occured and we need to rewind the descriptor queue
         */
-       count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+       count = igb_tx_map_adv(tx_ring, skb, first);
 
-       if (count) {
-               igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                                skb->len, hdr_len);
-               /* Make sure there is space in the ring for the next send. */
-               igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-       } else {
+       if (!count) {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
                tx_ring->next_to_use = first;
+               return NETDEV_TX_OK;
        }
 
+       igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+       /* Make sure there is space in the ring for the next send. */
+       igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
        return NETDEV_TX_OK;
 }
 
@@ -3489,8 +3640,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_ring *tx_ring;
-
        int r_idx = 0;
+
+       if (test_bit(__IGB_DOWN, &adapter->state)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       if (skb->len <= 0) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
        tx_ring = adapter->multi_tx_table[r_idx];
 
@@ -3498,7 +3659,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
         * to a flow.  Right now, performance is impacted slightly negatively
         * if using multiple tx queues.  If the stack breaks away from a
         * single qdisc implementation, we can look at this again. */
-       return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+       return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -3512,6 +3673,7 @@ static void igb_tx_timeout(struct net_device *netdev)
 
        /* Do the reset outside of interrupt context */
        adapter->tx_timeout_count++;
+
        schedule_work(&adapter->reset_task);
        wr32(E1000_EICS,
             (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3549,6 +3711,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       u32 rx_buffer_len, i;
 
        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3566,44 +3729,29 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
        /* igb_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
-       if (netif_running(netdev))
-               igb_down(adapter);
-
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
         */
 
-       if (max_frame <= IGB_RXBUFFER_256)
-               adapter->rx_buffer_len = IGB_RXBUFFER_256;
-       else if (max_frame <= IGB_RXBUFFER_512)
-               adapter->rx_buffer_len = IGB_RXBUFFER_512;
-       else if (max_frame <= IGB_RXBUFFER_1024)
-               adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-       else if (max_frame <= IGB_RXBUFFER_2048)
-               adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+       if (max_frame <= IGB_RXBUFFER_1024)
+               rx_buffer_len = IGB_RXBUFFER_1024;
+       else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+               rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-               adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-               adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
+               rx_buffer_len = IGB_RXBUFFER_128;
 
-       /* if sr-iov is enabled we need to force buffer size to 1K or larger */
-       if (adapter->vfs_allocated_count &&
-           (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
-               adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-
-       /* adjust allocation if LPE protects us, and we aren't using SBP */
-       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-            (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-               adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       if (netif_running(netdev))
+               igb_down(adapter);
 
        dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
        if (netif_running(netdev))
                igb_up(adapter);
        else
@@ -3624,7 +3772,10 @@ void igb_update_stats(struct igb_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
+       u32 rnbc;
        u16 phy_tmp;
+       int i;
+       u64 bytes, packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -3637,6 +3788,29 @@ void igb_update_stats(struct igb_adapter *adapter)
        if (pci_channel_offline(pdev))
                return;
 
+       bytes = 0;
+       packets = 0;
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+               adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+               netdev->stats.rx_fifo_errors += rqdpc_tmp;
+               bytes += adapter->rx_ring[i].rx_stats.bytes;
+               packets += adapter->rx_ring[i].rx_stats.packets;
+       }
+
+       netdev->stats.rx_bytes = bytes;
+       netdev->stats.rx_packets = packets;
+
+       bytes = 0;
+       packets = 0;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               bytes += adapter->tx_ring[i].tx_stats.bytes;
+               packets += adapter->tx_ring[i].tx_stats.packets;
+       }
+       netdev->stats.tx_bytes = bytes;
+       netdev->stats.tx_packets = packets;
+
+       /* read stats registers */
        adapter->stats.crcerrs += rd32(E1000_CRCERRS);
        adapter->stats.gprc += rd32(E1000_GPRC);
        adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3669,7 +3843,9 @@ void igb_update_stats(struct igb_adapter *adapter)
        adapter->stats.gptc += rd32(E1000_GPTC);
        adapter->stats.gotc += rd32(E1000_GOTCL);
        rd32(E1000_GOTCH); /* clear GOTCL */
-       adapter->stats.rnbc += rd32(E1000_RNBC);
+       rnbc = rd32(E1000_RNBC);
+       adapter->stats.rnbc += rnbc;
+       netdev->stats.rx_fifo_errors += rnbc;
        adapter->stats.ruc += rd32(E1000_RUC);
        adapter->stats.rfc += rd32(E1000_RFC);
        adapter->stats.rjc += rd32(E1000_RJC);
@@ -3712,36 +3888,9 @@ void igb_update_stats(struct igb_adapter *adapter)
 
        /* Fill out the OS statistics structure */
        netdev->stats.multicast = adapter->stats.mprc;
-       netdev->stats.collisions = adapter->stats.colc;
-
-       /* Rx Errors */
-
-       if (hw->mac.type != e1000_82575) {
-               u32 rqdpc_tmp;
-               u64 rqdpc_total = 0;
-               int i;
-               /* Read out drops stats per RX queue.  Notice RQDPC (Receive
-                * Queue Drop Packet Count) stats only gets incremented, if
-                * the DROP_EN but it set (in the SRRCTL register for that
-                * queue).  If DROP_EN bit is NOT set, then the some what
-                * equivalent count is stored in RNBC (not per queue basis).
-                * Also note the drop count is due to lack of available
-                * descriptors.
-                */
-               for (i = 0; i < adapter->num_rx_queues; i++) {
-                       rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
-                       adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
-                       rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
-               }
-               netdev->stats.rx_fifo_errors = rqdpc_total;
-       }
+       netdev->stats.collisions = adapter->stats.colc;
 
-       /* Note RNBC (Receive No Buffers Count) is an not an exact
-        * drop count as the hardware FIFO might save the day.  Thats
-        * one of the reason for saving it in rx_fifo_errors, as its
-        * potentially not a true drop.
-        */
-       netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
+       /* Rx Errors */
 
        /* RLEC on some newer hardware can be incorrect so build
         * our own version based on RUC and ROC */
@@ -3781,14 +3930,12 @@ void igb_update_stats(struct igb_adapter *adapter)
 
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
        struct e1000_hw *hw = &adapter->hw;
        u32 icr = rd32(E1000_ICR);
-
        /* reading ICR causes bit 31 of EICR to be cleared */
 
-       if(icr & E1000_ICR_DOUTSYNC) {
+       if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
        }
@@ -3810,119 +3957,79 @@ static irqreturn_t igb_msix_other(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t igb_msix_tx(int irq, void *data)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-       struct igb_ring *tx_ring = data;
-       struct igb_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
+       u32 itr_val = q_vector->itr_val & 0x7FFC;
 
-#ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_tx_dca(tx_ring);
-#endif
+       if (!q_vector->set_itr)
+               return;
 
-       tx_ring->total_bytes = 0;
-       tx_ring->total_packets = 0;
+       if (!itr_val)
+               itr_val = 0x4;
 
-       /* auto mask will automatically reenable the interrupt when we write
-        * EICS */
-       if (!igb_clean_tx_irq(tx_ring))
-               /* Ring was not completely cleaned, so fire another interrupt */
-               wr32(E1000_EICS, tx_ring->eims_value);
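+       /* 82575-style EITR expects the interval mirrored into the upper half
+        * of the register (hence the shift); other MACs take a single copy
+        * with the high flag bit set, as the old per-ring write did.
+        */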
+       if (q_vector->itr_shift)
+               itr_val |= itr_val << q_vector->itr_shift;
        else
-               wr32(E1000_EIMS, tx_ring->eims_value);
-
-       return IRQ_HANDLED;
-}
+               itr_val |= 0x80000000;
 
-static void igb_write_itr(struct igb_ring *ring)
-{
-       struct e1000_hw *hw = &ring->adapter->hw;
-       if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-               switch (hw->mac.type) {
-               case e1000_82576:
-                       wr32(ring->itr_register, ring->itr_val |
-                            0x80000000);
-                       break;
-               default:
-                       wr32(ring->itr_register, ring->itr_val |
-                            (ring->itr_val << 16));
-                       break;
-               }
-               ring->set_itr = 0;
-       }
+       writel(itr_val, q_vector->itr_register);
+       q_vector->set_itr = 0;
 }
 
-static irqreturn_t igb_msix_rx(int irq, void *data)
+static irqreturn_t igb_msix_ring(int irq, void *data)
 {
-       struct igb_ring *rx_ring = data;
+       struct igb_q_vector *q_vector = data;
 
-       /* Write the ITR value calculated at the end of the
-        * previous interrupt.
-        */
-
-       igb_write_itr(rx_ring);
+       /* Write the ITR value calculated from the previous interrupt. */
+       igb_write_itr(q_vector);
 
-       if (napi_schedule_prep(&rx_ring->napi))
-               __napi_schedule(&rx_ring->napi);
+       napi_schedule(&q_vector->napi);
 
-#ifdef CONFIG_IGB_DCA
-       if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_rx_dca(rx_ring);
-#endif
-               return IRQ_HANDLED;
+       return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-       u32 dca_rxctrl;
-       struct igb_adapter *adapter = rx_ring->adapter;
+       struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        int cpu = get_cpu();
-       int q = rx_ring->reg_idx;
 
-       if (rx_ring->cpu != cpu) {
-               dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-               if (hw->mac.type == e1000_82576) {
-                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-                       dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                     E1000_DCA_RXCTRL_CPUID_SHIFT;
+       if (q_vector->cpu == cpu)
+               goto out_no_update;
+
+       if (q_vector->tx_ring) {
+               int q = q_vector->tx_ring->reg_idx;
+               u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+               if (hw->mac.type == e1000_82575) {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                                     E1000_DCA_TXCTRL_CPUID_SHIFT;
+               }
+               dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+               wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+       }
+       if (q_vector->rx_ring) {
+               int q = q_vector->rx_ring->reg_idx;
+               u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+               if (hw->mac.type == e1000_82575) {
                        dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
                        dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               } else {
+                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+                       dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                                     E1000_DCA_RXCTRL_CPUID_SHIFT;
                }
                dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
                wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-               rx_ring->cpu = cpu;
-       }
-       put_cpu();
-}
-
-static void igb_update_tx_dca(struct igb_ring *tx_ring)
-{
-       u32 dca_txctrl;
-       struct igb_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       int cpu = get_cpu();
-       int q = tx_ring->reg_idx;
-
-       if (tx_ring->cpu != cpu) {
-               dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-               if (hw->mac.type == e1000_82576) {
-                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                     E1000_DCA_TXCTRL_CPUID_SHIFT;
-               } else {
-                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-               }
-               dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-               wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-               tx_ring->cpu = cpu;
        }
+       q_vector->cpu = cpu;
+out_no_update:
        put_cpu();
 }
 
@@ -3937,13 +4044,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
        /* Always use CB2 mode, difference is masked in the CB driver. */
        wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i].cpu = -1;
-               igb_update_tx_dca(&adapter->tx_ring[i]);
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].cpu = -1;
-               igb_update_rx_dca(&adapter->rx_ring[i]);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               q_vector->cpu = -1;
+               igb_update_dca(q_vector);
        }
 }
 
@@ -3972,7 +4076,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                        /* without this a class_device is left
-                        * hanging around in the sysfs model */
+                        * hanging around in the sysfs model */
                        dca_remove_requester(dev);
                        dev_info(&adapter->pdev->dev, "DCA disabled\n");
                        adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -4004,12 +4108,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
 
        for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
                ping = E1000_PF_CONTROL_MSG;
-               if (adapter->vf_data[i].clear_to_send)
+               if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
                        ping |= E1000_VT_MSGTYPE_CTS;
                igb_write_mbx(hw, &ping, 1, i);
        }
 }
 
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vmolr = rd32(E1000_VMOLR(vf));
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
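+       /* clear the promiscuous flags and the matching VMOLR receive enable
+        * bits before applying whatever mode the VF requested */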
+       vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+                           IGB_VF_FLAG_MULTI_PROMISC);
+       vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+       if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+               vmolr |= E1000_VMOLR_MPME;
+               *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+       } else {
+               /*
+                * if we have hashes and we are clearing a multicast promisc
+                * flag we need to write the hashes to the MTA as this step
+                * was previously skipped
+                */
+               if (vf_data->num_vf_mc_hashes > 30) {
+                       vmolr |= E1000_VMOLR_MPME;
+               } else if (vf_data->num_vf_mc_hashes) {
+                       int j;
+                       vmolr |= E1000_VMOLR_ROMPE;
+                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+                               igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+               }
+       }
+
+       wr32(E1000_VMOLR(vf), vmolr);
+
+       /* there are flags left unprocessed, likely not supported */
+       if (*msgbuf & E1000_VT_MSGINFO_MASK)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                  u32 *msgbuf, u32 vf)
 {
@@ -4018,18 +4161,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
        struct vf_data_storage *vf_data = &adapter->vf_data[vf];
        int i;
 
-       /* only up to 30 hash values supported */
-       if (n > 30)
-               n = 30;
-
-       /* salt away the number of multi cast addresses assigned
+       /* salt away the number of multicast addresses assigned
         * to this VF for later use to restore when the PF multi cast
         * list changes
         */
        vf_data->num_vf_mc_hashes = n;
 
-       /* VFs are limited to using the MTA hash table for their multicast
-        * addresses */
+       /* only up to 30 hash values supported */
+       if (n > 30)
+               n = 30;
+
+       /* store the hashes for later use */
        for (i = 0; i < n; i++)
                vf_data->vf_mc_hashes[i] = hash_list[i];
 
@@ -4046,9 +4188,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
        int i, j;
 
        for (i = 0; i < adapter->vfs_allocated_count; i++) {
+               u32 vmolr = rd32(E1000_VMOLR(i));
+               vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
                vf_data = &adapter->vf_data[i];
-               for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
-                       igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+
+               if ((vf_data->num_vf_mc_hashes > 30) ||
+                   (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+                       vmolr |= E1000_VMOLR_MPME;
+               } else if (vf_data->num_vf_mc_hashes) {
+                       vmolr |= E1000_VMOLR_ROMPE;
+                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+                               igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+               }
+               wr32(E1000_VMOLR(i), vmolr);
        }
 }
 
@@ -4086,7 +4239,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
        struct e1000_hw *hw = &adapter->hw;
        u32 reg, i;
 
-       /* It is an error to call this function when VFs are not enabled */
+       /* The vlvf table only exists on 82576 hardware and newer */
+       if (hw->mac.type < e1000_82576)
+               return -1;
+
+       /* we only need to do this if VMDq is enabled */
        if (!adapter->vfs_allocated_count)
                return -1;
 
@@ -4116,16 +4273,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
                        /* if !enabled we need to set this up in vfta */
                        if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
-                               /* add VID to filter table, if bit already set
-                                * PF must have added it outside of table */
-                               if (igb_vfta_set(hw, vid, true))
-                                       reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
-                                               adapter->vfs_allocated_count);
+                               /* add VID to filter table */
+                               igb_vfta_set(hw, vid, true);
                                reg |= E1000_VLVF_VLANID_ENABLE;
                        }
                        reg &= ~E1000_VLVF_VLANID_MASK;
                        reg |= vid;
-
                        wr32(E1000_VLVF(i), reg);
 
                        /* do not modify RLPML for PF devices */
@@ -4141,8 +4294,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
                                reg |= size;
                                wr32(E1000_VMOLR(vf), reg);
                        }
-                       adapter->vf_data[vf].vlans_enabled++;
 
+                       adapter->vf_data[vf].vlans_enabled++;
                        return 0;
                }
        } else {
@@ -4184,15 +4337,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
        return igb_vlvf_set(adapter, vid, add, vf);
 }
 
-static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 {
-       struct e1000_hw *hw = &adapter->hw;
-
-       /* disable mailbox functionality for vf */
-       adapter->vf_data[vf].clear_to_send = false;
+       /* clear all flags */
+       adapter->vf_data[vf].flags = 0;
+       adapter->vf_data[vf].last_nack = jiffies;
 
        /* reset offloads to defaults */
-       igb_set_vmolr(hw, vf);
+       igb_set_vmolr(adapter, vf);
 
        /* reset vlans for device */
        igb_clear_vf_vfta(adapter, vf);
@@ -4204,7 +4356,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
        igb_set_rx_mode(adapter->netdev);
 }
 
-static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+       unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+       /* generate a new mac address as we were hotplug removed/added */
+       random_ether_addr(vf_mac);
+
+       /* process remaining reset events */
+       igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 {
        struct e1000_hw *hw = &adapter->hw;
        unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4213,7 +4376,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
        u8 *addr = (u8 *)(&msgbuf[1]);
 
        /* process all the same items cleared in a function level reset */
-       igb_vf_reset_event(adapter, vf);
+       igb_vf_reset(adapter, vf);
 
        /* set vf mac address */
        igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
@@ -4224,8 +4387,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
        reg = rd32(E1000_VFRE);
        wr32(E1000_VFRE, reg | (1 << vf));
 
-       /* enable mailbox functionality for vf */
-       adapter->vf_data[vf].clear_to_send = true;
+       adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
 
        /* reply to reset with ack and vf mac address */
        msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4235,66 +4397,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
 {
-               unsigned char *addr = (char *)&msg[1];
-               int err = -1;
+       unsigned char *addr = (char *)&msg[1];
+       int err = -1;
 
-               if (is_valid_ether_addr(addr))
-                       err = igb_set_vf_mac(adapter, vf, addr);
-
-               return err;
+       if (is_valid_ether_addr(addr))
+               err = igb_set_vf_mac(adapter, vf, addr);
 
+       return err;
 }
 
 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
 {
        struct e1000_hw *hw = &adapter->hw;
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
        u32 msg = E1000_VT_MSGTYPE_NACK;
 
        /* if device isn't clear to send it shouldn't be reading either */
-       if (!adapter->vf_data[vf].clear_to_send)
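+       /* rate limit the NACK: a VF that is not clear to send is answered at
+        * most once every 2 seconds */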
+       if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+           time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
                igb_write_mbx(hw, &msg, 1, vf);
-}
-
-
-static void igb_msg_task(struct igb_adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       u32 vf;
-
-       for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
-               /* process any reset requests */
-               if (!igb_check_for_rst(hw, vf)) {
-                       adapter->vf_data[vf].clear_to_send = false;
-                       igb_vf_reset_event(adapter, vf);
-               }
-
-               /* process any messages pending */
-               if (!igb_check_for_msg(hw, vf))
-                       igb_rcv_msg_from_vf(adapter, vf);
-
-               /* process any acks */
-               if (!igb_check_for_ack(hw, vf))
-                       igb_rcv_ack_from_vf(adapter, vf);
-
+               vf_data->last_nack = jiffies;
        }
 }
 
-static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 {
-       u32 mbx_size = E1000_VFMAILBOX_SIZE;
-       u32 msgbuf[mbx_size];
+       struct pci_dev *pdev = adapter->pdev;
+       u32 msgbuf[E1000_VFMAILBOX_SIZE];
        struct e1000_hw *hw = &adapter->hw;
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
        s32 retval;
 
-       retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
+       retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
 
        if (retval)
-               dev_err(&adapter->pdev->dev,
-                       "Error receiving message from VF\n");
+               dev_err(&pdev->dev, "Error receiving message from VF\n");
 
        /* this is a message we already processed, do nothing */
        if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
-               return retval;
+               return;
 
        /*
         * until the vf completes a reset it should not be
@@ -4303,20 +4444,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 
        if (msgbuf[0] == E1000_VF_RESET) {
                igb_vf_reset_msg(adapter, vf);
-
-               return retval;
+               return;
        }
 
-       if (!adapter->vf_data[vf].clear_to_send) {
-               msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
-               igb_write_mbx(hw, msgbuf, 1, vf);
-               return retval;
+       if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+               msgbuf[0] = E1000_VT_MSGTYPE_NACK;
+               if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+                       igb_write_mbx(hw, msgbuf, 1, vf);
+                       vf_data->last_nack = jiffies;
+               }
+               return;
        }
 
        switch ((msgbuf[0] & 0xFFFF)) {
        case E1000_VF_SET_MAC_ADDR:
                retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
                break;
+       case E1000_VF_SET_PROMISC:
+               retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+               break;
        case E1000_VF_SET_MULTICAST:
                retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
                break;
@@ -4341,8 +4487,26 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
        msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
 
        igb_write_mbx(hw, msgbuf, 1, vf);
+}
 
-       return retval;
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vf;
+
+       for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+               /* process any reset requests */
+               if (!igb_check_for_rst(hw, vf))
+                       igb_vf_reset_event(adapter, vf);
+
+               /* process any messages pending */
+               if (!igb_check_for_msg(hw, vf))
+                       igb_rcv_msg_from_vf(adapter, vf);
+
+               /* process any acks */
+               if (!igb_check_for_ack(hw, vf))
+                       igb_rcv_ack_from_vf(adapter, vf);
+       }
 }
 
 /**
@@ -4379,15 +4543,15 @@ static void igb_set_uta(struct igb_adapter *adapter)
  **/
 static irqreturn_t igb_intr_msi(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        struct e1000_hw *hw = &adapter->hw;
        /* read ICR disables interrupts using IAM */
        u32 icr = rd32(E1000_ICR);
 
-       igb_write_itr(adapter->rx_ring);
+       igb_write_itr(q_vector);
 
-       if(icr & E1000_ICR_DOUTSYNC) {
+       if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
        }
@@ -4398,7 +4562,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       napi_schedule(&adapter->rx_ring[0].napi);
+       napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
@@ -4410,8 +4574,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
  **/
 static irqreturn_t igb_intr(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        struct e1000_hw *hw = &adapter->hw;
        /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
         * need for the IMC write */
@@ -4419,14 +4583,14 @@ static irqreturn_t igb_intr(int irq, void *data)
        if (!icr)
                return IRQ_NONE;  /* Not our interrupt */
 
-       igb_write_itr(adapter->rx_ring);
+       igb_write_itr(q_vector);
 
        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
         * not set, then the adapter didn't send an interrupt */
        if (!(icr & E1000_ICR_INT_ASSERTED))
                return IRQ_NONE;
 
-       if(icr & E1000_ICR_DOUTSYNC) {
+       if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
        }
@@ -4438,26 +4602,27 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       napi_schedule(&adapter->rx_ring[0].napi);
+       napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
 
-static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
+       struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
 
-       if (adapter->itr_setting & 3) {
-               if (adapter->num_rx_queues == 1)
+       if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+           (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+               if (!adapter->msix_entries)
                        igb_set_itr(adapter);
                else
-                       igb_update_ring_itr(rx_ring);
+                       igb_update_ring_itr(q_vector);
        }
 
        if (!test_bit(__IGB_DOWN, &adapter->state)) {
                if (adapter->msix_entries)
-                       wr32(E1000_EIMS, rx_ring->eims_value);
+                       wr32(E1000_EIMS, q_vector->eims_value);
                else
                        igb_irq_enable(adapter);
        }
@@ -4470,76 +4635,94 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
  **/
 static int igb_poll(struct napi_struct *napi, int budget)
 {
-       struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
-       int work_done = 0;
+       struct igb_q_vector *q_vector = container_of(napi,
+                                                    struct igb_q_vector,
+                                                    napi);
+       int tx_clean_complete = 1, work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
-       if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_rx_dca(rx_ring);
+       if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+               igb_update_dca(q_vector);
 #endif
-       igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
+       if (q_vector->tx_ring)
+               tx_clean_complete = igb_clean_tx_irq(q_vector);
 
-       if (rx_ring->buddy) {
-#ifdef CONFIG_IGB_DCA
-               if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-                       igb_update_tx_dca(rx_ring->buddy);
-#endif
-               if (!igb_clean_tx_irq(rx_ring->buddy))
-                       work_done = budget;
-       }
+       if (q_vector->rx_ring)
+               igb_clean_rx_irq_adv(q_vector, &work_done, budget);
+
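+       /* if the Tx ring was not completely cleaned claim the full budget so
+        * NAPI polls again instead of re-arming the interrupt */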
+       if (!tx_clean_complete)
+               work_done = budget;
 
        /* If not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
-               igb_rx_irq_enable(rx_ring);
+               igb_ring_irq_enable(q_vector);
        }
 
        return work_done;
 }
 
 /**
- * igb_hwtstamp - utility function which checks for TX time stamp
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
  * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                   struct skb_shared_hwtstamps *shhwtstamps,
+                                   u64 regval)
+{
+       u64 ns;
+
+       ns = timecounter_cyc2time(&adapter->clock, regval);
+       timecompare_update(&adapter->compare, ns);
+       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+       shhwtstamps->hwtstamp = ns_to_ktime(ns);
+       shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
+}
+
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: pointer to q_vector containing needed info
  * @skb: packet that was just sent
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow only one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
+       struct igb_adapter *adapter = q_vector->adapter;
        union skb_shared_tx *shtx = skb_tx(skb);
        struct e1000_hw *hw = &adapter->hw;
+       struct skb_shared_hwtstamps shhwtstamps;
+       u64 regval;
 
-       if (unlikely(shtx->hardware)) {
-               u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
-               if (valid) {
-                       u64 regval = rd32(E1000_TXSTMPL);
-                       u64 ns;
-                       struct skb_shared_hwtstamps shhwtstamps;
-
-                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-                       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-                       ns = timecounter_cyc2time(&adapter->clock,
-                                                 regval);
-                       timecompare_update(&adapter->compare, ns);
-                       shhwtstamps.hwtstamp = ns_to_ktime(ns);
-                       shhwtstamps.syststamp =
-                               timecompare_transform(&adapter->compare, ns);
-                       skb_tstamp_tx(skb, &shhwtstamps);
-               }
-       }
+       /* exit early if hw timestamping was not requested for this skb or
+        * no valid TX timestamp is available */
+       if (likely(!shtx->hardware) ||
+           !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
+               return;
+
+       regval = rd32(E1000_TXSTMPL);
+       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+       igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+       skb_tstamp_tx(skb, &shhwtstamps);
 }
 
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: pointer to q_vector containing needed info
  * returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
-       struct igb_adapter *adapter = tx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct igb_ring *tx_ring = q_vector->tx_ring;
+       struct net_device *netdev = tx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -4570,10 +4753,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
                                total_packets += segs;
                                total_bytes += bytecount;
 
-                               igb_tx_hwtstamp(adapter, skb);
+                               igb_tx_hwtstamp(q_vector, skb);
                        }
 
-                       igb_unmap_and_free_tx_resource(adapter, buffer_info);
+                       igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
                        tx_desc->wb.status = 0;
 
                        i++;
@@ -4596,7 +4779,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !(test_bit(__IGB_DOWN, &adapter->state))) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       tx_ring->tx_stats.restart_queue++;
                }
        }
 
@@ -4611,7 +4794,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
                         E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&tx_ring->pdev->dev,
                                "Detected Tx Unit Hang\n"
                                "  Tx Queue             <%d>\n"
                                "  TDH                  <%x>\n"
@@ -4624,11 +4807,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
                                "  jiffies              <%lx>\n"
                                "  desc.status          <%x>\n",
                                tx_ring->queue_index,
-                               readl(adapter->hw.hw_addr + tx_ring->head),
-                               readl(adapter->hw.hw_addr + tx_ring->tail),
+                               readl(tx_ring->head),
+                               readl(tx_ring->tail),
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
-                               tx_ring->buffer_info[i].time_stamp,
+                               tx_ring->buffer_info[eop].time_stamp,
                                eop,
                                jiffies,
                                eop_desc->wb.status);
@@ -4639,43 +4822,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
        tx_ring->total_packets += total_packets;
        tx_ring->tx_stats.bytes += total_bytes;
        tx_ring->tx_stats.packets += total_packets;
-       netdev->stats.tx_bytes += total_bytes;
-       netdev->stats.tx_packets += total_packets;
        return (count < tx_ring->count);
 }
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet
- * @status: descriptor status field as written by hardware
- * @rx_desc: receive descriptor containing vlan and type information.
- * @skb: pointer to sk_buff to be indicated to stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
  **/
-static void igb_receive_skb(struct igb_ring *ring, u8 status,
-                            union e1000_adv_rx_desc * rx_desc,
-                            struct sk_buff *skb)
-{
-       struct igb_adapter * adapter = ring->adapter;
-       bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-
-       skb_record_rx_queue(skb, ring->queue_index);
-       if (vlan_extracted)
-               vlan_gro_receive(&ring->napi, adapter->vlgrp,
-                                le16_to_cpu(rx_desc->wb.upper.vlan),
-                                skb);
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+
+       if (vlan_tag)
+               vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
+                                vlan_tag, skb);
        else
-               napi_gro_receive(&ring->napi, skb);
+               napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
-       if ((status_err & E1000_RXD_STAT_IXSM) ||
-           (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+       if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+            (status_err & E1000_RXD_STAT_IXSM))
                return;
+
        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4684,9 +4862,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets, (aka let the stack check the crc32c)
                 */
-               if (!((adapter->hw.mac.type == e1000_82576) &&
-                     (skb->len == 60)))
-                       adapter->hw_csum_err++;
+               if (!((skb->len == 60) &&
+                     (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
+                       ring->rx_stats.csum_err++;
+
                /* let the stack verify checksum errors */
                return;
        }
@@ -4694,11 +4873,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
        if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
-       adapter->hw_csum_good++;
+       dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+                                   struct sk_buff *skb)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       u64 regval;
+
+       /*
+        * If this bit is set, then the RX registers contain the time stamp. No
+        * other packet will be time stamped until we read these registers, so
+        * read the registers to make them available again. Because only one
+        * packet can be time stamped at a time, we know that the register
+        * values must belong to this one here and therefore we don't need to
+        * compare any of the additional attributes stored for it.
+        *
+        * If nothing went wrong, then it should have a skb_shared_tx that we
+        * can turn into a skb_shared_hwtstamps.
+        */
+       if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
+               return;
+       if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+               return;
+
+       regval = rd32(E1000_RXSTMPL);
+       regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+
+       igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
        /* HW will not DMA in data larger than the given buffer, even if it
@@ -4707,18 +4913,17 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
         */
        u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
                   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-       if (hlen > adapter->rx_ps_hdr_size)
-               hlen = adapter->rx_ps_hdr_size;
+       if (hlen > rx_ring->rx_buffer_len)
+               hlen = rx_ring->rx_buffer_len;
        return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
-                                int *work_done, int budget)
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+                                 int *work_done, int budget)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
-       struct e1000_hw *hw = &adapter->hw;
-       struct pci_dev *pdev = adapter->pdev;
+       struct igb_ring *rx_ring = q_vector->rx_ring;
+       struct net_device *netdev = rx_ring->netdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        union e1000_adv_rx_desc *rx_desc , *next_rxd;
        struct igb_buffer *buffer_info , *next_buffer;
        struct sk_buff *skb;
@@ -4728,6 +4933,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
        unsigned int i;
        u32 staterr;
        u16 length;
+       u16 vlan_tag;
 
        i = rx_ring->next_to_clean;
        buffer_info = &rx_ring->buffer_info[i];
@@ -4754,23 +4960,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                cleaned = true;
                cleaned_count++;
 
-               /* this is the fast path for the non-packet split case */
-               if (!adapter->rx_ps_hdr_size) {
-                       pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_buffer_len,
-                                        PCI_DMA_FROMDEVICE);
-                       buffer_info->dma = 0;
-                       skb_put(skb, length);
-                       goto send_up;
-               }
-
                if (buffer_info->dma) {
-                       u16 hlen = igb_get_hlen(adapter, rx_desc);
                        pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_ps_hdr_size,
+                                        rx_ring->rx_buffer_len,
                                         PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
-                       skb_put(skb, hlen);
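+                       /* buffers of 1K or more hold the entire frame in the
+                        * skb; smaller (packet split) buffers carry only the
+                        * header here and the data follows from the page */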
+                       if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+                               skb_put(skb, length);
+                               goto send_up;
+                       }
+                       skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
                }
 
                if (length) {
@@ -4783,8 +4982,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                                                buffer_info->page_offset,
                                                length);
 
-                       if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-                           (page_count(buffer_info->page) != 1))
+                       if (page_count(buffer_info->page) != 1)
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);
@@ -4803,60 +5001,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                        goto next_desc;
                }
 send_up:
-               /*
-                * If this bit is set, then the RX registers contain
-                * the time stamp. No other packet will be time
-                * stamped until we read these registers, so read the
-                * registers to make them available again. Because
-                * only one packet can be time stamped at a time, we
-                * know that the register values must belong to this
-                * one here and therefore we don't need to compare
-                * any of the additional attributes stored for it.
-                *
-                * If nothing went wrong, then it should have a
-                * skb_shared_tx that we can turn into a
-                * skb_shared_hwtstamps.
-                *
-                * TODO: can time stamping be triggered (thus locking
-                * the registers) without the packet reaching this point
-                * here? In that case RX time stamping would get stuck.
-                *
-                * TODO: in "time stamp all packets" mode this bit is
-                * not set. Need a global flag for this mode and then
-                * always read the registers. Cannot be done without
-                * a race condition.
-                */
-               if (unlikely(staterr & E1000_RXD_STAT_TS)) {
-                       u64 regval;
-                       u64 ns;
-                       struct skb_shared_hwtstamps *shhwtstamps =
-                               skb_hwtstamps(skb);
-
-                       WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
-                            "igb: no RX time stamp available for time stamped packet");
-                       regval = rd32(E1000_RXSTMPL);
-                       regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-                       ns = timecounter_cyc2time(&adapter->clock, regval);
-                       timecompare_update(&adapter->compare, ns);
-                       memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-                       shhwtstamps->hwtstamp = ns_to_ktime(ns);
-                       shhwtstamps->syststamp =
-                               timecompare_transform(&adapter->compare, ns);
-               }
-
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
+               igb_rx_hwtstamp(q_vector, staterr, skb);
                total_bytes += skb->len;
                total_packets++;
 
-               igb_rx_checksum_adv(adapter, staterr, skb);
+               igb_rx_checksum_adv(rx_ring, staterr, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
+               skb_record_rx_queue(skb, rx_ring->queue_index);
 
-               igb_receive_skb(rx_ring, staterr, rx_desc, skb);
+               vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
+                           le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+
+               igb_receive_skb(q_vector, skb, vlan_tag);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -4883,8 +5045,6 @@ next_desc:
        rx_ring->total_bytes += total_bytes;
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
-       netdev->stats.rx_bytes += total_bytes;
-       netdev->stats.rx_packets += total_packets;
        return cleaned;
 }
 
@@ -4892,12 +5052,9 @@ next_desc:
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-                                    int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -4907,19 +5064,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
 
-       if (adapter->rx_ps_hdr_size)
-               bufsz = adapter->rx_ps_hdr_size;
-       else
-               bufsz = adapter->rx_buffer_len;
+       bufsz = rx_ring->rx_buffer_len;
 
        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-               if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
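+               /* a buffer size below 1K means packet split: the skb holds
+                * the header while a half page receives the packet data */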
+               if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = alloc_page(GFP_ATOMIC);
                                if (!buffer_info->page) {
-                                       adapter->alloc_rx_buff_failed++;
+                                       rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
@@ -4927,7 +5081,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
-                               pci_map_page(pdev, buffer_info->page,
+                               pci_map_page(rx_ring->pdev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
@@ -4936,18 +5090,19 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                if (!buffer_info->skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
 
                        buffer_info->skb = skb;
-                       buffer_info->dma = pci_map_single(pdev, skb->data,
+                       buffer_info->dma = pci_map_single(rx_ring->pdev,
+                                                         skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
-               if (adapter->rx_ps_hdr_size) {
+               if (bufsz < IGB_RXBUFFER_1024) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
@@ -4976,7 +5131,7 @@ no_buffers:
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               writel(i, rx_ring->tail);
        }
 }
 
@@ -5035,13 +5190,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct hwtstamp_config config;
-       u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
-       u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
-       u32 tsync_rx_ctl_type = 0;
+       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
        u32 tsync_rx_cfg = 0;
-       int is_l4 = 0;
-       int is_l2 = 0;
-       short port = 319; /* PTP */
+       bool is_l4 = false;
+       bool is_l2 = false;
        u32 regval;
 
        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -5053,10 +5206,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 
        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
-               tsync_tx_ctl_bit = 0;
-               break;
+               tsync_tx_ctl = 0;
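+               /* fall through */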
        case HWTSTAMP_TX_ON:
-               tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
                break;
        default:
                return -ERANGE;
@@ -5064,7 +5215,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               tsync_rx_ctl_bit = 0;
+               tsync_rx_ctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -5075,86 +5226,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
                 * possible to time stamp both Sync and Delay_Req messages
                 * => fall back to time stamping all packets
                 */
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-               is_l4 = 1;
+               is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-               is_l4 = 1;
+               is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-               is_l2 = 1;
-               is_l4 = 1;
+               is_l2 = true;
+               is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-               is_l2 = 1;
-               is_l4 = 1;
+               is_l2 = true;
+               is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-               is_l2 = 1;
+               is_l2 = true;
                break;
        default:
                return -ERANGE;
        }
 
+       /* 82575 hardware does not support per-packet time stamping, so any
+        * request other than turning it off has to be rejected */
+       if (hw->mac.type == e1000_82575) {
+               if (tsync_rx_ctl | tsync_tx_ctl)
+                       return -EINVAL;
+               return 0;
+       }
+
        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
-       regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+       regval &= ~E1000_TSYNCTXCTL_ENABLED;
+       regval |= tsync_tx_ctl;
        wr32(E1000_TSYNCTXCTL, regval);
 
-       /* enable/disable RX, define which PTP packets are time stamped */
+       /* enable/disable RX */
        regval = rd32(E1000_TSYNCRXCTL);
-       regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
-       regval = (regval & ~0xE) | tsync_rx_ctl_type;
+       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+       regval |= tsync_rx_ctl;
        wr32(E1000_TSYNCRXCTL, regval);
-       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
 
-       /*
-        * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
-        *                                          (Ethertype to filter on)
-        * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
-        * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
-        */
-       wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
-
-       /* L4 Queue Filter[0]: only filter by source and destination port */
-       wr32(E1000_SPQF0, htons(port));
-       wr32(E1000_IMIREXT(0), is_l4 ?
-            ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
-       wr32(E1000_IMIR(0), is_l4 ?
-            (htons(port)
-             | (0<<16) /* immediate interrupt disabled */
-             | 0 /* (1<<17) bit cleared: do not bypass
-                    destination port check */)
-               : 0);
-       wr32(E1000_FTQF0, is_l4 ?
-            (0x11 /* UDP */
-             | (1<<15) /* VF not compared */
-             | (1<<27) /* Enable Timestamping */
-             | (7<<28) /* only source port filter enabled,
-                          source/target address and protocol
-                          masked */)
-            : ((1<<15) | (15<<28) /* all mask bits set = filter not
-                                     enabled */));
+       /* define which PTP packets are time stamped */
+       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
 
+       /* define ethertype filter for timestamped packets */
+       if (is_l2)
+               wr32(E1000_ETQF(3),
+                               (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+                                E1000_ETQF_1588 | /* enable timestamping */
+                                ETH_P_1588));     /* 1588 eth protocol type */
+       else
+               wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+       /* L4 Queue Filter[3]: filter by destination port and protocol */
+       if (is_l4) {
+               u32 ftqf = (IPPROTO_UDP /* UDP */
+                       | E1000_FTQF_VF_BP /* VF not compared */
+                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+                       | E1000_FTQF_MASK); /* mask all inputs */
+               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+               wr32(E1000_IMIR(3), htons(PTP_PORT));
+               wr32(E1000_IMIREXT(3),
+                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+               if (hw->mac.type == e1000_82576) {
+                       /* enable source port check */
+                       wr32(E1000_SPQF(3), htons(PTP_PORT));
+                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+               }
+               wr32(E1000_FTQF(3), ftqf);
+       } else {
+               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+       }
        wrfl();
 
        adapter->hwtstamp_config = config;
@@ -5231,21 +5393,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
 
-               /* enable VLAN receive filtering */
+               /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
-               igb_update_mng_vlan(adapter);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
-
-               if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
-                       igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-                       adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-               }
        }
 
        igb_rlpml_set(adapter);
@@ -5260,16 +5416,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
 
-       if ((hw->mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-           (vid == adapter->mng_vlan_id))
-               return;
-
-       /* add vid to vlvf if sr-iov is enabled,
-        * if that fails add directly to filter table */
-       if (igb_vlvf_set(adapter, vid, true, pf_id))
-               igb_vfta_set(hw, vid, true);
+       /* attempt to add filter to vlvf array */
+       igb_vlvf_set(adapter, vid, true, pf_id);
 
+       /* add the filter since PF can receive vlans w/o entry in vlvf */
+       igb_vfta_set(hw, vid, true);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5277,6 +5428,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
+       s32 err;
 
        igb_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5284,17 +5436,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);
 
-       if ((adapter->hw.mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-           (vid == adapter->mng_vlan_id)) {
-               /* release control to f/w */
-               igb_release_hw_control(adapter);
-               return;
-       }
+       /* remove vlan from VLVF table array */
+       err = igb_vlvf_set(adapter, vid, false, pf_id);
 
-       /* remove vid from vlvf if sr-iov is enabled,
-        * if not in vlvf remove from vfta */
-       if (igb_vlvf_set(adapter, vid, false, pf_id))
+       /* if vid was not present in VLVF just remove it from table */
+       if (err)
                igb_vfta_set(hw, vid, false);
 }
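
[Editor's note, not part of this patch: igb_vlan_rx_add_vid() and igb_vlan_rx_kill_vid() are reached from the 8021q layer through net_device_ops; a sketch of the wiring as it looks in this kernel generation (the real igb table contains many more callbacks):]

	static const struct net_device_ops igb_netdev_ops_sketch = {
		/* ... other callbacks elided ... */
		.ndo_vlan_rx_register = igb_vlan_rx_register,
		.ndo_vlan_rx_add_vid  = igb_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
	};

[With the change above, adding a VID always programs the VFTA so the PF still receives the VLAN even when no VLVF slot is available, while removing a VID only clears the VFTA entry if the VID was not found in the VLVF array.]
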
 
@@ -5360,9 +5506,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
        if (netif_running(netdev))
                igb_close(netdev);
 
-       igb_reset_interrupt_capability(adapter);
-
-       igb_free_queues(adapter);
+       igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -5457,9 +5601,7 @@ static int igb_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
-       igb_set_interrupt_capability(adapter);
-
-       if (igb_alloc_queues(adapter)) {
+       if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
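
[Editor's note, not part of this patch: judging only from the two calls it replaces here and in __igb_shutdown() above, igb_init_interrupt_scheme() consolidates interrupt-mode selection and queue allocation behind one entry point. A rough sketch under that assumption; igb_alloc_q_vectors is an assumed helper name from the q_vector rework, and the real ordering and error unwinding may differ:]

	static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
	{
		int err;

		/* choose MSI-X / MSI / legacy and size the queue counts */
		igb_set_interrupt_capability(adapter);

		/* allocate the q_vectors that bundle rings with NAPI contexts */
		err = igb_alloc_q_vectors(adapter);
		if (err)
			return err;   /* real code also unwinds partial setup here */

		/* allocate the tx/rx ring structures themselves */
		return igb_alloc_queues(adapter);
	}
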
@@ -5511,22 +5653,16 @@ static void igb_netpoll(struct net_device *netdev)
        int i;
 
        if (!adapter->msix_entries) {
+               struct igb_q_vector *q_vector = adapter->q_vector[0];
                igb_irq_disable(adapter);
-               napi_schedule(&adapter->rx_ring[0].napi);
+               napi_schedule(&q_vector->napi);
                return;
        }
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *tx_ring = &adapter->tx_ring[i];
-               wr32(E1000_EIMC, tx_ring->eims_value);
-               igb_clean_tx_irq(tx_ring);
-               wr32(E1000_EIMS, tx_ring->eims_value);
-       }
-
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = &adapter->rx_ring[i];
-               wr32(E1000_EIMC, rx_ring->eims_value);
-               napi_schedule(&rx_ring->napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               wr32(E1000_EIMC, q_vector->eims_value);
+               napi_schedule(&q_vector->napi);
        }
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
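
[Editor's note, not part of this patch: the netpoll loop above only touches fields of the new q_vector abstraction. From the usage in this file (q_vector->napi, q_vector->eims_value, adapter->q_vector[i], adapter->num_q_vectors), its layout is roughly the sketch below, not the actual igb.h definition:]

	struct igb_q_vector {
		struct igb_adapter *adapter;  /* back-pointer, assumed */
		struct napi_struct napi;      /* one NAPI context per interrupt vector */
		u32 eims_value;               /* EIMS/EIMC bit(s) owned by this vector */
		struct igb_ring *tx_ring;     /* assumed: rings serviced by this vector */
		struct igb_ring *rx_ring;
	};

[Each MSI-X vector now schedules a single NAPI context that services both its tx and rx rings, rather than cleaning tx rings directly and keeping one NAPI context per rx ring.]
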
@@ -5671,19 +5807,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 reg_data;
+       u32 reg;
 
-       if (!adapter->vfs_allocated_count)
+       /* replication is not supported for 82575 */
+       if (hw->mac.type == e1000_82575)
                return;
 
-       /* VF's need PF reset indication before they
-        * can send/receive mail */
-       reg_data = rd32(E1000_CTRL_EXT);
-       reg_data |= E1000_CTRL_EXT_PFRSTD;
-       wr32(E1000_CTRL_EXT, reg_data);
+       /* enable replication vlan tag stripping */
+       reg = rd32(E1000_RPLOLR);
+       reg |= E1000_RPLOLR_STRVLAN;
+       wr32(E1000_RPLOLR, reg);
 
-       igb_vmdq_set_loopback_pf(hw, true);
-       igb_vmdq_set_replication_pf(hw, true);
+       /* notify HW that the MAC is adding vlan tags */
+       reg = rd32(E1000_DTXCTL);
+       reg |= E1000_DTXCTL_VLAN_ADDED;
+       wr32(E1000_DTXCTL, reg);
+
+       if (adapter->vfs_allocated_count) {
+               igb_vmdq_set_loopback_pf(hw, true);
+               igb_vmdq_set_replication_pf(hw, true);
+       } else {
+               igb_vmdq_set_loopback_pf(hw, false);
+               igb_vmdq_set_replication_pf(hw, false);
+       }
 }
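
[Editor's note, not part of this patch: adapter->vfs_allocated_count is only nonzero when SR-IOV VFs were brought up at probe time; a paraphrased sketch of that relationship (max_vfs is the driver's module parameter in this generation, and the probe-time condition is simplified):]

	/* paraphrased probe-time logic, not part of this patch */
	if (max_vfs > 0) {
		if (!pci_enable_sriov(pdev, max_vfs))   /* returns 0 on success */
			adapter->vfs_allocated_count = max_vfs;
	}

[With that in place, igb_vmm_control() above unconditionally enables replication VLAN-tag stripping and flags that the MAC inserts VLAN tags, then toggles VMDq loopback/replication on or off depending on whether any VFs exist, skipping 82575, which lacks replication support.]
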
 
 /* igb_main.c */