Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
author David S. Miller <davem@davemloft.net>
Fri, 31 Oct 2014 16:29:42 +0000 (12:29 -0400)
committer David S. Miller <davem@davemloft.net>
Fri, 31 Oct 2014 16:29:42 +0000 (12:29 -0400)
Pablo Neira Ayuso says:

====================
netfilter/ipvs fixes for net

The following patchset contains fixes for netfilter/ipvs. This round of
fixes is larger than usual at this stage, specifically because of the
nf_tables bridge reject fixes that I would like to see in 3.18. The
patches are:

1) Fix a null-pointer dereference that may occur when logging
   errors. This problem was introduced by 4a4739d56b0 ("ipvs: Pull
   out crosses_local_route_boundary logic") in v3.17-rc5.

2) Update the hook mask in nft_reject_bridge so we can also filter out
   packets from the prerouting and postrouting hooks. This fixes
   36d2af5 ("netfilter: nf_tables: allow to filter from prerouting and
   postrouting"), which needs this chunk to work.

3) Two patches to refactor common code to forge the IPv4 and IPv6
   reject packets from the bridge. These are required by the nf_tables
   reject bridge fix.

4) Fix nft_reject_bridge by avoiding the use of the IP stack to reject
   packets from the bridge. The idea is to forge the reject packets and
   inject them into the original port via br_deliver(), which is now
   exported for that purpose; a sketch of this path follows the quoted
   message.

5) Restrict nft_reject_bridge to the bridge prerouting and input hooks.
   The original skbuff may be cloned after prerouting, when the bridge
   stack needs to flood it to several bridge ports, and by then it is
   too late to reject the traffic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
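
Patches 3-5 boil down to one idea: forge the reject packet by hand and
hand it straight back to the ingress bridge port, without ever entering
the IP routing stack. The following is a minimal sketch of the IPv4/TCP
case, assuming the helper names introduced by the nf_reject_ipv4
refactor and the br_deliver() export mentioned in patch 4; the function
name and the exact header choices are illustrative, not the literal
patch contents.

	/*
	 * Minimal sketch of the bridge reject path (patches 3-5).  Code
	 * like this has to live under net/bridge/netfilter/ to reach the
	 * bridge-private br_port_get_rcu().
	 */
	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>
	#include <net/ip.h>
	#include <net/netfilter/ipv4/nf_reject.h>
	#include "../br_private.h"	/* br_deliver(), br_port_get_rcu() */

	static void sketch_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
	{
		struct sk_buff *nskb;
		const struct tcphdr *oth;
		struct tcphdr _oth;
		struct ethhdr *eth;

		/* Bail out unless the packet is plain TCP and not itself a RST. */
		oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
		if (!oth)
			return;

		nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
				 LL_MAX_HEADER, GFP_ATOMIC);
		if (!nskb)
			return;

		/* Forge IPv4 and TCP RST headers mirroring the offending flow. */
		skb_reserve(nskb, LL_MAX_HEADER);
		nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				    sysctl_ip_default_ttl);
		nf_reject_ip_tcphdr_put(nskb, oldskb, oth);

		/* Ethernet header with source and destination MACs swapped. */
		eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
		skb_reset_mac_header(nskb);
		ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
		ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
		eth->h_proto = eth_hdr(oldskb)->h_proto;
		skb_pull(nskb, ETH_HLEN);	/* bridge xmit path pushes it back */

		/* Answer on the port the packet arrived on, bypassing the IP stack. */
		br_deliver(br_port_get_rcu(oldskb->dev), nskb);
	}

Patch 5's hook restriction is what keeps a path like this sound: after
bridge prerouting the original skbuff may already have been cloned for
flooding to several ports, at which point it is too late to reject the
traffic on the port it arrived from.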
101 files changed:
Documentation/ptp/testptp.mk [new file with mode: 0644]
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/infiniband/hw/mlx4/main.c
drivers/net/Kconfig
drivers/net/dsa/mv88e6171.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/fs_enet/mac-scc.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
include/linux/skbuff.h
include/linux/usb/usbnet.h
include/net/ipv6.h
init/Kconfig
kernel/Makefile
kernel/bpf/Makefile
kernel/bpf/core.c
net/Kconfig
net/core/dev.c
net/core/tso.c
net/dsa/dsa.c
net/ipv4/gre_offload.c
net/ipv4/inet_fragment.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/output_core.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/mac80211/cfg.c
net/mac80211/rate.c
net/mac80211/rc80211_minstrel_debugfs.c
net/mac80211/rc80211_minstrel_ht_debugfs.c
net/mac80211/sta_info.h
net/sched/sch_pie.c
net/wireless/nl80211.c
net/xfrm/xfrm_policy.c

diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk
new file mode 100644
index 0000000..4ef2d97
--- /dev/null
+++ b/Documentation/ptp/testptp.mk
@@ -0,0 +1,33 @@
+# PTP 1588 clock support - User space test program
+#
+# Copyright (C) 2010 OMICRON electronics GmbH
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+CC        = $(CROSS_COMPILE)gcc
+INC       = -I$(KBUILD_OUTPUT)/usr/include
+CFLAGS    = -Wall $(INC)
+LDLIBS    = -lrt
+PROGS     = testptp
+
+all: $(PROGS)
+
+testptp: testptp.o
+
+clean:
+       rm -f testptp.o
+
+distclean: clean
+       rm -f $(PROGS)
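
(For reference: the makefile above builds the long-standing
Documentation/ptp/testptp.c test program out of tree. Assuming
"make headers_install" has populated $(KBUILD_OUTPUT)/usr/include,
something like "make -f testptp.mk" from Documentation/ptp, with
CROSS_COMPILE set if needed, should produce the testptp binary via the
implicit .c rules. The exact invocation depends on the local setup.)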
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1e5ac0a796968b61e3de79a4ae2a82f9c2339916..cd9161a8b3a196bcc67634dc86bb84ae5b6190d1 100644
@@ -275,7 +275,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
 static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },  /* 0xa8d8 */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
@@ -285,7 +285,8 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xA8DB */
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d1656c2f70afa43198066681f6a02ded26eab80b..1000955ce09d870db23979e3950d75e32b75cfc1 100644
@@ -132,7 +132,7 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
 {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bda5994ceb68c910097c712f5d44306840eb8727..8b72cf392b34454abbeca0c5a1c8e17aca39ba13 100644
@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                                            &mflow->reg_id[i]);
                if (err)
-                       goto err_free;
+                       goto err_create_flow;
                i++;
        }
 
        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
                if (err)
-                       goto err_free;
+                       goto err_create_flow;
+               i++;
        }
 
        return &mflow->ibflow;
 
+err_create_flow:
+       while (i) {
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+               i--;
+       }
 err_free:
        kfree(mflow);
        return ERR_PTR(err);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4706386b7d34c2d719e30eb602defd49ee92a72d..f9009be3f3077057222984bbf6d03fdac8fcbe7c 100644
@@ -135,6 +135,7 @@ config MACVLAN
 config MACVTAP
        tristate "MAC-VLAN based tap driver"
        depends on MACVLAN
+       depends on INET
        help
          This adds a specialized tap character device driver that is based
          on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -200,6 +201,7 @@ config RIONET_RX_SIZE
 
 config TUN
        tristate "Universal TUN/TAP device driver support"
+       depends on INET
        select CRC32
        ---help---
          TUN/TAP provides packet reception and transmission for user space
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1020a7af67cf6d8bbaf5016d2367454c63b393da..78d8e876f3aaada8ae88c429d007d4c8e3361633 100644
@@ -395,7 +395,7 @@ static int mv88e6171_get_sset_count(struct dsa_switch *ds)
 }
 
 struct dsa_switch_driver mv88e6171_switch_driver = {
-       .tag_protocol           = DSA_TAG_PROTO_DSA,
+       .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6171_probe,
        .setup                  = mv88e6171_setup,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 29554992215aa18301f26b7e431b1fc19dfff9cb..2349ea9702557b95de0860eb5921589104cd5c9d 100644
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int rxcsum, rxvlan, rxvlan_filter;
+       netdev_features_t rxcsum, rxvlan, rxvlan_filter;
 
        rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
        rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        struct skb_shared_hwtstamps *hwtstamps;
        unsigned int incomplete, error, context_next, context;
        unsigned int len, put_len, max_len;
-       int received = 0;
+       unsigned int received = 0;
+       int packet_count = 0;
 
        DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
 
@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        packet = &ring->packet_data;
-       while (received < budget) {
+       while (packet_count < budget) {
                DBGPR("  cur = %d\n", ring->cur);
 
                /* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
                        if (packet->errors)
                                DBGPR("Error in received packet\n");
                        dev_kfree_skb(skb);
-                       continue;
+                       goto next_packet;
                }
 
                if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
                                        }
 
                                        dev_kfree_skb(skb);
-                                       continue;
+                                       goto next_packet;
                                }
                                memcpy(skb_tail_pointer(skb), rdata->skb->data,
                                       put_len);
@@ -1694,7 +1695,7 @@ read_again:
 
                /* Stray Context Descriptor? */
                if (!skb)
-                       continue;
+                       goto next_packet;
 
                /* Be sure we don't exceed the configured MTU */
                max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
                if (skb->len > max_len) {
                        DBGPR("packet length exceeds configured MTU\n");
                        dev_kfree_skb(skb);
-                       continue;
+                       goto next_packet;
                }
 
 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:
 
                netdev->last_rx = jiffies;
                napi_gro_receive(&pdata->napi, skb);
+
+next_packet:
+               packet_count++;
        }
 
        /* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
                rdata->state.error = error;
        }
 
-       DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+       DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
 
-       return received;
+       return packet_count;
 }
 
 static int xgbe_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c2101982444bde4d500d7559633e8685a2e..c22f32622fa9a50ad3789d8000991ede58502ca0 100644
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
 {
        struct net_device *ndev = p->ndev;
        u32 data;
-       int i;
+       int i = 0;
 
        xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
-       for (i = 0; i < 10 && data != ~0U ; i++) {
+       do {
                usleep_range(100, 110);
                data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
-       }
+               if (data == ~0U)
+                       return 0;
+       } while (++i < 10);
 
-       if (data != ~0U) {
-               netdev_err(ndev, "Failed to release memory from shutdown\n");
-               return -ENODEV;
-       }
-
-       return 0;
+       netdev_err(ndev, "Failed to release memory from shutdown\n");
+       return -ENODEV;
 }
 
 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee010aa2e8a276dc17611593e3383c..3a6778a667f4558f52f1327f413e088882490f7d 100644
@@ -1397,6 +1397,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
        /* Enable NAPI */
        napi_enable(&priv->napi);
 
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
        phy_start(priv->phydev);
 
        /* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1502,6 @@ static int bcm_sysport_open(struct net_device *dev)
        if (ret)
                goto out_free_rx_ring;
 
-       /* Enable RX interrupt and TX ring full interrupt */
-       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
        /* Turn on TDMA */
        ret = tdma_enable_set(priv, 1);
        if (ret)
@@ -1858,6 +1858,8 @@ static int bcm_sysport_resume(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       umac_reset(priv);
+
        /* We may have been suspended and never received a WOL event that
         * would turn off MPD detection, take care of that now
         */
@@ -1885,9 +1887,6 @@ static int bcm_sysport_resume(struct device *d)
 
        netif_device_attach(dev);
 
-       /* Enable RX interrupt and TX ring full interrupt */
-       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
        /* RX pipe enable */
        topctrl_writel(priv, 0, RX_FLUSH_CNTL);
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad7b9fa52b8d589b1627a560635f92..f05fab65d78ac62b3b7905102d39893087b1450b 100644
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
                if (l5_cid >= MAX_CM_SK_TBL_SZ)
                        break;
 
-               rcu_read_lock();
                if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
                        rc = -ENODEV;
-                       rcu_read_unlock();
                        break;
                }
                csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
                        }
                }
                csk_put(csk);
-               rcu_read_unlock();
                rc = 0;
        }
        }
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
                cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
        mutex_lock(&cnic_lock);
-       if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+       if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
                RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
                cnic_put(dev);
        } else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679bf24b366887ff973a5d454c3af73..6fe300e316c3c41e398b06f1864cfdc7f1f0ebd6 100644
@@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev)
        dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
 }
 
+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       struct port_dcb_info *dcb = &pi->dcb;
+       struct dcb_app app;
+       int i, err;
+
+       /* zero priority implies remove */
+       app.priority = 0;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               /* Check if app list is exhausted */
+               if (!dcb->app_priority[i].protocolid)
+                       break;
+
+               app.protocol = dcb->app_priority[i].protocolid;
+
+               if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+                       app.selector = dcb->app_priority[i].sel_field + 1;
+                       err = dcb_ieee_setapp(dev, &app);
+               } else {
+                       app.selector = !!(dcb->app_priority[i].sel_field);
+                       err = dcb_setapp(dev, &app);
+               }
+
+               if (err) {
+                       dev_err(adap->pdev_dev,
+                               "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
+                               dcb_ver_array[dcb->dcb_version], app.selector,
+                               app.protocol, -err);
+                       break;
+               }
+       }
+}
+
 /* Finite State machine for Data Center Bridging.
  */
 void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                        /* we're going to use Host DCB */
                        dcb->state = CXGB4_DCB_STATE_HOST;
                        dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
-                       dcb->enabled = 1;
                        break;
                }
 
@@ -145,6 +180,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                         * state.  We need to reset back to a ground state
                         * of incomplete.
                         */
+                       cxgb4_dcb_cleanup_apps(dev);
                        cxgb4_dcb_state_init(dev);
                        dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
                        dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
 {
        struct port_info *pi = netdev2pinfo(dev);
 
+       /* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+               pi->dcb.enabled = enabled;
+               return 0;
+       }
+
        /* Firmware doesn't provide any mechanism to control the DCB state.
         */
        if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -833,11 +875,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
 
 /* Return whether IEEE Data Center Bridging has been negotiated.
  */
-static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+                               enum cxgb4_dcb_fw_msgs dcb_subtype)
 {
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
+       if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+               return 0;
+
        return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
                (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
@@ -850,7 +897,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
 {
        int prio;
 
-       if (!cxgb4_ieee_negotiation_complete(dev))
+       if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
                return -EINVAL;
        if (!(app->selector && app->protocol))
                return -EINVAL;
@@ -872,7 +919,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
        int ret;
 
-       if (!cxgb4_ieee_negotiation_complete(dev))
+       if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
                return -EINVAL;
        if (!(app->selector && app->protocol))
                return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519d2f1df97eb2d9dd39bfae0c0ae92..8520d5529df872fad60a377231f5154fcf91a4e3 100644
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
 #ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);
 
-       return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+       if (!pi->dcb.enabled)
+               return 0;
+
+       return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+               (pi->dcb.state == CXGB4_DCB_STATE_HOST));
 #else
        return 0;
 #endif
@@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
+       spin_lock_init(&adapter->win0_lock);
 
        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d9182681c92203f5a7687010e5fdcad7d8..0b42bddaf28443dbdc9317feee3dc43fcab8ade9 100644
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
        CH_DEVICE(0x480d),      /* T480-cr */
        CH_DEVICE(0x480e),      /* T440-lp-cr */
        CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4880),
+       CH_DEVICE(0x4881),
+       CH_DEVICE(0x4882),
+       CH_DEVICE(0x4883),
+       CH_DEVICE(0x4884),
+       CH_DEVICE(0x4885),
+       CH_DEVICE(0x4886),
+       CH_DEVICE(0x4887),
+       CH_DEVICE(0x4888),
        CH_DEVICE(0x5801),      /* T520-cr */
        CH_DEVICE(0x5802),      /* T522-cr */
        CH_DEVICE(0x5803),      /* T540-cr */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 81b96cf875740f6faa418885718dcc78c2113570..50a851db2852e28979a0b78f481347fc9e29778a 100644
@@ -1581,7 +1581,8 @@ fec_enet_interrupt(int irq, void *dev_id)
                complete(&fep->mdio_done);
        }
 
-       fec_ptp_check_pps_event(fep);
+       if (fep->ptp_clock)
+               fec_ptp_check_pps_event(fep);
 
        return ret;
 }
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 3d4e08be170970b7f981c19a23e65c35f7ae860b..b34214e2df5f6e55bdbb29e6a8016c139e74345c 100644
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev)
                FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);     /* FD disable */
        }
 
+       /* Restore multicast and promiscuous settings */
+       set_multicast_list(dev);
+
        /*
         * Enable interrupts we wish to service.
         */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index f30411f0701f694175392198941b0d69d3798682..7a184e8816a48f9eb74f4890b60b32345dbcf5f1 100644
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev)
        if (fep->phydev->duplex)
                S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
 
+       /* Restore multicast and promiscuous settings */
+       set_multicast_list(dev);
+
        S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
 }
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5f6aded512f539f9a12cb692f810fd3d8e560a3f..24f3986cfae2950f582d3a5e4644c4e1b666391e 100644
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                  NETIF_F_HW_CSUM |
                                  NETIF_F_SG);
 
-       netdev->priv_flags |= IFF_UNICAST_FLT;
+       /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
+       if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
+           hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
+               netdev->priv_flags |= IFF_UNICAST_FLT;
 
        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ed5f1c15fb0f4cd5fe7ce797920fd3b6cd21f7df..c3a7f4a4b7757a35afa4a23b944a64e0ac4d673b 100644
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                                I40E_GL_MDET_TX_PF_NUM_SHIFT;
                u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
                                I40E_GL_MDET_TX_VF_NUM_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
                                I40E_GL_MDET_TX_EVENT_SHIFT;
                u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
                                I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
                u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
                                I40E_GL_MDET_RX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
                                I40E_GL_MDET_RX_EVENT_SHIFT;
                u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
                                I40E_GL_MDET_RX_QUEUE_SHIFT;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a21b14495ebd0d7b953a1d8e4b2319d23452b95e..a2d72a87cbde40465c16277e32d4a242a2873ee3 100644
@@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
        if (unlikely(page_to_nid(page) != numa_node_id()))
                return false;
 
+       if (unlikely(page->pfmemalloc))
+               return false;
+
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
@@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* we can reuse buffer as-is, just make sure it is local */
-               if (likely(page_to_nid(page) == numa_node_id()))
+               if (likely((page_to_nid(page) == numa_node_id()) &&
+                          !page->pfmemalloc))
                        return true;
 
                /* this page cannot be reused so discard it */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3ce4a258f94534da25304c9138e0bf28fdd3c9ff..0ae038b9af90cfc7dc15bd01c0b4014e37fa1911 100644
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
                if (old == advertised)
                        return err;
                /* this sets the link speed and restarts auto-neg */
+               while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+                       usleep_range(1000, 2000);
+
                hw->mac.autotry_restart = true;
                err = hw->mac.ops.setup_link(hw, advertised, true);
                if (err) {
                        e_info(probe, "setup link failed with code %d\n", err);
                        hw->mac.ops.setup_link(hw, old, true);
                }
+               clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
        } else {
                /* in this case we currently only support 10Gb/FULL */
                u32 speed = ethtool_cmd_speed(ecmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fec5212d43374835156049f8b7c5f0bc828dcdde..d2df4e3d1032496dbf294f4d7b0b741ddfaac6d8 100644
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                                IXGBE_CB(skb)->page_released = false;
                        }
                        dev_kfree_skb(skb);
+                       rx_buffer->skb = NULL;
                }
-               rx_buffer->skb = NULL;
                if (rx_buffer->dma)
                        dma_unmap_page(dev, rx_buffer->dma,
                                       ixgbe_rx_pg_size(rx_ring),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 34c137878545fc672dad1a3d86e11c034c0ac368..454d9fea640ecb0aa849b8d8106695d017ca3309 100644
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         * whether LSO is used */
        tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-                                                        MLX4_WQE_CTRL_TCP_UDP_CSUM);
+               if (!skb->encapsulation)
+                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+                                                                MLX4_WQE_CTRL_TCP_UDP_CSUM);
+               else
+                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
                ring->tx_csum++;
        }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a49c9d11d8a58e5a8bcc5419133a7909204a0a41..49290a4059039a72c86e119c59d2d03ec6c3eb4c 100644
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
                                pr_cont("\n");
                }
        }
+       synchronize_irq(eq->irq);
 
        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca0f98c951054945cd29fc3be2284fc7eeb7d729..872843179f44af3c7442614477bd2af131118347 100644
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
                                        cur->ib.dst_gid_msk);
                        break;
 
+               case MLX4_NET_TRANS_RULE_ID_VXLAN:
+                       len += snprintf(buf + len, BUF_SIZE - len,
+                                       "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+                       break;
                case MLX4_NET_TRANS_RULE_ID_IPV6:
                        break;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ed53291468f3226e644aa039025a962aff0b0fe0..a278238a2db643ba04b924dd9920a284abcd8908 100644
@@ -420,6 +420,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
+       synchronize_irq(table->msix_arr[eq->irqn].vector);
        mlx5_buf_free(dev, &eq->buf);
 
        return err;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ee84a90e371c5dedf2fa5b0b31ba520d8611c85b..aaf2987512b5db7472bdef518c4aa4c510612ef7 100644
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        unsigned short dma_flags;
        int i = 0;
 
-       EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);
 
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);
 
-       EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
        rc = tso_start(&state, efx, skb);
        if (rc)
                goto mem_err;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 952e1e4764b74d64688875815b25dc90316f3214..d81b84b5e3df2d20245ae4fa61dce98686ac0b73 100644
@@ -2006,7 +2006,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
-                       return -EINVAL;
+                       goto no_phy_slave;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2019,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                         PHY_ID_FMT, mdio->name, phyid);
 
+               slave_data->phy_if = of_get_phy_mode(slave_node);
+               if (slave_data->phy_if < 0) {
+                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+                               i);
+                       return slave_data->phy_if;
+               }
+
+no_phy_slave:
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr) {
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2038,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                                        return ret;
                        }
                }
-
-               slave_data->phy_if = of_get_phy_mode(slave_node);
-               if (slave_data->phy_if < 0) {
-                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
-                               i);
-                       return slave_data->phy_if;
-               }
-
                if (data->dual_emac) {
                        if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9e17d1a91e7193fb3cf566b468abdd4386889495..78ec33f5100b183e31bfc949763acfa4685b9836 100644
@@ -550,6 +550,7 @@ do_lso:
 do_send:
        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
+       packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                        skb, &packet->page_buf[0]);
 
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 29b3bb410781c2530a41ae689b8a91d51e1de3ef..bfb0b6ec8c56e26d67fa94814fe460af34c95fed 100644
@@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
        struct sk_buff *skb;
        struct sk_buff_head list;
 
-       skb_queue_head_init(&list);
+       __skb_queue_head_init(&list);
 
        spin_lock_bh(&port->bc_queue.lock);
        skb_queue_splice_tail_init(&port->bc_queue, &list);
@@ -1082,9 +1082,15 @@ static void macvlan_port_destroy(struct net_device *dev)
 {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
 
-       cancel_work_sync(&port->bc_work);
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
+
+       /* After this point, no packet can schedule bc_work anymore,
+        * but we need to cancel it and purge left skbs if any.
+        */
+       cancel_work_sync(&port->bc_work);
+       __skb_queue_purge(&port->bc_queue);
+
        kfree_rcu(port, rcu);
 }
 
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 65e2892342bd0cdf31a6c790e3344dc1a4f12816..6f226de655a40759874356c38352b928d1e425f3 100644
@@ -16,6 +16,7 @@
 #include <linux/idr.h>
 #include <linux/fs.h>
 
+#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
@@ -65,7 +66,7 @@ static struct cdev macvtap_cdev;
 static const struct proto_ops macvtap_socket_ops;
 
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
-                     NETIF_F_TSO6 | NETIF_F_UFO)
+                     NETIF_F_TSO6)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
@@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+                       pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+                                    current->comm);
                        gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
@@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-               else if (sinfo->gso_type & SKB_GSO_UDP)
-                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -950,9 +953,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
-
-               if (arg & TUN_F_UFO)
-                       feature_mask |= NETIF_F_UFO;
        }
 
        /* tun/tap driver inverts the usage for TSO offloads, where
@@ -963,7 +963,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;
@@ -1064,7 +1064,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN | TUN_F_UFO))
+                           TUN_F_TSO_ECN))
                        return -EINVAL;
 
                rtnl_lock();
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index bd37e45c89c0e13f8128c82ac284c4ba6530d52d..225c033b08f3fbc4217fc238ec7048b611b8d87b 100644
 #define MII_M1011_PHY_SCR              0x10
 #define MII_M1011_PHY_SCR_AUTO_CROSS   0x0060
 
+#define MII_M1145_PHY_EXT_SR           0x1b
 #define MII_M1145_PHY_EXT_CR           0x14
 #define MII_M1145_RGMII_RX_DELAY       0x0080
 #define MII_M1145_RGMII_TX_DELAY       0x0002
 
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK      0x4
+#define MII_M1145_HWCFG_MODE_MASK              0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO      0x8000
+
 #define MII_M1111_PHY_LED_CONTROL      0x18
 #define MII_M1111_PHY_LED_DIRECT       0x4100
 #define MII_M1111_PHY_LED_COMBINE      0x411c
@@ -676,6 +681,20 @@ static int m88e1145_config_init(struct phy_device *phydev)
                }
        }
 
+       if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+               if (temp < 0)
+                       return temp;
+
+               temp &= ~MII_M1145_HWCFG_MODE_MASK;
+               temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
+               temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
+
+               err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+               if (err < 0)
+                       return err;
+       }
+
        err = marvell_of_reg_init(phydev);
        if (err < 0)
                return err;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 186ce541c65762f8ee1720aae7f573f145982406..7302398f0b1fff89ffe4a4e373e936c928180645 100644
@@ -65,6 +65,7 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
+#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -174,7 +175,7 @@ struct tun_struct {
        struct net_device       *dev;
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
-                         NETIF_F_TSO6|NETIF_F_UFO)
+                         NETIF_F_TSO6)
 
        int                     vnet_hdr_sz;
        int                     sndbuf;
@@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
+       skb_reset_network_header(skb);
+
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1149,8 +1152,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+               {
+                       static bool warned;
+
+                       if (!warned) {
+                               warned = true;
+                               netdev_warn(tun->dev,
+                                           "%s: using disabled UFO feature; please fix this program\n",
+                                           current->comm);
+                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
                        break;
+               }
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1179,7 +1194,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
-       skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
 
        rxhash = skb_get_hash(skb);
@@ -1251,8 +1265,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-                       else if (sinfo->gso_type & SKB_GSO_UDP)
-                               gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
@@ -1762,11 +1774,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
-
-               if (arg & TUN_F_UFO) {
-                       features |= NETIF_F_UFO;
-                       arg &= ~TUN_F_UFO;
-               }
        }
 
        /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2a32d9167d3b931fb82c434bb0198e27bfc1d8ae..d3920b54a92ce3de23a581d3cf80e99aa7d61e72 100644
@@ -67,6 +67,35 @@ static const u8 mbm_guid[16] = {
        0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
 };
 
+static void usbnet_cdc_update_filter(struct usbnet *dev)
+{
+       struct cdc_state        *info = (void *) &dev->data;
+       struct usb_interface    *intf = info->control;
+
+       u16 cdc_filter =
+           USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
+           USB_CDC_PACKET_TYPE_BROADCAST;
+
+       if (dev->net->flags & IFF_PROMISC)
+               cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
+
+       /* FIXME cdc-ether has some multicast code too, though it complains
+        * in routine cases.  info->ether describes the multicast support.
+        * Implement that here, manipulating the cdc filter as needed.
+        */
+
+       usb_control_msg(dev->udev,
+                       usb_sndctrlpipe(dev->udev, 0),
+                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       cdc_filter,
+                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       USB_CTRL_SET_TIMEOUT
+               );
+}
+
 /* probes control interface, claims data interface, collects the bulk
  * endpoints, activates data interface (if needed), maybe sets MTU.
  * all pure cdc, except for certain firmware workarounds, and knowing
@@ -347,16 +376,8 @@ next_desc:
         * don't do reset all the way. So the packet filter should
         * be set to a sane initial value.
         */
-       usb_control_msg(dev->udev,
-                       usb_sndctrlpipe(dev->udev, 0),
-                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
-                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
-                       intf->cur_altsetting->desc.bInterfaceNumber,
-                       NULL,
-                       0,
-                       USB_CTRL_SET_TIMEOUT
-               );
+       usbnet_cdc_update_filter(dev);
+
        return 0;
 
 bad_desc:
@@ -468,10 +489,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                return status;
        }
 
-       /* FIXME cdc-ether has some multicast code too, though it complains
-        * in routine cases.  info->ether describes the multicast support.
-        * Implement that here, manipulating the cdc filter as needed.
-        */
        return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
@@ -482,6 +499,7 @@ static const struct driver_info     cdc_info = {
        .bind =         usbnet_cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       usbnet_cdc_status,
+       .set_rx_mode =  usbnet_cdc_update_filter,
        .manage_power = usbnet_manage_power,
 };
 
@@ -491,6 +509,7 @@ static const struct driver_info wwan_info = {
        .bind =         usbnet_cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       usbnet_cdc_status,
+       .set_rx_mode =  usbnet_cdc_update_filter,
        .manage_power = usbnet_manage_power,
 };
 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e3d84c322e4ec2589d496ad9a6355bd14e08da31..ca3c5d5f93eb2571080ef060d79a4122064a33e8 100644
@@ -2891,6 +2891,9 @@ static int rtl8152_open(struct net_device *netdev)
        if (res)
                goto out;
 
+       /* set speed to 0 to avoid autoresume try to submit rx */
+       tp->speed = 0;
+
        res = usb_autopm_get_interface(tp->intf);
        if (res < 0) {
                free_all_mem(tp);
@@ -2904,6 +2907,8 @@ static int rtl8152_open(struct net_device *netdev)
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                cancel_delayed_work_sync(&tp->schedule);
+
+               /* disable the tx/rx, if the workqueue has enabled them. */
                if (tp->speed & LINK_STATUS)
                        tp->rtl_ops.disable(tp);
        }
@@ -2955,10 +2960,7 @@ static int rtl8152_close(struct net_device *netdev)
                 * be disable when autoresume occurs, because the
                 * netif_running() would be false.
                 */
-               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_runtime_suspend_enable(tp, false);
-                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-               }
+               rtl_runtime_suspend_enable(tp, false);
 
                tasklet_disable(&tp->tl);
                tp->rtl_ops.down(tp);
@@ -3205,7 +3207,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                netif_device_detach(netdev);
        }
 
-       if (netif_running(netdev)) {
+       if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                tasklet_disable(&tp->tl);
@@ -3253,6 +3255,8 @@ static int rtl8152_resume(struct usb_interface *intf)
                        set_bit(WORK_ENABLE, &tp->flags);
                }
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+       } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+               clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
 
        mutex_unlock(&tp->control);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 20615bbd693b2eb782f8a6bba08adaff3cfa724a..3a6770a65d7836ace177cff2bb45c925b474a84a 100644
@@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev)
        clear_bit(EVENT_LINK_CHANGE, &dev->flags);
 }
 
+static void usbnet_set_rx_mode(struct net_device *net)
+{
+       struct usbnet           *dev = netdev_priv(net);
+
+       usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
+}
+
+static void __handle_set_rx_mode(struct usbnet *dev)
+{
+       if (dev->driver_info->set_rx_mode)
+               (dev->driver_info->set_rx_mode)(dev);
+
+       clear_bit(EVENT_SET_RX_MODE, &dev->flags);
+}
+
 /* work that cannot be done in interrupt context uses keventd.
  *
  * NOTE:  with 2.5 we could do more of this using completion callbacks,
@@ -1157,6 +1172,10 @@ skip_reset:
        if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
                __handle_link_change(dev);
 
+       if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
+               __handle_set_rx_mode(dev);
+
+
        if (dev->flags)
                netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
 }
@@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_set_rx_mode        = usbnet_set_rx_mode,
        .ndo_change_mtu         = usbnet_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75256bd1a6ab8bb6284ad4088e9513469fffe8a..ec2a8b41ed41a1484c697f1897e0286f5d5d845a 100644
@@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
+               {
+                       static bool warned;
+
+                       if (!warned) {
+                               warned = true;
+                               netdev_warn(dev,
+                                           "host using disabled UFO feature; please fix it\n");
+                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
+               }
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
@@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-               else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1705,7 +1712,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+                       dev->hw_features |= NETIF_F_TSO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
@@ -1715,11 +1722,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
-               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
-                       dev->hw_features |= NETIF_F_UFO;
 
                if (gso)
-                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1757,8 +1762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1952,9 +1956,9 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-       VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+       VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e5ba6faf32815173fa47fa81fd303082688202bd..86907e5ba6ca21a5e7e6d8ff8b6b20499e004cce 100644
@@ -80,6 +80,7 @@ struct reg_dmn_pair_mapping {
 
 struct ath_regulatory {
        char alpha2[2];
+       enum nl80211_dfs_regions region;
        u16 country_code;
        u16 max_power_level;
        u16 current_rd;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c6dd7f1fed65ed52b2ed1d0fd72c474260b5656f..33b0c7aef2ea39f527962f483a8014a052ac10ce 100644
@@ -368,11 +368,11 @@ void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
 {
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 
-       if (reg->power_limit != new_txpow) {
+       if (reg->power_limit != new_txpow)
                ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
-               /* read back in case value is clamped */
-               *txpower = reg->max_power_level;
-       }
+
+       /* read back in case value is clamped */
+       *txpower = reg->max_power_level;
 }
 EXPORT_SYMBOL(ath9k_cmn_update_txpow);
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 156a944134dcfb09ee139ccc2ffde1af306bb936..3bd030494986a087bfd9744873649442fa67b572 100644
@@ -734,6 +734,32 @@ static const struct ieee80211_iface_combination if_comb[] = {
 #endif
 };
 
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_is_chanctx_enabled())
+               return;
+
+       hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+       hw->queues = ATH9K_NUM_TX_QUEUES;
+       hw->offchannel_tx_hw_queue = hw->queues - 1;
+       hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
+       hw->wiphy->iface_combinations = if_comb_multi;
+       hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
+       hw->wiphy->max_scan_ssids = 255;
+       hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+       hw->wiphy->max_remain_on_channel_duration = 10000;
+       hw->chanctx_data_size = sizeof(void *);
+       hw->extra_beacon_tailroom =
+               sizeof(struct ieee80211_p2p_noa_attr) + 9;
+
+       ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
+}
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
 static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -746,7 +772,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                IEEE80211_HW_SUPPORTS_RC_TABLE |
-               IEEE80211_HW_QUEUE_CONTROL |
                IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
        if (ath9k_ps_enable)
@@ -781,24 +806,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                        hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        }
 
-#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
-
-       if (ath9k_is_chanctx_enabled()) {
-               hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
-               hw->wiphy->iface_combinations = if_comb_multi;
-               hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
-               hw->wiphy->max_scan_ssids = 255;
-               hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
-               hw->wiphy->max_remain_on_channel_duration = 10000;
-               hw->chanctx_data_size = sizeof(void *);
-               hw->extra_beacon_tailroom =
-                       sizeof(struct ieee80211_p2p_noa_attr) + 9;
-
-               ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
-       }
-
-#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
-
        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -808,12 +815,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
-       /* allow 4 queues per channel context +
-        * 1 cab queue + 1 offchannel tx queue
-        */
-       hw->queues = ATH9K_NUM_TX_QUEUES;
-       /* last queue for offchannel */
-       hw->offchannel_tx_hw_queue = hw->queues - 1;
+       hw->queues = 4;
        hw->max_rates = 4;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
@@ -837,6 +839,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &common->sbands[IEEE80211_BAND_5GHZ];
 
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+       ath9k_set_mcc_capab(sc, hw);
+#endif
        ath9k_init_wow(hw);
        ath9k_cmn_reload_chainmask(ah);
 
index 6f6a974f7fdb265960db2098f2c45b9684ffca2b..30c66dfcd7a04b8489186c82db741b87f0afbad0 100644 (file)
@@ -1162,6 +1162,9 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
 {
        int i;
 
+       if (!ath9k_is_chanctx_enabled())
+               return;
+
        for (i = 0; i < IEEE80211_NUM_ACS; i++)
                vif->hw_queue[i] = i;
 
index 493a183d0aaf7d04c77103a8c4ea2b2c477089ad..d6e54a3c88f671f08ae3174ddabf6721fb39e051 100644 (file)
@@ -169,7 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
 
        if (txq->stopped &&
            txq->pending_frames < sc->tx.txq_max_pending[q]) {
-               ieee80211_wake_queue(sc->hw, info->hw_queue);
+               if (ath9k_is_chanctx_enabled())
+                       ieee80211_wake_queue(sc->hw, info->hw_queue);
+               else
+                       ieee80211_wake_queue(sc->hw, q);
                txq->stopped = false;
        }
 }
@@ -2247,7 +2250,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                fi->txq = q;
                if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
                    !txq->stopped) {
-                       ieee80211_stop_queue(sc->hw, info->hw_queue);
+                       if (ath9k_is_chanctx_enabled())
+                               ieee80211_stop_queue(sc->hw, info->hw_queue);
+                       else
+                               ieee80211_stop_queue(sc->hw, q);
                        txq->stopped = true;
                }
        }
index 415393dfb6fc17b51d33f4d24e70c6700620d4b5..06ea6cc9e30a5e07c379116a8e1ec0996e3f189e 100644 (file)
@@ -515,6 +515,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
        if (!request)
                return;
 
+       reg->region = request->dfs_region;
        switch (request->initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                /*
@@ -779,6 +780,19 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
                return SD_NO_CTL;
        }
 
+       if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
+               switch (reg->region) {
+               case NL80211_DFS_FCC:
+                       return CTL_FCC;
+               case NL80211_DFS_ETSI:
+                       return CTL_ETSI;
+               case NL80211_DFS_JP:
+                       return CTL_MKK;
+               default:
+                       break;
+               }
+       }
+
        switch (band) {
        case IEEE80211_BAND_2GHZ:
                return reg->regpair->reg_2ghz_ctl;
index f55f625fd06b4aab850ffa16761714a5dff196f6..d20d4e6f391ae89706e422e2b817034ee030c0ae 100644 (file)
@@ -670,7 +670,6 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
                                  struct brcmf_sdio_dev *sdiodev)
 {
        int i;
-       uint fw_len, nv_len;
        char end;
 
        for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
@@ -684,25 +683,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
                return -ENODEV;
        }
 
-       fw_len = sizeof(sdiodev->fw_name) - 1;
-       nv_len = sizeof(sdiodev->nvram_name) - 1;
        /* check if firmware path is provided by module parameter */
        if (brcmf_firmware_path[0] != '\0') {
-               strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len);
-               strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len);
-               fw_len -= strlen(sdiodev->fw_name);
-               nv_len -= strlen(sdiodev->nvram_name);
+               strlcpy(sdiodev->fw_name, brcmf_firmware_path,
+                       sizeof(sdiodev->fw_name));
+               strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
+                       sizeof(sdiodev->nvram_name));
 
                end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
                if (end != '/') {
-                       strncat(sdiodev->fw_name, "/", fw_len);
-                       strncat(sdiodev->nvram_name, "/", nv_len);
-                       fw_len--;
-                       nv_len--;
+                       strlcat(sdiodev->fw_name, "/",
+                               sizeof(sdiodev->fw_name));
+                       strlcat(sdiodev->nvram_name, "/",
+                               sizeof(sdiodev->nvram_name));
                }
        }
-       strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len);
-       strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len);
+       strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
+               sizeof(sdiodev->fw_name));
+       strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
+               sizeof(sdiodev->nvram_name));
 
        return 0;
 }
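
The conversion above works because strlcat(), unlike strncat(), always takes the full size of the destination buffer and guarantees NUL termination, so no running "remaining space" arithmetic is needed. A minimal userspace sketch of the same pattern, with a local strlcat() stand-in since glibc does not ship one; the firmware path and file name here are made up for illustration:

#include <stdio.h>
#include <string.h>

/* Local stand-in for the BSD/kernel strlcat(): the size argument is
 * always the total size of dst, and the result is always NUL-terminated.
 */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);
	size_t i;

	if (dlen == size)		/* dst not even terminated: no room */
		return size + slen;
	for (i = 0; i < size - dlen - 1 && src[i] != '\0'; i++)
		dst[dlen + i] = src[i];
	dst[dlen + i] = '\0';
	return dlen + slen;		/* length we tried to create */
}

int main(void)
{
	char fw_name[32] = "";			/* plays the role of sdiodev->fw_name */
	const char *path = "/lib/firmware";	/* hypothetical module parameter */

	/* Unlike strncat(), no running "space left" counter is needed:
	 * every call just passes sizeof(fw_name).
	 */
	my_strlcat(fw_name, path, sizeof(fw_name));
	if (path[strlen(path) - 1] != '/')
		my_strlcat(fw_name, "/", sizeof(fw_name));
	my_strlcat(fw_name, "brcmfmac-sdio.bin", sizeof(fw_name));

	printf("%s\n", fw_name);	/* truncated safely if it didn't fit */
	return 0;
}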
index 2364a3c09b9eb8037ba6237112c679749a3ac94c..cae692ff1013f9ee00b93861558c6144cfad4cc1 100644 (file)
@@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             u32 queues, bool drop)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+       u32 scd_queues;
 
        mutex_lock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                goto done;
        }
 
-       /*
-        * mac80211 will not push any more frames for transmit
-        * until the flush is completed
-        */
-       if (drop) {
-               IWL_DEBUG_MAC80211(priv, "send flush command\n");
-               if (iwlagn_txfifo_flush(priv, 0)) {
-                       IWL_ERR(priv, "flush request fail\n");
-                       goto done;
-               }
+       scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
+       scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
+                       BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
+
+       if (vif)
+               scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
+
+       IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
+       if (iwlagn_txfifo_flush(priv, scd_queues)) {
+               IWL_ERR(priv, "flush request fail\n");
+               goto done;
        }
-       IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+       IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
        iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        mutex_unlock(&priv->mutex);
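
The replacement above flushes an explicit bitmask of scheduler queues instead of flushing unconditionally: every queue below num_of_queues gets a bit, and the command queues (plus the vif's VO queue, if one is given) are masked out so host commands are not flushed. A tiny sketch of the mask arithmetic; the queue numbers below are hypothetical stand-ins, not the real IWL_* values:

#include <stdio.h>

/* Stand-in values for illustration only; the real queue numbers come
 * from the device configuration.
 */
#define NUM_OF_QUEUES		20
#define DEFAULT_CMD_QUEUE	4
#define IPAN_CMD_QUEUE		9

int main(void)
{
	/* One bit per scheduler queue: queues 0..NUM_OF_QUEUES-1. */
	unsigned int scd_queues = (1u << NUM_OF_QUEUES) - 1;

	/* Command queues carry host commands, not frames: never flush them. */
	scd_queues &= ~((1u << DEFAULT_CMD_QUEUE) | (1u << IPAN_CMD_QUEUE));

	printf("flush mask: 0x%x\n", scd_queues);	/* prints 0xffdef */
	return 0;
}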
index e4351487ca7269aefe06d4bf2da3c2fedc1f34da..d2b7234b1c73e00f76ed17094d52e0e18431d81f 100644 (file)
@@ -82,7 +82,8 @@
 #define IWL8000_TX_POWER_VERSION       0xffff /* meaningless */
 
 #define IWL8000_FW_PRE "iwlwifi-8000"
-#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+#define IWL8000_MODULE_FIRMWARE(api) \
+       IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_8000         10
 #define DEFAULT_NVM_FILE_FAMILY_8000           "iwl_nvm_8000.bin"
index 9eb85249e89c46aa980ae22893b48af2150ee4df..d8fc548c0d6cd561993eebb6df185e3ccad0d6ac 100644 (file)
@@ -563,6 +563,7 @@ enum iwl_trans_state {
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -589,6 +590,7 @@ struct iwl_trans {
        u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
        bool pm_support;
+       bool ltr_enabled;
 
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
index 8df2021f9856365b2bec8465bb4ddbb02e907586..da2ffb78519431a1b14e9b355280a3c7a3446ed6 100644 (file)
@@ -303,8 +303,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x28412201),
-       cpu_to_le32(0x11118451),
+       cpu_to_le32(0x2e402280),
+       cpu_to_le32(0x7711a751),
 };
 
 struct corunning_block_luts {
index 585c0ab4a3ec5fc59ca8d2e84395c74b27b4fcd2..8a1d2f33d5b7da2768a7e5849e305b6386b0b480 100644 (file)
@@ -291,8 +291,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x28412201),
-       cpu_to_le32(0x11118451),
+       cpu_to_le32(0x2e402280),
+       cpu_to_le32(0x7711a751),
 };
 
 struct corunning_block_luts {
index 27dd86395b39dd0f5800efe9c73a52294a2099ed..2fd8ad4633e0ee717096461398c96fc67181d288 100644 (file)
 
 /* Power Management Commands, Responses, Notifications */
 
+/**
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ *     memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ *     reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ *     D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: deny going into C10 on PD
+ */
+enum iwl_ltr_config_flags {
+       LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+       LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+       LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+       LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+       LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+       LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+       LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+};
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwl_ltr_config_flags
+ */
+struct iwl_ltr_config_cmd {
+       __le32 flags;
+       __le32 static_long;
+       __le32 static_short;
+} __packed;
+
 /* Radio LP RX Energy Threshold measured in dBm */
 #define POWER_LPRX_RSSI_THRESHOLD      75
 #define POWER_LPRX_RSSI_THRESHOLD_MAX  94
 #define POWER_LPRX_RSSI_THRESHOLD_MIN  30
 
 /**
- * enum iwl_scan_flags - masks for power table command flags
+ * enum iwl_power_flags - masks for power table command flags
  * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
  *             receiver and transmitter. '0' - does not allow.
  * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
index 667a92274c87773c1d123b41911afb5e38d19405..c62575d86bcdbe5e75c724cacf8605e8569309e6 100644 (file)
@@ -157,6 +157,7 @@ enum {
        /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
        PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+       LTR_CONFIG = 0xee,
 
        /* Thermal Throttling*/
        REPLY_THERMAL_MNG_BACKOFF = 0x7e,
index 23fd711a67e456055b50d171ec243a124ca3ce6c..e0d9f19650b07e5df8b56342ec5e6d1bef88194f 100644 (file)
@@ -480,6 +480,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);
 
+       if (mvm->trans->ltr_enabled) {
+               struct iwl_ltr_config_cmd cmd = {
+                       .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+               };
+
+               WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+                                            sizeof(cmd), &cmd));
+       }
+
        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;
index c7a73c68bdabddcb2eb2536256e0a7736be06dc1..585fe5b7100fb8750bed29eda65d91121422497c 100644 (file)
@@ -526,7 +526,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
        }
 
        if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+           !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+           !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                goto drop;
 
        /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
@@ -1734,6 +1735,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_BEACON &&
            iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
                IWL_WARN(mvm, "Failed updating beacon data\n");
+
+       if (changes & BSS_CHANGED_TXPOWER) {
+               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+                               bss_conf->txpower);
+               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+       }
+
 }
 
 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2367,14 +2375,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
        /* Set the node address */
        memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
 
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->time_event_lock);
+
+       if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
+               spin_unlock_bh(&mvm->time_event_lock);
+               return -EIO;
+       }
+
        te_data->vif = vif;
        te_data->duration = duration;
        te_data->id = HOT_SPOT_CMD;
 
-       lockdep_assert_held(&mvm->mutex);
-
-       spin_lock_bh(&mvm->time_event_lock);
-       list_add_tail(&te_data->list, &mvm->time_event_list);
        spin_unlock_bh(&mvm->time_event_lock);
 
        /*
@@ -2430,22 +2443,23 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
                           duration, type);
 
+       mutex_lock(&mvm->mutex);
+
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                /* Use aux roc framework (HS20) */
                ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
                                               vif, duration);
-               return ret;
+               goto out_unlock;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* handle below */
                break;
        default:
                IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
-       mutex_lock(&mvm->mutex);
-
        for (i = 0; i < NUM_PHY_CTX; i++) {
                phy_ctxt = &mvm->phy_ctxts[i];
                if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
index 15aa298ee79c749b5e7e7e7401d92375a270c29e..48cb25a93591a44678fd051e390dc15c02078744 100644 (file)
@@ -336,6 +336,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(DTS_MEASUREMENT_NOTIFICATION),
        CMD(REPLY_THERMAL_MNG_BACKOFF),
        CMD(MAC_PM_POWER_TABLE),
+       CMD(LTR_CONFIG),
        CMD(BT_COEX_CI),
        CMD(BT_COEX_UPDATE_SW_BOOST),
        CMD(BT_COEX_UPDATE_CORUN_LUT),
index cb85e63c20aa340d70f2be0a3c4f48c3781e8182..b280d5d87127e87ea80f1eb36a477dad3dfcc6e2 100644 (file)
@@ -459,7 +459,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                                basic_ssid ? 1 : 0);
 
        cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
-                                          TX_CMD_FLG_BT_DIS);
+                                          3 << TX_CMD_FLG_BT_PRIO_POS);
+
        cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
        cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        cmd->tx_cmd.rate_n_flags =
index b7f9e61d14e276f5505060a0e1cc78e3f2d1af4c..6dfad230be5ed658f2cb3f295e71b43d724d293c 100644 (file)
@@ -305,8 +305,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
                te_data->running = false;
                te_data->vif = NULL;
                te_data->uid = 0;
+               te_data->id = TE_MAX;
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
-               set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
index 1cb793a498ac88a221cccb9e5efa31267e4cb028..c6a517c771df5045cd5d2cecc80b4b29848a6c9c 100644 (file)
@@ -175,14 +175,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
        /*
         * for data packets, rate info comes from the table inside the fw. This
-        * table is controlled by LINK_QUALITY commands. Exclude ctrl port
-        * frames like EAPOLs which should be treated as mgmt frames. This
-        * avoids them being sent initially in high rates which increases the
-        * chances for completion of the 4-Way handshake.
+        * table is controlled by LINK_QUALITY commands
         */
 
-       if (ieee80211_is_data(fc) && sta &&
-           !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
+       if (ieee80211_is_data(fc) && sta) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
index 1393bac0025c59de0b24b1453eb7932bebda1b22..3781b029e54a328f6304bfb6cf61d52c053cbce7 100644 (file)
@@ -174,6 +174,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
+       u16 cap;
 
        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -184,16 +185,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
-               /* L1-ASPM enabled; disable(!) L0S */
+       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
-       } else {
-               /* L1-ASPM disabled; enable(!) L0S */
+       else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
-       }
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+       pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+       trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+       dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+                (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+                trans->ltr_enabled ? "En" : "Dis");
 }
 
 /*
@@ -428,7 +430,7 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
        ret = iwl_poll_bit(trans, CSR_RESET,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-       if (ret)
+       if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
 
        IWL_DEBUG_INFO(trans, "stop master\n");
@@ -544,7 +546,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
                msleep(25);
        }
 
-       IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
+       IWL_ERR(trans, "Couldn't prepare the card\n");
 
        return ret;
 }
@@ -1043,7 +1045,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           25000);
-       if (ret) {
+       if (ret < 0) {
                IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
                return ret;
        }
index 58ba718308862391b108a41e0eab1e7ff7fa226a..40b6d1d006d7ab7dc9076a967fedae75259dc1fd 100644 (file)
@@ -467,7 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
                    rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
        /* <2> work queue */
        rtlpriv->works.hw = hw;
-       rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
        INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
                          (void *)rtl_watchdog_wq_callback);
        INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
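
The alloc_workqueue() change above fixes a format-string bug: the driver-supplied rtlpriv->cfg->name used to be passed as the format itself, so any '%' in it would be interpreted as a conversion. The rule it applies — constant format, untrusted string passed as data — in plain C:

#include <stdio.h>

int main(void)
{
	/* Imagine this string comes from configuration or hardware. */
	const char *name = "rtl%s%n-wifi";

	/* BAD: printf(name); -- '%' conversions inside name would be
	 * honored, reading (or, with %n, writing) through nonexistent
	 * arguments.
	 */

	/* GOOD: constant format, untrusted string as an argument. */
	printf("%s\n", name);
	return 0;
}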
index 667aba81246c9ac094db11a5c455be9c29d273ad..25daa8715219c6b89f110bb048cace12e3414159 100644 (file)
@@ -1796,7 +1796,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
        rtl_pci_reset_trx_ring(hw);
 
        rtlpci->driver_is_goingto_unload = false;
-       if (rtlpriv->cfg->ops->get_btc_status()) {
+       if (rtlpriv->cfg->ops->get_btc_status &&
+           rtlpriv->cfg->ops->get_btc_status()) {
                rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
                rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
        }
index a00861b26ece7ae6b2c6168ca22bdaed42c71033..29983bc96a89289187617a5cf202386f74386639 100644 (file)
@@ -656,7 +656,8 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 };
 
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+        bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *))
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -722,7 +723,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
        memcpy((u8 *)skb_put(skb, totalpacketlen),
               &reserved_page_packet, totalpacketlen);
 
-       rtstatus = rtl_cmd_send_packet(hw, skb);
+       if (cmd_send_packet)
+               rtstatus = cmd_send_packet(hw, skb);
+       else
+               rtstatus = rtl_cmd_send_packet(hw, skb);
 
        if (rtstatus)
                b_dlok = true;
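
The hunk above threads a transmit callback through rtl92c_set_fw_rsvdpagepkt() so the USB code can inject usb_cmd_send_packet() while PCI callers pass NULL and keep the old rtl_cmd_send_packet() path. A stripped-down sketch of that callback-with-fallback shape, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct pkt { const char *data; };

typedef bool (*send_fn)(struct pkt *);

/* Default transport, used when the caller passes no override. */
static bool default_send(struct pkt *p)
{
	printf("default path: %s\n", p->data);
	return true;
}

static bool usb_send(struct pkt *p)
{
	printf("usb path: %s\n", p->data);
	return true;
}

/* Shared helper: bus-specific senders are injected; NULL keeps the
 * historical behaviour.
 */
static bool send_rsvd_page(struct pkt *p, send_fn cmd_send_packet)
{
	if (cmd_send_packet)
		return cmd_send_packet(p);
	return default_send(p);
}

int main(void)
{
	struct pkt p = { "reserved page" };

	send_rsvd_page(&p, NULL);	/* PCI-style call site */
	send_rsvd_page(&p, usb_send);	/* USB-style call site */
	return 0;
}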
index a815bd6273da0645cf86f6f77050f220875ec6ff..b64ae45dc6743869da3746a7382683936e86f88c 100644 (file)
@@ -109,7 +109,9 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                         u32 cmd_len, u8 *p_cmdbuffer);
 void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+        bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *));
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
 void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
index 8ec0f031f48a575035dbe59af4b2f236d4260c72..55357d69397a3370d8f4081ecec3d578daa3b8b9 100644 (file)
@@ -459,7 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
 
-                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               rtl92c_set_fw_rsvdpagepkt(hw, NULL);
 
                                _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
index 04aa0b5f5b3d10a1ae16eb8db5ea74d4f38e1890..873363acbacfac12df0d0c724dc962c74b2a2fc7 100644 (file)
@@ -1592,6 +1592,20 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        }
 }
 
+bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       /* Currently nothing happens here.
+        * Traffic stops after some seconds in WPA2 802.11n mode.
+        * Maybe because the rtl8192cu chip should be set up from here?
+        * If I understand correctly, the Realtek vendor driver sends some
+        * URBs when it gets here.
+        *
+        * Something like this may be necessary:
+        * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
+        */
+       return true;
+}
+
 void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1939,7 +1953,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        recover = true;
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
-                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               rtl92c_set_fw_rsvdpagepkt(hw,
+                                                         &usb_cmd_send_packet);
                                _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
                                if (recover)
index 0f7812e0c8aa0c75d5382c52b4872431447c8108..c1e33b0228c0176e1ae31f5a8f99085a4f840397 100644 (file)
@@ -104,7 +104,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
 int rtl92c_download_fw(struct ieee80211_hw *hw);
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
                         u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
index 7c5fbaf5fee0d790d1cfbeaa563a1c360a1a976b..e06bafee37f9764b4041bd64665c46e3a889d238 100644 (file)
@@ -101,6 +101,12 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
        }
 }
 
+/* get bt coexist status */
+static bool rtl92cu_get_btc_status(void)
+{
+       return false;
+}
+
 static struct rtl_hal_ops rtl8192cu_hal_ops = {
        .init_sw_vars = rtl92cu_init_sw_vars,
        .deinit_sw_vars = rtl92cu_deinit_sw_vars,
@@ -148,6 +154,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
        .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
        .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
        .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
+       .get_btc_status = rtl92cu_get_btc_status,
 };
 
 static struct rtl_mod_params rtl92cu_mod_params = {
index dfdc9b20e4ad4d868012c80f4233559e58050052..1a87edca2c3f36a1d540eabc4c3247da5b1959d6 100644 (file)
@@ -362,7 +362,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                         "switch case not process %x\n", variable);
                break;
        }
@@ -591,7 +591,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_BEQEN);
                                break;
                        default:
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                                         "switch case not process\n");
                                break;
                        }
@@ -710,7 +710,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                         "switch case not process %x\n", variable);
                break;
        }
@@ -2424,7 +2424,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
                                 "switch case not process\n");
                        enc_algo = CAM_TKIP;
                        break;
index 9786313dc62f2b3a50143bffc59255e8f1d45af8..1e9570fa874fe6309962d2de2251fc923da424cb 100644 (file)
@@ -1889,15 +1889,18 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
        struct rtl_phy *rtlphy = &rtlpriv->phy;
        u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
 
-       if (band != BAND_ON_2_4G && band != BAND_ON_5G)
+       if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
-
-       if (rfpath >= MAX_RF_PATH)
+               band = BAND_ON_2_4G;
+       }
+       if (rfpath >= MAX_RF_PATH) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
-
-       if (txnum >= MAX_RF_PATH)
+               rfpath = MAX_RF_PATH - 1;
+       }
+       if (txnum >= MAX_RF_PATH) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
-
+               txnum = MAX_RF_PATH - 1;
+       }
        rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
index 10cf69c4bc42ea93adefdab5569cbe90a19a4a63..46ee956d0235dc79dbb8130242b5b2586fd4db5e 100644 (file)
@@ -1117,7 +1117,18 @@ int rtl_usb_probe(struct usb_interface *intf,
        }
        rtlpriv->cfg->ops->init_sw_leds(hw);
 
+       err = ieee80211_register_hw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Can't register mac80211 hw.\n");
+               err = -ENODEV;
+               goto error_out;
+       }
+       rtlpriv->mac80211.mac80211_registered = 1;
+
+       set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
        return 0;
+
 error_out:
        rtl_deinit_core(hw);
        _rtl_usb_io_handler_release(hw);
index d4eb8d2e9cb7f7f2bc0b3816dd788e6eb01c7927..083ecc93fe5e7b0941aedba3ccb9fd27158cc03b 100644 (file)
@@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;
-       RING_IDX rx_last_skb_slots;
-       unsigned long status;
 
-       struct timer_list rx_stalled;
+       unsigned int rx_queue_max;
+       unsigned int rx_queue_len;
+       unsigned long last_rx_time;
+       bool stalled;
 
        struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
 
@@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct xenvif_stats stats;
 };
 
+/* Maximum number of Rx slots a to-guest packet may use, including the
+ * slot needed for GSO meta-data.
+ */
+#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
+
 enum state_bit_shift {
        /* This bit marks that the vif is connected */
        VIF_STATUS_CONNECTED,
-       /* This bit signals the RX thread that queuing was stopped (in
-        * start_xmit), and either the timer fired or an RX interrupt came
-        */
-       QUEUE_STATUS_RX_PURGE_EVENT,
-       /* This bit tells the interrupt handler that this queue was the reason
-        * for the carrier off, so it should kick the thread. Only queues which
-        * brought it down can turn on the carrier.
-        */
-       QUEUE_STATUS_RX_STALLED
 };
 
 struct xenvif {
@@ -228,9 +225,6 @@ struct xenvif {
        u8 ip_csum:1;
        u8 ipv6_csum:1;
 
-       /* Internal feature information. */
-       u8 can_queue:1;     /* can queue packets for receiver? */
-
        /* Is this interface disabled? True when backend discovers
         * frontend is rogue.
         */
@@ -240,6 +234,9 @@ struct xenvif {
        /* Queues */
        struct xenvif_queue *queues;
        unsigned int num_queues; /* active queues, resource allocated */
+       unsigned int stalled_queues;
+
+       spinlock_t lock;
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *xenvif_dbg_root;
@@ -249,6 +246,14 @@ struct xenvif {
        struct net_device *dev;
 };
 
+struct xenvif_rx_cb {
+       unsigned long expires;
+       int meta_slots_used;
+       bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
 {
        return to_xenbus_device(vif->dev->dev.parent);
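
struct xenvif_rx_cb above is overlaid on skb->cb, the 48-byte per-skb scratch area that whichever layer currently owns the skb may reuse, via the casting macro XENVIF_RX_CB(). A userspace sketch of the same overlay pattern; the kernel would use BUILD_BUG_ON() where this uses static_assert:

#include <assert.h>
#include <stdio.h>

/* Stand-in for struct sk_buff: a payload plus a fixed scratch area
 * (skb->cb is 48 bytes in the kernel).
 */
struct buf {
	char cb[48];
	const char *data;
};

/* Per-queue private state kept inside the scratch area. */
struct rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define RX_CB(b) ((struct rx_cb *)(b)->cb)

int main(void)
{
	struct buf b = { .data = "packet" };

	/* The overlay must never outgrow the scratch area. */
	static_assert(sizeof(struct rx_cb) <= sizeof(b.cb), "cb too small");

	RX_CB(&b)->expires = 1000;	/* e.g. jiffies + drain timeout */
	RX_CB(&b)->meta_slots_used = 2;

	printf("expires=%lu slots=%d\n",
	       RX_CB(&b)->expires, RX_CB(&b)->meta_slots_used);
	return 0;
}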
@@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_must_stop_queue(struct xenvif_queue *queue);
-
 int xenvif_queue_stopped(struct xenvif_queue *queue);
 void xenvif_wake_queue(struct xenvif_queue *queue);
 
@@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
index f379689dde309b7bf63eb511ef7f28a827f71fc5..895fe84011e7ea4a2138b33d9e84ab802f1142f6 100644 (file)
@@ -43,6 +43,9 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
 /* This function is used to set SKBTX_DEV_ZEROCOPY as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
        atomic_dec(&queue->inflight_packets);
 }
 
-static inline void xenvif_stop_queue(struct xenvif_queue *queue)
-{
-       struct net_device *dev = queue->vif->dev;
-
-       if (!queue->vif->can_queue)
-               return;
-
-       netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-}
-
 int xenvif_schedulable(struct xenvif *vif)
 {
        return netif_running(vif->dev) &&
-               test_bit(VIF_STATUS_CONNECTED, &vif->status);
+               test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+               !vif->disabled;
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
-       struct netdev_queue *net_queue =
-               netdev_get_tx_queue(queue->vif->dev, queue->id);
 
-       /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
-        * the carrier went down and this queue was previously blocked
-        */
-       if (unlikely(netif_tx_queue_stopped(net_queue) ||
-                    (!netif_carrier_ok(queue->vif->dev) &&
-                     test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
-               set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
        xenvif_kick_thread(queue);
 
        return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
-       struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
-       if (xenvif_queue_stopped(queue)) {
-               set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
-               xenvif_kick_thread(queue);
-       }
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
-       int min_slots_needed;
+       struct xenvif_rx_cb *cb;
 
        BUG_ON(skb->dev != dev);
 
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
            !xenvif_schedulable(vif))
                goto drop;
 
-       /* At best we'll need one slot for the header and one for each
-        * frag.
-        */
-       min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
-       /* If the skb is GSO then we'll also need an extra slot for the
-        * metadata.
-        */
-       if (skb_is_gso(skb))
-               min_slots_needed++;
+       cb = XENVIF_RX_CB(skb);
+       cb->expires = jiffies + rx_drain_timeout_jiffies;
 
-       /* If the skb can't possibly fit in the remaining slots
-        * then turn off the queue to give the ring a chance to
-        * drain.
-        */
-       if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-               queue->rx_stalled.function = xenvif_rx_stalled;
-               queue->rx_stalled.data = (unsigned long)queue;
-               xenvif_stop_queue(queue);
-               mod_timer(&queue->rx_stalled,
-                         jiffies + rx_drain_timeout_jiffies);
-       }
-
-       skb_queue_tail(&queue->rx_queue, skb);
+       xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);
 
        return NETDEV_TX_OK;
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->queues = NULL;
        vif->num_queues = 0;
 
+       spin_lock_init(&vif->lock);
+
        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
        init_timer(&queue->credit_timeout);
        queue->credit_window_start = get_jiffies_64();
 
+       queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);
 
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }
 
-       init_timer(&queue->rx_stalled);
-
        return 0;
 }
 
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
-       netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                disable_irq(queue->rx_irq);
        }
 
+       queue->stalled = true;
+
        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
                netif_napi_del(&queue->napi);
 
                if (queue->task) {
-                       del_timer_sync(&queue->rx_stalled);
                        kthread_stop(queue->task);
                        queue->task = NULL;
                }
index 08f65996534cbd1b861c0031b70ebd6d78d47720..6563f0713fc0f5e324a18e2d87ba1f4614e1a008 100644 (file)
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
-/* When guest ring is filled up, qdisc queues the packets for us, but we have
- * to timeout them, otherwise other guests' packets can get stuck there
+/* The time that packets can stay on the guest Rx internal queue
+ * before they are dropped.
  */
 unsigned int rx_drain_timeout_msecs = 10000;
 module_param(rx_drain_timeout_msecs, uint, 0444);
 unsigned int rx_drain_timeout_jiffies;
 
+/* The length of time before the frontend is considered unresponsive
+ * because it isn't providing Rx slots.
+ */
+static unsigned int rx_stall_timeout_msecs = 60000;
+module_param(rx_stall_timeout_msecs, uint, 0444);
+static unsigned int rx_stall_timeout_jiffies;
+
 unsigned int xenvif_max_queues;
 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
@@ -83,7 +90,6 @@ static void make_tx_response(struct xenvif_queue *queue,
                             s8       st);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
-static inline int rx_work_todo(struct xenvif_queue *queue);
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                             u16      id,
@@ -163,6 +169,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
        return false;
 }
 
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+       __skb_queue_tail(&queue->rx_queue, skb);
+
+       queue->rx_queue_len += skb->len;
+       if (queue->rx_queue_len > queue->rx_queue_max)
+               netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+}
+
+static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+
+       spin_lock_irq(&queue->rx_queue.lock);
+
+       skb = __skb_dequeue(&queue->rx_queue);
+       if (skb)
+               queue->rx_queue_len -= skb->len;
+
+       spin_unlock_irq(&queue->rx_queue.lock);
+
+       return skb;
+}
+
+static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
+{
+       spin_lock_irq(&queue->rx_queue.lock);
+
+       if (queue->rx_queue_len < queue->rx_queue_max)
+               netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+       spin_unlock_irq(&queue->rx_queue.lock);
+}
+
+
+static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+       while ((skb = xenvif_rx_dequeue(queue)) != NULL)
+               kfree_skb(skb);
+}
+
+static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_peek(&queue->rx_queue);
+               if (!skb)
+                       break;
+               if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
+                       break;
+               xenvif_rx_dequeue(queue);
+               kfree_skb(skb);
+       }
+}
+
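
The new helpers above replace per-skb ring-slot estimates with a simple byte budget: xenvif_rx_queue_tail() stops the transmit queue once rx_queue_len exceeds rx_queue_max, dequeueing shrinks the count, and each skb carries an expiry so stale packets are dropped instead of pinning grant refs. A compact single-threaded sketch of that discipline (locking omitted, and the wake-up check is folded into dequeue here, whereas the driver does it separately in xenvif_rx_queue_maybe_wake()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy packet: length in bytes plus an expiry tick. */
struct pkt {
	struct pkt *next;
	unsigned int len;
	unsigned long expires;
};

struct rx_queue {
	struct pkt *head, **tail;
	unsigned int bytes;		/* like rx_queue_len */
	unsigned int max_bytes;		/* like rx_queue_max */
	bool tx_stopped;		/* stand-in for netif_tx_stop_queue() */
};

static void rx_enqueue(struct rx_queue *q, struct pkt *p)
{
	*q->tail = p;
	q->tail = &p->next;
	q->bytes += p->len;
	if (q->bytes > q->max_bytes)
		q->tx_stopped = true;	/* backpressure the sender */
}

static struct pkt *rx_dequeue(struct rx_queue *q)
{
	struct pkt *p = q->head;

	if (!p)
		return NULL;
	q->head = p->next;
	if (!q->head)
		q->tail = &q->head;
	q->bytes -= p->len;
	if (q->bytes < q->max_bytes)
		q->tx_stopped = false;	/* wake the sender again */
	return p;
}

/* Drop packets whose deadline has passed, oldest first. */
static void rx_drop_expired(struct rx_queue *q, unsigned long now)
{
	while (q->head && now >= q->head->expires)
		free(rx_dequeue(q));
}

int main(void)
{
	struct rx_queue q = { .tail = &q.head, .max_bytes = 3000 };
	unsigned long now = 0;

	for (int i = 0; i < 4; i++) {
		struct pkt *p = calloc(1, sizeof(*p));
		p->len = 1500;
		p->expires = now + 10;
		rx_enqueue(&q, p);
	}
	printf("bytes=%u stopped=%d\n", q.bytes, q.tx_stopped);
	rx_drop_expired(&q, 11);	/* everything timed out */
	printf("bytes=%u stopped=%d\n", q.bytes, q.tx_stopped);
	return 0;
}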
 /*
  * Returns true if we should start a new receive buffer instead of
  * adding 'size' bytes to a buffer which currently contains 'offset'
@@ -237,13 +306,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
        return meta;
 }
 
-struct xenvif_rx_cb {
-       int meta_slots_used;
-       bool full_coalesce;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
@@ -587,12 +649,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
        skb_queue_head_init(&rxq);
 
-       while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
+       while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+              && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                RING_IDX max_slots_needed;
                RING_IDX old_req_cons;
                RING_IDX ring_slots_used;
                int i;
 
+               queue->last_rx_time = jiffies;
+
                /* We need a cheap worse case estimate for the number of
                 * slots we'll use.
                 */
@@ -634,15 +699,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
                        max_slots_needed++;
 
-               /* If the skb may not fit then bail out now */
-               if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
-                       skb_queue_head(&queue->rx_queue, skb);
-                       need_to_notify = true;
-                       queue->rx_last_skb_slots = max_slots_needed;
-                       break;
-               } else
-                       queue->rx_last_skb_slots = 0;
-
                old_req_cons = queue->rx.req_cons;
                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
                ring_slots_used = queue->rx.req_cons - old_req_cons;
@@ -1869,12 +1925,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
        }
 }
 
-static inline int rx_work_todo(struct xenvif_queue *queue)
-{
-       return (!skb_queue_empty(&queue->rx_queue) &&
-              xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
-}
-
 static inline int tx_work_todo(struct xenvif_queue *queue)
 {
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
@@ -1931,92 +1981,121 @@ err:
        return err;
 }
 
-static void xenvif_start_queue(struct xenvif_queue *queue)
+static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
 {
-       if (xenvif_schedulable(queue->vif))
-               xenvif_wake_queue(queue);
+       struct xenvif *vif = queue->vif;
+
+       queue->stalled = true;
+
+       /* At least one queue has stalled? Disable the carrier. */
+       spin_lock(&vif->lock);
+       if (vif->stalled_queues++ == 0) {
+               netdev_info(vif->dev, "Guest Rx stalled");
+               netif_carrier_off(vif->dev);
+       }
+       spin_unlock(&vif->lock);
 }
 
-/* Only called from the queue's thread, it handles the situation when the guest
- * doesn't post enough requests on the receiving ring.
- * First xenvif_start_xmit disables QDisc and start a timer, and then either the
- * timer fires, or the guest send an interrupt after posting new request. If it
- * is the timer, the carrier is turned off here.
- * */
-static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
 {
-       /* Either the last unsuccesful skb or at least 1 slot should fit */
-       int needed = queue->rx_last_skb_slots ?
-                    queue->rx_last_skb_slots : 1;
+       struct xenvif *vif = queue->vif;
 
-       /* It is assumed that if the guest post new slots after this, the RX
-        * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
-        * the thread again
-        */
-       set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
-       if (!xenvif_rx_ring_slots_available(queue, needed)) {
-               rtnl_lock();
-               if (netif_carrier_ok(queue->vif->dev)) {
-                       /* Timer fired and there are still no slots. Turn off
-                        * everything except the interrupts
-                        */
-                       netif_carrier_off(queue->vif->dev);
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
-               } else {
-                       /* Probably an another queue already turned the carrier
-                        * off, make sure nothing is stucked in the internal
-                        * queue of this queue
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-               }
-               rtnl_unlock();
-       } else if (!netif_carrier_ok(queue->vif->dev)) {
-               unsigned int num_queues = queue->vif->num_queues;
-               unsigned int i;
-               /* The carrier was down, but an interrupt kicked
-                * the thread again after new requests were
-                * posted
-                */
-               clear_bit(QUEUE_STATUS_RX_STALLED,
-                         &queue->status);
-               rtnl_lock();
-               netif_carrier_on(queue->vif->dev);
-               netif_tx_wake_all_queues(queue->vif->dev);
-               rtnl_unlock();
+       queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
+       queue->stalled = false;
 
-               for (i = 0; i < num_queues; i++) {
-                       struct xenvif_queue *temp = &queue->vif->queues[i];
+       /* All queues are ready? Enable the carrier. */
+       spin_lock(&vif->lock);
+       if (--vif->stalled_queues == 0) {
+               netdev_info(vif->dev, "Guest Rx ready");
+               netif_carrier_on(vif->dev);
+       }
+       spin_unlock(&vif->lock);
+}
 
-                       xenvif_napi_schedule_or_enable_events(temp);
-               }
-               if (net_ratelimit())
-                       netdev_err(queue->vif->dev, "Carrier on again\n");
-       } else {
-               /* Queuing were stopped, but the guest posted
-                * new requests and sent an interrupt
-                */
-               clear_bit(QUEUE_STATUS_RX_STALLED,
-                         &queue->status);
-               del_timer_sync(&queue->rx_stalled);
-               xenvif_start_queue(queue);
+static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+
+       prod = queue->rx.sring->req_prod;
+       cons = queue->rx.req_cons;
+
+       return !queue->stalled
+               && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+               && time_after(jiffies,
+                             queue->last_rx_time + rx_stall_timeout_jiffies);
+}
+
+static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+
+       prod = queue->rx.sring->req_prod;
+       cons = queue->rx.req_cons;
+
+       return queue->stalled
+               && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+}
+
+static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+{
+       return (!skb_queue_empty(&queue->rx_queue)
+               && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+               || xenvif_rx_queue_stalled(queue)
+               || xenvif_rx_queue_ready(queue)
+               || kthread_should_stop()
+               || queue->vif->disabled;
+}
+
+static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
+{
+       struct sk_buff *skb;
+       long timeout;
+
+       skb = skb_peek(&queue->rx_queue);
+       if (!skb)
+               return MAX_SCHEDULE_TIMEOUT;
+
+       timeout = XENVIF_RX_CB(skb)->expires - jiffies;
+       return timeout < 0 ? 0 : timeout;
+}
+
+/* Wait until the guest Rx thread has work.
+ *
+ * The timeout needs to be adjusted based on the current head of the
+ * queue (and not just the head at the beginning).  In particular, if
+ * the queue is initially empty, an infinite timeout is used and this
+ * needs to be reduced when a skb is queued.
+ *
+ * This cannot be done with wait_event_timeout() because it only
+ * calculates the timeout once.
+ */
+static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+{
+       DEFINE_WAIT(wait);
+
+       if (xenvif_have_rx_work(queue))
+               return;
+
+       for (;;) {
+               long ret;
+
+               prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+               if (xenvif_have_rx_work(queue))
+                       break;
+               ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+               if (!ret)
+                       break;
        }
+       finish_wait(&queue->wq, &wait);
 }
 
 int xenvif_kthread_guest_rx(void *data)
 {
        struct xenvif_queue *queue = data;
-       struct sk_buff *skb;
+       struct xenvif *vif = queue->vif;
 
-       while (!kthread_should_stop()) {
-               wait_event_interruptible(queue->wq,
-                                        rx_work_todo(queue) ||
-                                        queue->vif->disabled ||
-                                        test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
-                                        kthread_should_stop());
+       for (;;) {
+               xenvif_wait_for_rx_work(queue);
 
                if (kthread_should_stop())
                        break;
@@ -2028,35 +2107,38 @@ int xenvif_kthread_guest_rx(void *data)
                 * context so we defer it here, if this thread is
                 * associated with queue 0.
                 */
-               if (unlikely(queue->vif->disabled && queue->id == 0)) {
-                       xenvif_carrier_off(queue->vif);
-               } else if (unlikely(queue->vif->disabled)) {
-                       /* kthread_stop() would be called upon this thread soon,
-                        * be a bit proactive
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
-               } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
-                                                    &queue->status))) {
-                       xenvif_rx_purge_event(queue);
-               } else if (!netif_carrier_ok(queue->vif->dev)) {
-                       /* Another queue stalled and turned the carrier off, so
-                        * purge the internal queue of queues which were not
-                        * blocked
-                        */
-                       skb_queue_purge(&queue->rx_queue);
-                       queue->rx_last_skb_slots = 0;
+               if (unlikely(vif->disabled && queue->id == 0)) {
+                       xenvif_carrier_off(vif);
+                       xenvif_rx_queue_purge(queue);
+                       continue;
                }
 
                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);
 
+               /* If the guest hasn't provided any Rx slots for a
+                * while it's probably not responsive, drop the
+                * carrier so packets are dropped earlier.
+                */
+               if (xenvif_rx_queue_stalled(queue))
+                       xenvif_queue_carrier_off(queue);
+               else if (xenvif_rx_queue_ready(queue))
+                       xenvif_queue_carrier_on(queue);
+
+               /* Queued packets may have foreign pages from other
+                * domains.  These cannot be queued indefinitely as
+                * this would starve guests of grant refs and transmit
+                * slots.
+                */
+               xenvif_rx_queue_drop_expired(queue);
+
+               xenvif_rx_queue_maybe_wake(queue);
+
                cond_resched();
        }
 
        /* Bin any remaining skbs */
-       while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
-               dev_kfree_skb(skb);
+       xenvif_rx_queue_purge(queue);
 
        return 0;
 }
@@ -2113,6 +2195,7 @@ static int __init netback_init(void)
                goto failed_init;
 
        rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+       rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
 
 #ifdef CONFIG_DEBUG_FS
        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
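
For reference: the stall/ready helpers called from the Rx thread above are introduced elsewhere in this patch. A minimal sketch of the stall test, assuming a per-queue last_rx_time stamp, a stalled flag, and the rx_stall_timeout_jiffies value initialised here (the slot threshold is illustrative):

static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
        RING_IDX prod = queue->rx.sring->req_prod;
        RING_IDX cons = queue->rx.req_cons;

        /* Stalled: the guest has not posted enough Rx requests for
         * the next packet within the stall timeout.
         */
        return !queue->stalled &&
               prod - cons < XEN_NETBK_RX_SLOTS_MAX &&
               time_after(jiffies,
                          queue->last_rx_time + rx_stall_timeout_jiffies);
}

xenvif_rx_queue_ready() is the symmetric test, letting the carrier come back on once the guest posts Rx requests again.
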
index 8079c31ac5e64372fe978405c8faa72981998320..4e56a27f9689a925ff3cf26118c6ee505eb963a4 100644 (file)
@@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
        struct xenvif_queue *queue = m->private;
        struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
        struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
+       struct netdev_queue *dev_queue;
 
        if (tx_ring->sring) {
                struct xen_netif_tx_sring *sring = tx_ring->sring;
@@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
                   queue->credit_timeout.expires,
                   jiffies);
 
+       dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
+
+       seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
+                  queue->rx_queue_len, queue->rx_queue_max,
+                  skb_queue_len(&queue->rx_queue),
+                  netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
+
        return 0;
 }
 
@@ -703,6 +711,7 @@ static void connect(struct backend_info *be)
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
        be->vif->num_queues = requested_num_queues;
+       be->vif->stalled_queues = requested_num_queues;
 
        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
@@ -873,15 +882,10 @@ static int read_xenbus_vif_flags(struct backend_info *be)
        if (!rx_copy)
                return -EOPNOTSUPP;
 
-       if (vif->dev->tx_queue_len != 0) {
-               if (xenbus_scanf(XBT_NIL, dev->otherend,
-                                "feature-rx-notify", "%d", &val) < 0)
-                       val = 0;
-               if (val)
-                       vif->can_queue = 1;
-               else
-                       /* Must be non-zero for pfifo_fast to work. */
-                       vif->dev->tx_queue_len = 1;
+       if (xenbus_scanf(XBT_NIL, dev->otherend,
+                        "feature-rx-notify", "%d", &val) < 0 || val == 0) {
+               xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
+               return -EINVAL;
        }
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
index a59d9343c25bdc9575eefa1ac361732599db9ddf..6c8b6f604e7609f2d320a7aa572a8ec6a870f4b8 100644 (file)
@@ -557,7 +557,9 @@ struct sk_buff {
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
         */
+       /* private: */
        __u32                   headers_start[0];
+       /* public: */
 
 /* if you move pkt_type around you also must adapt those constants */
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -642,7 +644,9 @@ struct sk_buff {
        __u16                   network_header;
        __u16                   mac_header;
 
+       /* private: */
        __u32                   headers_end[0];
+       /* public: */
 
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
@@ -795,15 +799,19 @@ struct sk_buff_fclones {
  *     @skb: buffer
  *
 * Returns true if skb is a fast clone, and its clone is not freed.
+ * Some drivers call skb_orphan() in their ndo_start_xmit(),
+ * so we also check that this didn't happen.
  */
-static inline bool skb_fclone_busy(const struct sk_buff *skb)
+static inline bool skb_fclone_busy(const struct sock *sk,
+                                  const struct sk_buff *skb)
 {
        const struct sk_buff_fclones *fclones;
 
        fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
        return skb->fclone == SKB_FCLONE_ORIG &&
-              fclones->skb2.fclone == SKB_FCLONE_CLONE;
+              fclones->skb2.fclone == SKB_FCLONE_CLONE &&
+              fclones->skb2.sk == sk;
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
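
The extra sk comparison catches drivers that orphan the skb from ndo_start_xmit(): skb_orphan() clears skb->sk on the clone, so a clone that has already left the host queue no longer matches the caller's socket. The check also leans on fast clones being carved from a single allocation, roughly:

struct sk_buff_fclones {
        struct sk_buff  skb1;           /* the original, SKB_FCLONE_ORIG */
        struct sk_buff  skb2;           /* its fast clone, SKB_FCLONE_CLONE */
        atomic_t        fclone_ref;     /* refcount shared by the pair */
};

container_of() on skb1 therefore reaches skb2 without any extra pointer.
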
index 26088feb660813448ac93391bee76c1e73b57d70..d9a4905e01d0c98b88e89c7db5c85bbf7d5be8a1 100644 (file)
@@ -78,6 +78,7 @@ struct usbnet {
 #              define EVENT_NO_RUNTIME_PM      9
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
+#              define EVENT_SET_RX_MODE        12
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
        /* called by minidriver when receiving indication */
        void    (*indication)(struct usbnet *dev, void *ind, int indlen);
 
+       /* rx mode change (device changes address list filtering) */
+       void    (*set_rx_mode)(struct usbnet *dev);
+
        /* for new devices, use the descriptor-reading code instead */
        int             in;             /* rx endpoint */
        int             out;            /* tx endpoint */
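
A hedged sketch of a minidriver implementing the new hook; the MY_* register names and my_write_reg() helper are hypothetical stand-ins for a real driver's vendor control transfers:

static void my_set_rx_mode(struct usbnet *dev)
{
        u8 rx_ctl = MY_RX_UNICAST;                      /* hypothetical */

        if (dev->net->flags & IFF_PROMISC)
                rx_ctl |= MY_RX_PROMISC;
        else if ((dev->net->flags & IFF_ALLMULTI) ||
                 netdev_mc_count(dev->net) > MY_MCAST_MAX)
                rx_ctl |= MY_RX_ALLMULTI;

        my_write_reg(dev, MY_REG_RX_CTL, rx_ctl);       /* hypothetical */
}

The EVENT_SET_RX_MODE flag above lets usbnet defer the call to its work queue, since control transfers may sleep while ndo_set_rx_mode() runs in atomic context.
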
index 97f472012438b10a0bcbff9eef6c553d7357ad35..4292929392b0127479c49c5da3bb33f053f4428c 100644 (file)
@@ -671,6 +671,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
+void ipv6_proxy_select_ident(struct sk_buff *skb);
+
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
 static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
index 3ee28ae02cc89cb0c0d3b2a9ca112c1edfda2e96..2081a4d3d9171f5323a077a1f3a9805223e715be 100644 (file)
@@ -1341,6 +1341,10 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
 config HAVE_PCSPKR_PLATFORM
        bool
 
+# interpreter that classic socket filters depend on
+config BPF
+       bool
+
 menuconfig EXPERT
        bool "Configure standard kernel features (expert users)"
        # Unhide debug options, to make the on-by-default options visible
@@ -1521,6 +1525,16 @@ config EVENTFD
 
          If unsure, say Y.
 
+# syscall, maps, verifier
+config BPF_SYSCALL
+       bool "Enable bpf() system call" if EXPERT
+       select ANON_INODES
+       select BPF
+       default n
+       help
+         Enable the bpf() system call, which allows userspace to
+         manipulate eBPF programs and maps via file descriptors.
+
 config SHMEM
        bool "Use full shmem filesystem" if EXPERT
        default y
index dc5c77544fd69f6924adc25c3f4a8d3530708392..17ea6d4a9a247ce261f8ae22a3f07f0bb80393e1 100644 (file)
@@ -86,7 +86,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
-obj-$(CONFIG_NET) += bpf/
+obj-$(CONFIG_BPF) += bpf/
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
index 45427239f375c564049951e00e3925aacff8bc3f..0daf7f6ae7df565c43273b8fb3a553bc1ffdd622 100644 (file)
@@ -1,5 +1,5 @@
-obj-y := core.o syscall.o verifier.o
-
+obj-y := core.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
 ifdef CONFIG_TEST_BPF
-obj-y += test_stub.o
+obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
 endif
index f0c30c59b31755ff1e124e0a5a0aaa0e356efc9e..d6594e457a25af32e41a4b46a00a43ddf0a2c46b 100644 (file)
@@ -655,3 +655,12 @@ void bpf_prog_free(struct bpf_prog *fp)
        schedule_work(&aux->work);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
+
+/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
+ * skb_copy_bits(), so provide a weak definition of it for NET-less configs.
+ */
+int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
+                        int len)
+{
+       return -EFAULT;
+}
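
The __weak attribute makes this a fallback: when CONFIG_NET is set, the strong skb_copy_bits() in net/core/skbuff.c wins at link time and the stub is never used. A self-contained illustration of the pattern, with hypothetical names:

/* fallback.c: used only if no strong definition is linked in */
int __weak do_thing(void)
{
        return -EFAULT;
}

/* real.c: when built in, this strong definition overrides the weak one */
int do_thing(void)
{
        return 0;
}
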
index 6272420a721b0f01e15ffc572251a8f0794d5d8f..99815b5454bf4d68fdc36153951e434f7219b4d0 100644 (file)
@@ -6,7 +6,7 @@ menuconfig NET
        bool "Networking support"
        select NLATTR
        select GENERIC_NET_UTILS
-       select ANON_INODES
+       select BPF
        ---help---
          Unless you really know what you are doing, you should say Y here.
          The reason is that some programs need kernel networking support even
index b793e3521a3631319bf4d0e7c17c0c9a933331da..945bbd0013592634be3bc840544d8b25919ea11d 100644 (file)
@@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
+       if (unlikely(skb->pfmemalloc)) {
+               consume_skb(skb);
+               return;
+       }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
index 8c3203c585b0c116e7d8ea8f66feeda7c4a73452..630b30b4fb5368332428289e449b3247be0cd97b 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/export.h>
 #include <net/ip.h>
 #include <net/tso.h>
+#include <asm/unaligned.h>
 
 /* Calculate expected number of TX descriptors */
 int tso_count_descs(struct sk_buff *skb)
@@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
        iph->id = htons(tso->ip_id);
        iph->tot_len = htons(size + hdr_len - mac_hdr_len);
        tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
-       tcph->seq = htonl(tso->tcp_seq);
+       put_unaligned_be32(tso->tcp_seq, &tcph->seq);
        tso->ip_id++;
 
        if (!is_last) {
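
With tunnelling, the TCP header inside the rebuilt header block may land at an odd offset, and a plain htonl() store through tcph->seq can trap on strict-alignment architectures. put_unaligned_be32() compiles to byte-wise stores only where the CPU requires them; in intent it is:

#include <asm/unaligned.h>

/* safe for any alignment of p, unlike *(__be32 *)p = htonl(val) */
static void store_be32(void *p, u32 val)
{
        put_unaligned_be32(val, p);
}
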
index 22f34cf4cb27d1aad06dcb41a1f6d84d4dffd688..6317b41c99b0a58bb82ede4a1205d8c3e7f94283 100644 (file)
@@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                        dst->rcv = brcm_netdev_ops.rcv;
                        break;
 #endif
-               default:
+               case DSA_TAG_PROTO_NONE:
                        break;
+               default:
+                       ret = -ENOPROTOOPT;
+                       goto out;
                }
 
                dst->tag_protocol = drv->tag_protocol;
index f6e345c0bc23949957732c6786deb1b8bc1dd68f..bb5947b0ce2dfbd94a0b85a13983c060b2984075 100644 (file)
@@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
        greh = (struct gre_base_hdr *)skb_transport_header(skb);
 
-       ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
+       ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
        if (unlikely(ghl < sizeof(*greh)))
                goto out;
 
index 9eb89f3f0ee4cb88bf065956c45453999d799aa8..19419b60cb37d1c59c8238b49eab77d87ff0b4a1 100644 (file)
@@ -146,7 +146,6 @@ evict_again:
                        atomic_inc(&fq->refcnt);
                        spin_unlock(&hb->chain_lock);
                        del_timer_sync(&fq->timer);
-                       WARN_ON(atomic_read(&fq->refcnt) != 1);
                        inet_frag_put(fq, f);
                        goto evict_again;
                }
@@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
        struct inet_frag_bucket *hb;
 
        hb = get_frag_bucket_locked(fq, f);
-       hlist_del(&fq->list);
+       if (!(fq->flags & INET_FRAG_EVICTED))
+               hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);
 }
 
index 2d4ae469b471a4fff941e0dd13d014dfd222f6a8..6a2155b02602b100c7ce3bbfda28a38090add7a4 100644 (file)
@@ -1798,6 +1798,7 @@ local_input:
 no_route:
        RT_CACHE_STAT_INC(in_no_route);
        res.type = RTN_UNREACHABLE;
+       res.fi = NULL;
        goto local_input;
 
        /*
index 1bec4e76d88c5852d8ba3392b22aa58d6453ab4d..39ec0c379545afe07d6016c9180e9c408d22d4df 100644 (file)
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-               if (p->md5_desc.tfm)
-                       crypto_free_hash(p->md5_desc.tfm);
-       }
-       free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
        int cpu;
-       struct tcp_md5sig_pool __percpu *pool;
-
-       pool = alloc_percpu(struct tcp_md5sig_pool);
-       if (!pool)
-               return;
 
        for_each_possible_cpu(cpu) {
-               struct crypto_hash *hash;
-
-               hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR_OR_NULL(hash))
-                       goto out_free;
+               if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+                       struct crypto_hash *hash;
 
-               per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+                       hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+                       if (IS_ERR_OR_NULL(hash))
+                               return;
+                       per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+               }
        }
-       /* before setting tcp_md5sig_pool, we must commit all writes
-        * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+       /* before setting tcp_md5sig_pool_populated, we must commit all writes
+        * to memory. See smp_rmb() in tcp_get_md5sig_pool()
         */
        smp_wmb();
-       tcp_md5sig_pool = pool;
-       return;
-out_free:
-       __tcp_free_md5sig_pool(pool);
+       tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-       if (unlikely(!tcp_md5sig_pool)) {
+       if (unlikely(!tcp_md5sig_pool_populated)) {
                mutex_lock(&tcp_md5sig_mutex);
 
-               if (!tcp_md5sig_pool)
+               if (!tcp_md5sig_pool_populated)
                        __tcp_alloc_md5sig_pool();
 
                mutex_unlock(&tcp_md5sig_mutex);
        }
-       return tcp_md5sig_pool != NULL;
+       return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool __percpu *p;
-
        local_bh_disable();
-       p = ACCESS_ONCE(tcp_md5sig_pool);
-       if (p)
-               return raw_cpu_ptr(p);
 
+       if (tcp_md5sig_pool_populated) {
+               /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+               smp_rmb();
+               return this_cpu_ptr(&tcp_md5sig_pool);
+       }
        local_bh_enable();
        return NULL;
 }
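
The conversion keeps the original one-shot publication scheme, just with static per-cpu storage: the writer fills every per-cpu tfm, issues smp_wmb(), then sets the flag; a reader that observes the flag issues smp_rmb() before touching the pool. A stripped-down sketch of the pairing, with hypothetical helpers:

static bool published;

static void publish(void)
{
        init_all_state();       /* hypothetical: fill per-cpu data */
        smp_wmb();              /* commit the data before the flag */
        published = true;
}

static bool consume(void)
{
        if (!published)
                return false;
        smp_rmb();              /* pairs with smp_wmb() in publish() */
        use_state();            /* hypothetical: per-cpu data now valid */
        return true;
}

Note the flag is never cleared, which is what lets the pool live forever without reference counting.
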
index 94d1a7757ff7462edf6c32406df7187b4839bf4f..9c7d7621466b1241f404a5ca11de809dcff2d02a 100644 (file)
@@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;
 
-       inet_set_txhash(sk);
-
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
+       inet_set_txhash(sk);
+
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
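
The move matters because the source port is only assigned by inet_hash_connect() just above; hashing earlier folded a zero sport into the flow hash. For context, a sketch of what the 3.18-era helper computes:

static inline void inet_set_txhash(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct flow_keys keys;

        keys.src = inet->inet_saddr;
        keys.dst = inet->inet_daddr;
        keys.port16[0] = inet->inet_sport;      /* zero before hash_connect */
        keys.port16[1] = inet->inet_dport;

        sk->sk_txhash = flow_hash_from_keys(&keys);
}

The equivalent ip6_set_txhash() move appears in the tcp_ipv6.c hunk below.
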
index 3af21296d96788b899daaa25562301e38036e802..a3d453b94747c22dd0fad07dfe606fdd11c414b1 100644 (file)
@@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 static bool skb_still_in_host_queue(const struct sock *sk,
                                    const struct sk_buff *skb)
 {
-       if (unlikely(skb_fclone_busy(skb))) {
+       if (unlikely(skb_fclone_busy(sk, skb))) {
                NET_INC_STATS_BH(sock_net(sk),
                                 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
                return true;
index 725c763270a067e3945ec3056c4f5893cf9f7060..0169ccf5aa4fec8f691ea3bed77b4c7e5587d2a5 100644 (file)
@@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        }
 
        write_unlock_bh(&idev->lock);
+       inet6_ifinfo_notify(RTM_NEWLINK, idev);
        addrconf_verify_rtnl();
        return 0;
 }
index fc24c390af0541050782c76434ccbbf9e00c24f0..97f41a3e68d98b89d6b8f643780f8a883bd46f55 100644 (file)
@@ -3,11 +3,45 @@
  * not configured or static.  These functions are needed by GSO/GRO implementation.
  */
 #include <linux/export.h>
+#include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+/* This function exists only for tap drivers that must support broken
+ * clients requesting UFO without specifying an IPv6 fragment ID.
+ *
+ * This is similar to ipv6_select_ident() but we use an independent hash
+ * seed to limit information leakage.
+ *
+ * The network header must be set before calling this.
+ */
+void ipv6_proxy_select_ident(struct sk_buff *skb)
+{
+       static u32 ip6_proxy_idents_hashrnd __read_mostly;
+       struct in6_addr buf[2];
+       struct in6_addr *addrs;
+       u32 hash, id;
+
+       addrs = skb_header_pointer(skb,
+                                  skb_network_offset(skb) +
+                                  offsetof(struct ipv6hdr, saddr),
+                                  sizeof(buf), buf);
+       if (!addrs)
+               return;
+
+       net_get_random_once(&ip6_proxy_idents_hashrnd,
+                           sizeof(ip6_proxy_idents_hashrnd));
+
+       hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&addrs[0], hash);
+
+       id = ip_idents_reserve(hash, 1);
+       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+}
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
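
A hedged sketch of the intended caller: a tap driver patching up a guest UFO packet that arrived without a fragment ID (the surrounding checks are illustrative, not lifted from this patch):

static void fixup_ipv6_frag_id(struct sk_buff *skb)
{
        if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP) &&
            skb->protocol == htons(ETH_P_IPV6) &&
            !skb_shinfo(skb)->ip6_frag_id)
                ipv6_proxy_select_ident(skb);
}
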
index 831495529b829c8ff4bc4ec3a8f0e7cf33b011b5..ace29b60813cf8a1d7182ad2262cbcbd21810fa7 100644 (file)
@@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
-       ip6_set_txhash(sk);
-
        /*
         *      TCP over IPv4
         */
@@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (err)
                goto late_failure;
 
+       ip6_set_txhash(sk);
+
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             sk->sk_v6_daddr.s6_addr32,
index ac49f84fe2c34026b7af5392df06fc232ea3b9e1..5f983644373a230890b25189865af73f5e2b3b44 100644 (file)
@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_DCCP:
                        if (!onlyproto && (nh + offset + 4 < skb->data ||
                             pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
-                               __be16 *ports = (__be16 *)exthdr;
+                               __be16 *ports;
 
+                               nh = skb_network_header(skb);
+                               ports = (__be16 *)(nh + offset);
                                fl6->fl6_sport = ports[!!reverse];
                                fl6->fl6_dport = ports[!reverse];
                        }
@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
                case IPPROTO_ICMPV6:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
-                               u8 *icmp = (u8 *)exthdr;
+                               u8 *icmp;
 
+                               nh = skb_network_header(skb);
+                               icmp = (u8 *)(nh + offset);
                                fl6->fl6_icmp_type = icmp[0];
                                fl6->fl6_icmp_code = icmp[1];
                        }
@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_MH:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
                                struct ip6_mh *mh;
-                               mh = (struct ip6_mh *)exthdr;
 
+                               nh = skb_network_header(skb);
+                               mh = (struct ip6_mh *)(nh + offset);
                                fl6->fl6_mh_type = mh->ip6mh_type;
                        }
                        fl6->flowi6_proto = nexthdr;
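
All three hunks fix the same bug: pskb_may_pull() may reallocate the skb head, so exthdr, computed before the pull, could point into freed memory. The safe pattern the fix adopts, sketched with hypothetical names (offset measured from skb->data):

static void parse_ports(struct sk_buff *skb, unsigned int offset,
                        struct flowi6 *fl6)
{
        if (!pskb_may_pull(skb, offset + 4))
                return;

        /* recompute after the pull: skb->head may have moved */
        u8 *nh = skb_network_header(skb);
        __be16 *ports = (__be16 *)(nh + offset);

        fl6->fl6_sport = ports[0];
        fl6->fl6_dport = ports[1];
}
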
index fb6a1502b6dfa64980305c3bba380408ef3dde14..343da1e35025199bbef7d6680e0770fbdc3eadd2 100644 (file)
@@ -3458,7 +3458,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (chanctx_conf) {
-               *chandef = chanctx_conf->def;
+               *chandef = sdata->vif.bss_conf.chandef;
                ret = 0;
        } else if (local->open_count > 0 &&
                   local->open_count == local->monitors &&
index 8fdadfd94ba8576ae8bc0ee2e99e8c656c2f6a5b..6081329784dd4475ae95317dde31910efd760525 100644 (file)
@@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
         */
        if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
                u32 basic_rates = vif->bss_conf.basic_rates;
-               s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
+               s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
 
                rate = &sband->bitrates[rates[0].idx];
 
index edde723f9f009e33388f5a6f89129f445fae37a2..2acab1bcaa4b377012a31c6354ceeb61fadbb171 100644 (file)
@@ -62,14 +62,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
        unsigned int i, tp, prob, eprob;
        char *p;
 
-       ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
+       ms = kmalloc(2048, GFP_KERNEL);
        if (!ms)
                return -ENOMEM;
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "rate      throughput  ewma prob  this prob  "
-                       "this succ/attempt   success    attempts\n");
+       p += sprintf(p, "rate          tpt eprob *prob"
+                       "  *ok(*cum)        ok(      cum)\n");
        for (i = 0; i < mi->n_rates; i++) {
                struct minstrel_rate *mr = &mi->r[i];
                struct minstrel_rate_stats *mrs = &mi->r[i].stats;
@@ -86,8 +86,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
                prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->probability * 1000);
 
-               p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
-                               "   %3u(%3u)  %8llu    %8llu\n",
+               p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u"
+                               " %4u(%4u) %9llu(%9llu)\n",
                                tp / 10, tp % 10,
                                eprob / 10, eprob % 10,
                                prob / 10, prob % 10,
@@ -102,6 +102,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
                        mi->sample_packets);
        ms->len = p - ms->buf;
 
+       WARN_ON(ms->len + sizeof(*ms) > 2048);
+
        return 0;
 }
 
index a72ad46f2a04b6e557044d52da1f033c7be2c531..d537bec9375463eb115b0f26260f8f3d03759596 100644 (file)
@@ -63,8 +63,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
                prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mr->probability * 1000);
 
-               p += sprintf(p, "      %6u.%1u   %6u.%1u    %6u.%1u    "
-                               "%3u            %3u(%3u)  %8llu    %8llu\n",
+               p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u "
+                               "%3u %4u(%4u) %9llu(%9llu)\n",
                                tp / 10, tp % 10,
                                eprob / 10, eprob % 10,
                                prob / 10, prob % 10,
@@ -96,14 +96,15 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
                return ret;
        }
 
-       ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
+       ms = kmalloc(8192, GFP_KERNEL);
        if (!ms)
                return -ENOMEM;
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "type           rate     throughput  ewma prob   "
-                    "this prob  retry   this succ/attempt   success    attempts\n");
+       p += sprintf(p, "type           rate     tpt eprob *prob "
+                       "ret  *ok(*cum)        ok(      cum)\n");
+
 
        p = minstrel_ht_stats_dump(mi, max_mcs, p);
        for (i = 0; i < max_mcs; i++)
@@ -118,6 +119,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
                MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
        ms->len = p - ms->buf;
 
+       WARN_ON(ms->len + sizeof(*ms) > 8192);
+
        return nonseekable_open(inode, file);
 }
 
index 42f68cb8957e5a29d1a4e7965b65aff086bfe55e..bcda2ac7d84402f83d03f24390f22f2f32ea701e 100644 (file)
@@ -336,6 +336,7 @@ struct ieee80211_tx_latency_stat {
  * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
  *     AP only.
  * @cipher_scheme: optional cipher scheme for this station
+ * @last_tdls_pkt_time: holds the time in jiffies of the last ACKed TDLS packet
  */
 struct sta_info {
        /* General information, mostly static */
index 33d7a98a7a9799b3cb5b3ce62c9013c49483184c..b783a446d884d85af054c9a139f91a63a162bc9f 100644 (file)
@@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
        sch->limit = q->params.limit;
 
        setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);
-       mod_timer(&q->adapt_timer, jiffies + HZ / 2);
 
        if (opt) {
                int err = pie_change(sch, opt);
@@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
                        return err;
        }
 
+       mod_timer(&q->adapt_timer, jiffies + HZ / 2);
        return 0;
 }
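
The reordering closes a window where the adaptation timer was armed before pie_change() had validated the configuration: on failure, pie_init() returned an error with the timer still pending against a qdisc about to be torn down. The general shape, sketched with hypothetical names:

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data) { /* periodic work */ }

static int my_init(struct Qdisc *sch, struct nlattr *opt)
{
        int err;

        setup_timer(&my_timer, my_timer_fn, (unsigned long)sch);

        err = my_change(sch, opt);      /* hypothetical validation step */
        if (err)
                return err;             /* timer not yet armed: no cleanup */

        mod_timer(&my_timer, jiffies + HZ / 2);
        return 0;
}
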
 
index cb9f5a44ffadf7175d109cadefb091bbfdbb41e4..5839c85075f15407e86d84526cfb29087c70aa81 100644 (file)
@@ -5927,6 +5927,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        int err;
        bool need_new_beacon = false;
        int len, i;
+       u32 cs_count;
 
        if (!rdev->ops->channel_switch ||
            !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5963,7 +5964,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES])
                return -EINVAL;
 
-       params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+       /* Even though the attribute is u32, the specification says
+        * u8, so let's make sure we don't overflow.
+        */
+       cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+       if (cs_count > 255)
+               return -EINVAL;
+
+       params.count = cs_count;
 
        if (!need_new_beacon)
                goto skip_beacons;
index 4c4e457e788861a1255dc654cc4d4eec80c22fda..88bf289abdc925a05730d6695944bdef1ec67eaf 100644 (file)
@@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
        struct xfrm_policy *pol = xdst->pols[0];
        struct xfrm_policy_queue *pq = &pol->polq;
 
-       if (unlikely(skb_fclone_busy(skb))) {
+       if (unlikely(skb_fclone_busy(sk, skb))) {
                kfree_skb(skb);
                return 0;
        }