Merge tag 'pm-4.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Aug 2017 19:32:49 +0000 (12:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Aug 2017 19:32:49 +0000 (12:32 -0700)
Pull power management fixes from Rafael Wysocki:
 "These fix two cpufreq issues, one introduced recently and one related
  to recent changes, fix cpufreq documentation, fix up recently added
  code in the Thunderbolt driver and update runtime PM framework
  documentation.

  Specifics:

   - Fix the handling of the scaling_cur_freq cpufreq policy attribute
     on x86 systems with the MPERF/APERF registers present to make it
     behave more as expected after recent changes (Rafael Wysocki).

   - Drop a leftover callback from the intel_pstate driver which also
     prevents the cpuinfo_cur_freq cpufreq policy attribute from being
     incorrectly exposed when intel_pstate works in the active mode
     (Rafael Wysocki).

   - Add a missing piece describing the cpuinfo_cur_freq policy
     attribute to cpufreq documentation (Rafael Wysocki).

   - Fix up a recently added part of the Thunderbolt driver to avoid
     aborting system suspends if its mailbox commands time out (Rafael
     Wysocki).

   - Update device runtime PM framework documentation to reflect the
     current behavior of the code (Johan Hovold)"

* tag 'pm-4.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  thunderbolt: icm: Ignore mailbox errors in icm_suspend()
  cpufreq: x86: Make scaling_cur_freq behave more as expected
  PM / runtime: Document new pm_runtime_set_suspended() constraint
  cpufreq: docs: Add missing cpuinfo_cur_freq description
  cpufreq: intel_pstate: Drop ->get from intel_pstate structure
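For context, the scaling_cur_freq and cpuinfo_cur_freq attributes affected by the cpufreq fixes above are read from sysfs. The following minimal userspace sketch (not part of this merge) reads both for cpu0; the paths assume the standard cpufreq sysfs layout, and cpuinfo_cur_freq may simply be absent when the driver does not provide it (as with intel_pstate in active mode after this merge):

/* Illustrative only; assumes /sys/devices/system/cpu/cpu0/cpufreq exists. */
#include <stdio.h>

static long read_khz(const char *path)
{
	FILE *f = fopen(path, "r");
	long khz = -1;

	if (!f)
		return -1;		/* attribute not present for this driver */
	if (fscanf(f, "%ld", &khz) != 1)
		khz = -1;
	fclose(f);
	return khz;
}

int main(void)
{
	printf("scaling_cur_freq: %ld kHz\n",
	       read_khz("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"));
	printf("cpuinfo_cur_freq: %ld kHz\n",
	       read_khz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq"));
	return 0;
}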

113 files changed:
Documentation/devicetree/bindings/ata/sata_rcar.txt
MAINTAINERS
arch/parisc/Kconfig
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/irq.c
drivers/ata/Kconfig
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/sata_rcar.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_net.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/sun/sunhme.h
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/irda/mcs7780.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/virtio_net.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/parisc/pdc_stable.c
drivers/phy/broadcom/Kconfig
drivers/platform/x86/Kconfig
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/wmi.c
drivers/scsi/Kconfig
drivers/scsi/aic7xxx/Makefile
drivers/scsi/aic7xxx/aicasm/Makefile
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/qedi/Kconfig
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/scsi_transport_fc.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
include/linux/ipv6.h
include/linux/libata.h
include/linux/mlx5/mlx5_ifc.h
include/linux/nfs_xdr.h
include/linux/phy.h
include/linux/workqueue.h
include/net/sctp/sctp.h
include/net/udp.h
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-internal.h
kernel/cgroup/cgroup.c
kernel/workqueue.c
lib/test_rhashtable.c
net/core/dev_ioctl.c
net/core/netpoll.c
net/dccp/feat.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dsa/dsa2.c
net/ipv4/fib_semantics.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/exthdrs.c
net/ipv6/ip6_output.c
net/ipv6/udp.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/socket.c
samples/bpf/tcbpf2_kern.c
samples/bpf/test_tunnel_bpf.sh
tools/lib/bpf/bpf.c
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c

index 0764f9ab63dcde31f7efff01f74e56c5c6fa1c56..e20eac7a30874f0db82adefa0cdf3a77ac09aec2 100644 (file)
@@ -1,14 +1,22 @@
 * Renesas R-Car SATA
 
 Required properties:
-- compatible           : should contain one of the following:
+- compatible           : should contain one or more of the following:
                          - "renesas,sata-r8a7779" for R-Car H1
-                           ("renesas,rcar-sata" is deprecated)
                          - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
                          - "renesas,sata-r8a7790" for R-Car H2 other than ES1
                          - "renesas,sata-r8a7791" for R-Car M2-W
                          - "renesas,sata-r8a7793" for R-Car M2-N
                          - "renesas,sata-r8a7795" for R-Car H3
+                         - "renesas,rcar-gen2-sata" for a generic R-Car Gen2 compatible device
+                         - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 compatible device
+                         - "renesas,rcar-sata" is deprecated
+
+                         When compatible with the generic version nodes
+                         must list the SoC-specific version corresponding
+                         to the platform first followed by the generic
+                         version.
+
 - reg                  : address and length of the SATA registers;
 - interrupts           : must consist of one interrupt specifier.
 - clocks               : must contain a reference to the functional clock.
@@ -16,7 +24,7 @@ Required properties:
 Example:
 
 sata0: sata@ee300000 {
-       compatible = "renesas,sata-r8a7791";
+       compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata";
        reg = <0 0xee300000 0 0x2000>;
        interrupt-parent = <&gic>;
        interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
index f66488dfdbc9cc59fc744ce9f8349b9b69bd945c..567343b8ffaa633bfae244a7592781feb6243e7b 100644 (file)
@@ -5090,12 +5090,20 @@ M:      Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     include/linux/phy.h
-F:     include/linux/phy_fixed.h
-F:     drivers/net/phy/
+F:     Documentation/ABI/testing/sysfs-bus-mdio
+F:     Documentation/devicetree/bindings/net/mdio*
 F:     Documentation/networking/phy.txt
+F:     drivers/net/phy/
 F:     drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
+F:     include/linux/*mdio*.h
+F:     include/linux/of_net.h
+F:     include/linux/phy.h
+F:     include/linux/phy_fixed.h
+F:     include/linux/platform_data/mdio-gpio.h
+F:     include/trace/events/mdio.h
+F:     include/uapi/linux/mdio.h
+F:     include/uapi/linux/mii.h
 
 EXT2 FILE SYSTEM
 M:     Jan Kara <jack@suse.com>
index 531da9eb8f4363ad95c65e16b3d7c9653ac973b8..dda1f558ef35c5fa9113c763c846924ee24f93dc 100644 (file)
@@ -47,6 +47,9 @@ config PARISC
          and later HP3000 series).  The PA-RISC Linux project home page is
          at <http://www.parisc-linux.org/>.
 
+config CPU_BIG_ENDIAN
+       def_bool y
+
 config MMU
        def_bool y
 
index 88fe0aad4390b10830ce1bc1be62925d4b2d4bbc..bc208136bbb26a837a978cdf77304f1c9dec0b2d 100644 (file)
@@ -34,7 +34,7 @@ struct thread_info {
 
 /* thread information allocation */
 
-#define THREAD_SIZE_ORDER      2 /* PA-RISC requires at least 16k stack */
+#define THREAD_SIZE_ORDER      3 /* PA-RISC requires at least 32k stack */
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
index 85a92db70afc99ab9151924823a3e277f1c0e8c5..19c0c141bc3f9f0edd509708f978a2d7ca16c230 100644 (file)
@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_struct *vma,
        if (parisc_requires_coherency())
                flush_tlb_range(vma, start, end);
 
-       if ((end - start) >= parisc_cache_flush_threshold) {
+       if ((end - start) >= parisc_cache_flush_threshold
+           || vma->vm_mm->context != mfsp(3)) {
                flush_cache_all();
                return;
        }
 
-       BUG_ON(vma->vm_mm->context != mfsp(3));
-
        flush_user_dcache_range_asm(start, end);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(start, end);
index 5404e4086cb95d5e7a5b49e06e783f0cfdf89b8b..0ca254085a6626374ed488401618ca3af19872c7 100644 (file)
@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
 /*
  * IRQ STACK - used for irq handler
  */
-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
 
 union irq_stack_union {
        unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
index 948fc86980a1b75d1b9f99e9588bf894e024a88c..363fc5330c211456762e049d1569a04940c580ef 100644 (file)
@@ -215,7 +215,7 @@ config SATA_FSL
 
 config SATA_GEMINI
        tristate "Gemini SATA bridge support"
-       depends on PATA_FTIDE010
+       depends on ARCH_GEMINI || COMPILE_TEST
        default ARCH_GEMINI
        help
          This enabled support for the FTIDE010 to SATA bridge
@@ -613,7 +613,7 @@ config PATA_FTIDE010
        tristate "Faraday Technology FTIDE010 PATA support"
        depends on OF
        depends on ARM
-       default ARCH_GEMINI
+       depends on SATA_GEMINI
        help
          This option enables support for the Faraday FTIDE010
          PATA controller found in the Cortina Gemini SoCs.
index 8453f9a4682f855e910f59d1d78860f639d4bf59..fa7dd4394c02b642c00119bd00a4b59b18921a0d 100644 (file)
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 retry:
        ata_tf_init(dev, &tf);
        if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
-           !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+           !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
                tf.command = ATA_CMD_READ_LOG_DMA_EXT;
                tf.protocol = ATA_PROT_DMA;
                dma = true;
@@ -2102,8 +2102,8 @@ retry:
                                     buf, sectors * ATA_SECT_SIZE, 0);
 
        if (err_mask && dma) {
-               dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
-               ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+               dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+               ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
                goto retry;
        }
 
index b70bcf6d2914b6a82967fee346f5865b5c3b66b8..3dbd05532c09cdde995935539efa917e4f96dcd9 100644 (file)
@@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
 
 /**
  *     ata_eh_done - EH action complete
-*      @ap: target ATA port
+ *     @link: ATA link for which EH actions are complete
  *     @dev: target ATA dev for per-dev action (can be NULL)
  *     @action: action just completed
  *
@@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 
 /**
  *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+ *     @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
  *     @cmd: scsi command for which the sense code should be set
  *
  *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;
-       int rc = 0;
 
        /* are we resuming? */
        spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
        ata_acpi_set_state(ap, ap->pm_mesg);
 
        if (ap->ops->port_resume)
-               rc = ap->ops->port_resume(ap);
+               ap->ops->port_resume(ap);
 
        /* tell ACPI that we're resuming */
        ata_acpi_on_resume(ap);
index d462c5a3a7efd8b3c4933df845c0f3f1d8637bac..44ba292f2cd793f07e279ada3b2c83c44578f0cf 100644 (file)
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
        if (!sata_pmp_attached(ap)) {
-               if (likely(devno < ata_link_max_devices(&ap->link)))
+               if (likely(devno >= 0 &&
+                          devno < ata_link_max_devices(&ap->link)))
                        return &ap->link.device[devno];
        } else {
-               if (likely(devno < ap->nr_pmp_links))
+               if (likely(devno >= 0 &&
+                          devno < ap->nr_pmp_links))
                        return &ap->pmp_link[devno].device[0];
        }
 
index ee98447587366f240a580e56a49e0983f58727e2..537d11869069aae7d26e1e253ed3f4d587d25bbf 100644 (file)
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
                .compatible = "renesas,sata-r8a7795",
                .data = (void *)RCAR_GEN2_SATA
        },
+       {
+               .compatible = "renesas,rcar-gen2-sata",
+               .data = (void *)RCAR_GEN2_SATA
+       },
+       {
+               .compatible = "renesas,rcar-gen3-sata",
+               .data = (void *)RCAR_GEN2_SATA
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
index 89b09c51ab7cff64d20e24ff43fff8bd720d1ef3..38a5bb764c7b55cb8b742639e49756e413b4ab26 100644 (file)
@@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                        if (arg) {
                                if (copy_from_user(bname, argp, sizeof(bname) - 1))
                                        return -EFAULT;
+                               bname[sizeof(bname)-1] = 0;
                        } else
                                return -EINVAL;
                        ret = mutex_lock_interruptible(&dev->mtx);
index c151c6daa67ee8aa2ae600b4462f146d85776335..f63a110b7bcb2d2257869484bd9b894d94b9b5d2 100644 (file)
@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
        char newname[10];
 
        if (p) {
-               /* Slave-Name MUST not be empty */
-               if (!strlen(p + 1))
+               /* Slave-Name MUST not be empty or overflow 'newname' */
+               if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
                        return NULL;
-               strcpy(newname, p + 1);
                *p = 0;
                /* Master must already exist */
                if (!(n = isdn_net_findif(parm)))
index 181839d6fbea44a8b6962f94703e4293645cc21c..9bee6c1c70cca33941ae8db8002d69e760693bac 100644 (file)
@@ -2050,6 +2050,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                                continue;
 
                        bond_propose_link_state(slave, BOND_LINK_FAIL);
+                       commit++;
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
                                netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2088,6 +2089,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                                continue;
 
                        bond_propose_link_state(slave, BOND_LINK_BACK);
+                       commit++;
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
index 041cfb7952f81d71278279454a394c9754e0fb97..e94159507847b33962f99d63561301b924fd2dd1 100644 (file)
@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
                mac_mode |= HALF_DUPLEX;
 
        if (gigabit) {
-               if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+               if (phy_interface_is_rgmii(dev->phydev))
                        mac_mode |= RGMII_MODE;
 
                mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
                break;
 
        case PHY_INTERFACE_MODE_RGMII:
-               pad_mode = PAD_MODE_RGMII;
-               break;
-
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
-               pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+               pad_mode = PAD_MODE_RGMII;
                break;
 
        default:
index 7b0b399aaedd44778b96a38fe0fc5c7659494da5..a981c4ee9d72deab705231088043e8690853d563 100644 (file)
@@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d)
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
-       bcmgenet_mii_config(priv->dev);
+       bcmgenet_mii_config(priv->dev, false);
 
        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
index b9344de669f84d2d1fa8e983c704b97ba09c7195..3a34fdba5301185e9939dc2feff52bef4f3ea68f 100644 (file)
@@ -698,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
index 071fcbd14e6a4e17534950107d5081662eba5c3c..30cb97b4a1d7a27f4be917e4d718f86d596734a5 100644 (file)
@@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
                                          bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev)
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       dev_info_once(kdev, "configuring instance for %s\n", phy_name);
+       if (init)
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
 
        return 0;
 }
@@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
-       ret = bcmgenet_mii_config(dev);
+       ret = bcmgenet_mii_config(dev, true);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
index 79112563a25ae08db35bfb2de214c2b3e84a15a6..5e5c4d7796b8882168eba6fe858ac4bfff2aaf53 100644 (file)
@@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
        u64 cmr_cfg;
        u64 port_cfg = 0;
        u64 misc_ctl = 0;
+       bool tx_en, rx_en;
 
        cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
-       cmr_cfg &= ~CMR_EN;
+       tx_en = cmr_cfg & CMR_PKT_TX_EN;
+       rx_en = cmr_cfg & CMR_PKT_RX_EN;
+       cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
        bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
 
+       /* Wait for BGX RX to be idle */
+       if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+                        GMI_PORT_CFG_RX_IDLE, false)) {
+               dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
+                       bgx->bgx_id, lmac->lmacid);
+               return;
+       }
+
+       /* Wait for BGX TX to be idle */
+       if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+                        GMI_PORT_CFG_TX_IDLE, false)) {
+               dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
+                       bgx->bgx_id, lmac->lmacid);
+               return;
+       }
+
        port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
        misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
 
@@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
        bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
        bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
 
-       port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
-
-       /* Re-enable lmac */
-       cmr_cfg |= CMR_EN;
+       /* Restore CMR config settings */
+       cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
        bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
 
        if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
index 6b7fe6fdd13b9b27b4a1573e8c7b3869b17bcc19..23acdc5ab896306f3f1e6d2fbc7d8afb58273888 100644 (file)
 #define  GMI_PORT_CFG_DUPLEX                   BIT_ULL(2)
 #define  GMI_PORT_CFG_SLOT_TIME                        BIT_ULL(3)
 #define  GMI_PORT_CFG_SPEED_MSB                        BIT_ULL(8)
+#define  GMI_PORT_CFG_RX_IDLE                  BIT_ULL(12)
+#define  GMI_PORT_CFG_TX_IDLE                  BIT_ULL(13)
 #define BGX_GMP_GMI_RXX_JABBER         0x38038
 #define BGX_GMP_GMI_TXX_THRESH         0x38210
 #define BGX_GMP_GMI_TXX_APPEND         0x38218
index 95bf5e89cfd17b30d3543b33cb069f8aaee69d31..34dae51effd45a19c9a2b8b607dafeaec3aa0456 100644 (file)
@@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
        iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
        iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
                  priv->base + FTGMAC100_OFFSET_MACCR);
-       for (i = 0; i < 50; i++) {
+       for (i = 0; i < 200; i++) {
                unsigned int maccr;
 
                maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
@@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
        struct net_device *netdev = priv->netdev;
        struct sk_buff *skb;
        dma_addr_t map;
-       int err;
+       int err = 0;
 
        skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
        if (unlikely(!skb)) {
@@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
        else
                rxdes->rxdes0 = 0;
 
-       return 0;
+       return err;
 }
 
 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
@@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
        priv->mii_bus->name = "ftgmac100_mdio";
        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
                 pdev->name, pdev->id);
+       priv->mii_bus->parent = priv->dev;
        priv->mii_bus->priv = priv->netdev;
        priv->mii_bus->read = ftgmac100_mdiobus_read;
        priv->mii_bus->write = ftgmac100_mdiobus_write;
index 5794d98d946f35c132f010b58368a15b5534070d..9c94ea9b2b802306c0286472c2255571e8e3ed36 100644 (file)
@@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        ppd.shared = pdev;
 
        memset(&res, 0, sizeof(res));
-       if (!of_irq_to_resource(pnp, 0, &res)) {
+       if (of_irq_to_resource(pnp, 0, &res) <= 0) {
                dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
                return -EINVAL;
        }
index b3d0c2e6347a636aa23a6f637233d27d7b55f646..e588a0cdb074040fe83ba8caaec9cd30c4189689 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/if_vlan.h>
 #include <linux/reset.h>
 #include <linux/tcp.h>
+#include <linux/interrupt.h>
 
 #include "mtk_eth_soc.h"
 
@@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                      RX_DMA_FPORT_MASK;
                mac--;
 
+               if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+                            !eth->netdev[mac]))
+                       goto release_desc;
+
                netdev = eth->netdev[mac];
 
                if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
index f5a2c605749ff2dad1db091bb93d3b6957fa4058..31cbe5e86a01300bacd92db829d9ddc186e5db7a 100644 (file)
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
        mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+                             struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
        struct semaphore *sem;
        unsigned long flags;
        bool poll_cmd = ent->polling;
+       int alloc_ret;
 
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
        if (!ent->page_queue) {
-               ent->idx = alloc_ent(cmd);
-               if (ent->idx < 0) {
+               alloc_ret = alloc_ent(cmd);
+               if (alloc_ret < 0) {
                        mlx5_core_err(dev, "failed to allocate command entry\n");
+                       if (ent->callback) {
+                               ent->callback(-EAGAIN, ent->context);
+                               mlx5_free_cmd_msg(dev, ent->out);
+                               free_msg(dev, ent->in);
+                               free_cmd(ent);
+                       } else {
+                               ent->ret = -EAGAIN;
+                               complete(&ent->done);
+                       }
                        up(sem);
                        return;
                }
+               ent->idx = alloc_ret;
        } else {
                ent->idx = cmd->max_reg_cmds;
                spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 
        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT)
-               goto out_free;
+               goto out;
 
        ds = ent->ts2 - ent->ts1;
        op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
                                        mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
                                                      ent->idx);
                                        free_ent(cmd, ent->idx);
+                                       free_cmd(ent);
                                }
                                continue;
                        }
@@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
                                free_msg(dev, ent->in);
 
                                err = err ? err : ent->status;
-                               free_cmd(ent);
+                               if (!forced)
+                                       free_cmd(ent);
                                callback(err, context);
                        } else {
                                complete(&ent->done);
index e1b7ddfecd011436c1520edc93eb30e8e15221d4..0039b4725405fcf0a961fa53eab250adc2d51f3c 100644 (file)
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
 };
 #endif
 
+#define MAX_PIN_NUM    8
+struct mlx5e_pps {
+       u8                         pin_caps[MAX_PIN_NUM];
+       struct work_struct         out_work;
+       u64                        start[MAX_PIN_NUM];
+       u8                         enabled;
+};
+
 struct mlx5e_tstamp {
        rwlock_t                   lock;
        struct cyclecounter        cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
        struct mlx5_core_dev      *mdev;
        struct ptp_clock          *ptp;
        struct ptp_clock_info      ptp_info;
-       u8                        *pps_pin_caps;
+       struct mlx5e_pps           pps_info;
 };
 
 enum {
index 66f432385dbbd5ea6ce3ceb256abb7ce6dada7b0..84dd63e740414e75d5aa5ac1ebf871c801718bda 100644 (file)
@@ -53,6 +53,15 @@ enum {
        MLX5E_EVENT_MODE_ONCE_TILL_ARM  = 0x2,
 };
 
+enum {
+       MLX5E_MTPPS_FS_ENABLE                   = BIT(0x0),
+       MLX5E_MTPPS_FS_PATTERN                  = BIT(0x2),
+       MLX5E_MTPPS_FS_PIN_MODE                 = BIT(0x3),
+       MLX5E_MTPPS_FS_TIME_STAMP               = BIT(0x4),
+       MLX5E_MTPPS_FS_OUT_PULSE_DURATION       = BIT(0x5),
+       MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ          = BIT(0x7),
+};
+
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts)
 {
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
        return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
 }
 
+static void mlx5e_pps_out(struct work_struct *work)
+{
+       struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+                                                 out_work);
+       struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+                                                  pps_info);
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+       unsigned long flags;
+       int i;
+
+       for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+               u64 tstart;
+
+               write_lock_irqsave(&tstamp->lock, flags);
+               tstart = tstamp->pps_info.start[i];
+               tstamp->pps_info.start[i] = 0;
+               write_unlock_irqrestore(&tstamp->lock, flags);
+               if (!tstart)
+                       continue;
+
+               MLX5_SET(mtpps_reg, in, pin, i);
+               MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+               MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+               mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+       }
+}
+
 static void mlx5e_timestamp_overflow(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
                                                   overflow_work);
+       struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
        unsigned long flags;
 
        write_lock_irqsave(&tstamp->lock, flags);
        timecounter_read(&tstamp->clock);
        write_unlock_irqrestore(&tstamp->lock, flags);
-       schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+       queue_delayed_work(priv->wq, &tstamp->overflow_work,
+                          msecs_to_jiffies(tstamp->overflow_period * 1000));
 }
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        int neg_adj = 0;
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                  ptp_info);
-       struct mlx5e_priv *priv =
-               container_of(tstamp, struct mlx5e_priv, tstamp);
-
-       if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
-               u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
-               /* For future use need to add a loop for finding all 1PPS out pins */
-               MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-               MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
-               mlx5_set_mtpps(priv->mdev, in, sizeof(in));
-       }
 
        if (delta < 0) {
                neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
        struct mlx5e_priv *priv =
                container_of(tstamp, struct mlx5e_priv, tstamp);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+       u32 field_select = 0;
+       u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;
 
-       if (!MLX5_CAP_GEN(priv->mdev, pps) ||
-           !MLX5_CAP_GEN(priv->mdev, pps_modify))
+       if (!MLX5_PPS_CAP(priv->mdev))
                return -EOPNOTSUPP;
 
        if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
                pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EBUSY;
+               pin_mode = MLX5E_PIN_MODE_IN;
+               pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+               field_select = MLX5E_MTPPS_FS_PIN_MODE |
+                              MLX5E_MTPPS_FS_PATTERN |
+                              MLX5E_MTPPS_FS_ENABLE;
+       } else {
+               pin = rq->extts.index;
+               field_select = MLX5E_MTPPS_FS_ENABLE;
        }
 
-       if (rq->extts.flags & PTP_FALLING_EDGE)
-               pattern = 1;
-
        MLX5_SET(mtpps_reg, in, pin, pin);
-       MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+       MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
+       MLX5_SET(mtpps_reg, in, field_select, field_select);
 
        err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
        if (err)
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
        struct mlx5e_priv *priv =
                container_of(tstamp, struct mlx5e_priv, tstamp);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-       u64 nsec_now, nsec_delta, time_stamp;
+       u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
        struct timespec64 ts;
        unsigned long flags;
+       u32 field_select = 0;
+       u8 pin_mode = 0;
+       u8 pattern = 0;
        int pin = -1;
+       int err = 0;
        s64 ns;
 
-       if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+       if (!MLX5_PPS_CAP(priv->mdev))
                return -EOPNOTSUPP;
 
        if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
                                   rq->perout.index);
                if (pin < 0)
                        return -EBUSY;
-       }
 
-       ts.tv_sec = rq->perout.period.sec;
-       ts.tv_nsec = rq->perout.period.nsec;
-       ns = timespec64_to_ns(&ts);
-       if (on)
+               pin_mode = MLX5E_PIN_MODE_OUT;
+               pattern = MLX5E_OUT_PATTERN_PERIODIC;
+               ts.tv_sec = rq->perout.period.sec;
+               ts.tv_nsec = rq->perout.period.nsec;
+               ns = timespec64_to_ns(&ts);
+
                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;
-       ts.tv_sec = rq->perout.start.sec;
-       ts.tv_nsec = rq->perout.start.nsec;
-       ns = timespec64_to_ns(&ts);
-       cycles_now = mlx5_read_internal_timer(tstamp->mdev);
-       write_lock_irqsave(&tstamp->lock, flags);
-       nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
-       nsec_delta = ns - nsec_now;
-       cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
-                                tstamp->cycles.mult);
-       write_unlock_irqrestore(&tstamp->lock, flags);
-       time_stamp = cycles_now + cycles_delta;
+
+               ts.tv_sec = rq->perout.start.sec;
+               ts.tv_nsec = rq->perout.start.nsec;
+               ns = timespec64_to_ns(&ts);
+               cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+               write_lock_irqsave(&tstamp->lock, flags);
+               nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+               nsec_delta = ns - nsec_now;
+               cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+                                        tstamp->cycles.mult);
+               write_unlock_irqrestore(&tstamp->lock, flags);
+               time_stamp = cycles_now + cycles_delta;
+               field_select = MLX5E_MTPPS_FS_PIN_MODE |
+                              MLX5E_MTPPS_FS_PATTERN |
+                              MLX5E_MTPPS_FS_ENABLE |
+                              MLX5E_MTPPS_FS_TIME_STAMP;
+       } else {
+               pin = rq->perout.index;
+               field_select = MLX5E_MTPPS_FS_ENABLE;
+       }
+
        MLX5_SET(mtpps_reg, in, pin, pin);
-       MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-       MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+       MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+       MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+       MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+       err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+       if (err)
+               return err;
 
-       return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+       return mlx5_set_mtppse(priv->mdev, pin, 0,
+                              MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+                              struct ptp_clock_request *rq,
+                              int on)
+{
+       struct mlx5e_tstamp *tstamp =
+               container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+       tstamp->pps_info.enabled = !!on;
+       return 0;
 }
 
 static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
                return mlx5e_extts_configure(ptp, rq, on);
        case PTP_CLK_REQ_PEROUT:
                return mlx5e_perout_configure(ptp, rq, on);
+       case PTP_CLK_REQ_PPS:
+               return mlx5e_pps_configure(ptp, rq, on);
        default:
                return -EOPNOTSUPP;
        }
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
                return -ENOMEM;
        tstamp->ptp_info.enable = mlx5e_ptp_enable;
        tstamp->ptp_info.verify = mlx5e_ptp_verify;
+       tstamp->ptp_info.pps = 1;
 
        for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
                snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
        tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
                                              cap_max_num_of_pps_out_pins);
 
-       tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
-       tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
-       tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
-       tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
-       tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
-       tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
-       tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
-       tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+       tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+       tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+       tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+       tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+       tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+       tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+       tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+       tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
 }
 
 void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
                             struct ptp_clock_event *event)
 {
+       struct net_device *netdev = priv->netdev;
        struct mlx5e_tstamp *tstamp = &priv->tstamp;
+       struct timespec64 ts;
+       u64 nsec_now, nsec_delta;
+       u64 cycles_now, cycles_delta;
+       int pin = event->index;
+       s64 ns;
+       unsigned long flags;
 
-       ptp_clock_event(tstamp->ptp, event);
+       switch (tstamp->ptp_info.pin_config[pin].func) {
+       case PTP_PF_EXTTS:
+               if (tstamp->pps_info.enabled) {
+                       event->type = PTP_CLOCK_PPSUSR;
+                       event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+               } else {
+                       event->type = PTP_CLOCK_EXTTS;
+               }
+               ptp_clock_event(tstamp->ptp, event);
+               break;
+       case PTP_PF_PEROUT:
+               mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+               cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+               ts.tv_sec += 1;
+               ts.tv_nsec = 0;
+               ns = timespec64_to_ns(&ts);
+               write_lock_irqsave(&tstamp->lock, flags);
+               nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+               nsec_delta = ns - nsec_now;
+               cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+                                        tstamp->cycles.mult);
+               tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+               queue_work(priv->wq, &tstamp->pps_info.out_work);
+               write_unlock_irqrestore(&tstamp->lock, flags);
+               break;
+       default:
+               netdev_err(netdev, "%s: Unhandled event\n", __func__);
+       }
 }
 
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
        do_div(ns, NSEC_PER_SEC / 2 / HZ);
        tstamp->overflow_period = ns;
 
+       INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
        INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
        if (tstamp->overflow_period)
-               schedule_delayed_work(&tstamp->overflow_work, 0);
+               queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
        else
                mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
 
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
        snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
 
        /* Initialize 1PPS data structures */
-#define MAX_PIN_NUM    8
-       tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
-       if (tstamp->pps_pin_caps) {
-               if (MLX5_CAP_GEN(priv->mdev, pps))
-                       mlx5e_get_pps_caps(priv, tstamp);
-               if (tstamp->ptp_info.n_pins)
-                       mlx5e_init_pin_config(tstamp);
-       } else {
-               mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
-       }
+       if (MLX5_PPS_CAP(priv->mdev))
+               mlx5e_get_pps_caps(priv, tstamp);
+       if (tstamp->ptp_info.n_pins)
+               mlx5e_init_pin_config(tstamp);
 
        tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
                                         &priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
                priv->tstamp.ptp = NULL;
        }
 
-       kfree(tstamp->pps_pin_caps);
-       kfree(tstamp->ptp_info.pin_config);
-
+       cancel_work_sync(&tstamp->pps_info.out_work);
        cancel_delayed_work_sync(&tstamp->overflow_work);
+       kfree(tstamp->ptp_info.pin_config);
 }
index bdd82c9b399262428247471798e7fd4dbf03ca92..eafc59280adae527374e321b3cfcd8549ac140d8 100644 (file)
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
 
 static bool outer_header_zero(u32 *match_criteria)
 {
-       int size = MLX5_ST_SZ_BYTES(fte_match_param);
+       int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
        char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
                                             outer_headers);
 
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
 
        spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
        flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-       rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
index 1eac5003084fb9d131392ab7e87ec82d7c4d20b7..57f31fa478ceee5b83a3b4041cfef12457acbd03 100644 (file)
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                break;
        case MLX5_DEV_EVENT_PPS:
                eqe = (struct mlx5_eqe *)param;
-               ptp_event.type = PTP_CLOCK_EXTTS;
                ptp_event.index = eqe->data.pps.pin;
                ptp_event.timestamp =
                        timecounter_cyc2time(&priv->tstamp.clock,
index af51a5d2b912721222fa2434e7919231ccaacdcb..52b9a64cd3a20be3eb8a8e5beb1990d583d2db93 100644 (file)
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");
 
-       if (MLX5_CAP_GEN(dev, pps))
+       if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
 
        if (MLX5_CAP_GEN(dev, fpga))
index 89bfda419efe96efc1778ec832ff0eac56588cc5..8b18cc9ec026f99d68d23626bde3d695fa1aa895 100644 (file)
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
        int i;
 
        if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-           MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+           MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
+           esw->mode == SRIOV_NONE)
                return;
 
        esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
index 1ee5bce8590118a2075bfd4ce2cc52d0da8c1f27..85298051a3e4fcf74767196dcc6660114222b4cf 100644 (file)
@@ -178,8 +178,6 @@ out:
 
 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
 {
-       mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
        mlx5_core_destroy_qp(mdev, qp);
 }
 
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
                return err;
        }
 
-       mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
        err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+       struct mlx5i_priv *ipriv  = priv->ppriv;
        int err;
 
        err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_indirect_tirs;
 
-       err = mlx5i_create_flow_steering(priv);
+       err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
        if (err)
                goto err_destroy_direct_tirs;
 
+       err = mlx5i_create_flow_steering(priv);
+       if (err)
+               goto err_remove_rx_underlay_qpn;
+
        return 0;
 
+err_remove_rx_underlay_qpn:
+       mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ err_destroy_indirect_rqts:
 
 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
+       struct mlx5i_priv *ipriv  = priv->ppriv;
+
+       mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv);
index a3a836bdcfd29c18e2ba46822a559c8e6982d7fb..f26f97fe46666ff7a3f2ce6c496cb8936ee5cbb3 100644 (file)
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
                                           u8 *port1, u8 *port2)
 {
-       if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-               if (tracker->netdev_state[0].tx_enabled) {
-                       *port1 = 1;
-                       *port2 = 1;
-               } else {
-                       *port1 = 2;
-                       *port2 = 2;
-               }
-       } else {
-               *port1 = 1;
-               *port2 = 2;
-               if (!tracker->netdev_state[0].link_up)
-                       *port1 = 2;
-               else if (!tracker->netdev_state[1].link_up)
-                       *port2 = 1;
+       *port1 = 1;
+       *port2 = 2;
+       if (!tracker->netdev_state[0].tx_enabled ||
+           !tracker->netdev_state[0].link_up) {
+               *port1 = 2;
+               return;
        }
+
+       if (!tracker->netdev_state[1].tx_enabled ||
+           !tracker->netdev_state[1].link_up)
+               *port2 = 1;
 }
 
 static void mlx5_activate_lag(struct mlx5_lag *ldev,
index 6a3d6bef7dd4a4a99d6e8427a0a35a78c0b56d20..6a263e8d883a6bae2791a011026e03bc0cd1dead 100644 (file)
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
 
+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&               \
+                           MLX5_CAP_GEN((mdev), pps_modify) &&         \
+                           MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&  \
+                           MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
 
 void mlx5e_init(void);
index bcdf7779c48d79ffb85f3505e122210d16e9b46d..bf99d40e30b4e07a234ea68366d226267d365d7b 100644 (file)
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        int vf;
 
        if (!sriov->enabled_vfs)
+#ifdef CONFIG_MLX5_CORE_EN
+               goto disable_sriov_resources;
+#else
                return;
+#endif
 
        for (vf = 0; vf < sriov->num_vfs; vf++) {
                if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 #ifdef CONFIG_MLX5_CORE_EN
+disable_sriov_resources:
        mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 #endif
 
index 383fef5a8e24203e20137671c0924f7cc7cbfbf3..4b2e0fd7d51e0a6635cdea75055eec9365d21588 100644 (file)
@@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_fib_entry *fib_entry);
 
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+                                const struct mlxsw_sp_fib_entry *fib_entry);
+
 static int
 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_nexthop_group *nh_grp)
@@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+               if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
+                                                     fib_entry))
+                       continue;
                err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
                if (err)
                        return err;
index 22cf6353ba0418ab5c64ad1fbb343c797d82f18d..7ecf549c7f1cd594f86f165c02743f415f7ada85 100644 (file)
@@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
        int i;
 
-       for (i = 0; i < 23; i++)
+       for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
                if ((i < 12) || (i > 17))
                        reg_space[DMA_BUS_MODE / 4 + i] =
                                readl(ioaddr + DMA_BUS_MODE + i * 4);
index eef2f222ce9a87f91a1d02fa6a6c7354486d4231..6502b9aa3bf587d0095816052066ce301a765634 100644 (file)
@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
        int i;
 
-       for (i = 0; i < 9; i++)
+       for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
                reg_space[DMA_BUS_MODE / 4 + i] =
                        readl(ioaddr + DMA_BUS_MODE + i * 4);
 
index 9091df86723a3988075cbda535d5d6ba21826b7b..adc54006f8843a5bb2081de064291068e66fcf28 100644 (file)
 #define DMA_STATUS_TI  0x00000001      /* Transmit Interrupt */
 #define DMA_CONTROL_FTF                0x00100000      /* Flush transmit FIFO */
 
+#define NUM_DWMAC100_DMA_REGS  9
+#define NUM_DWMAC1000_DMA_REGS 23
+
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
 void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
 void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
index babb39c646ff2f64c96880b437cbca5fc5668453..af30b4857c3b9352d4b9745ebdb07779b7621afc 100644 (file)
@@ -33,6 +33,8 @@
 #define MAC100_ETHTOOL_NAME    "st_mac100"
 #define GMAC_ETHTOOL_NAME      "st_gmac"
 
+#define ETHTOOL_DMA_OFFSET     55
+
 struct stmmac_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
@@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
 
        priv->hw->mac->dump_regs(priv->hw, reg_space);
        priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+       /* Copy DMA registers to where ethtool expects them */
+       memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
+              NUM_DWMAC1000_DMA_REGS * 4);
 }
 
 static void
index 3af540adb3c58e83b4265c77fa436d7d44da06c7..fca1bca7f69d1c4c1307c1cc7e2a11f80d6ec866 100644 (file)
@@ -13,9 +13,9 @@
 /* Happy Meal global registers. */
 #define GREG_SWRESET   0x000UL /* Software Reset  */
 #define GREG_CFG       0x004UL /* Config Register */
-#define GREG_STAT      0x108UL /* Status          */
-#define GREG_IMASK     0x10cUL /* Interrupt Mask  */
-#define GREG_REG_SIZE  0x110UL
+#define GREG_STAT      0x100UL /* Status          */
+#define GREG_IMASK     0x104UL /* Interrupt Mask  */
+#define GREG_REG_SIZE  0x108UL
 
 /* Global reset register. */
 #define GREG_RESET_ETX         0x01
index d9db8a06afd26ef901aa0e5cf559559657ea3179..cce9c9ed46aa9a8462080c5949bdb6621e247f0d 100644 (file)
@@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
 static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
 {
        static int count;
-       printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
+       printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
               dev->name, status);
        if (status & Int_IntPCI)
                printk(" IntPCI");
index 63c98bbbc596dbe11cc74ba71c384da02c5d763b..0d78727f1a14dd9c4ae301f053769437cbe4eb3b 100644 (file)
@@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
        return slots_used;
 }
 
-/* Estimate number of page buffers neede to transmit
- * Need at most 2 for RNDIS header plus skb body and fragments.
- */
-static unsigned int netvsc_get_slots(const struct sk_buff *skb)
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+       int i, frags = skb_shinfo(skb)->nr_frags;
+       int pages = 0;
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               unsigned long size = skb_frag_size(frag);
+               unsigned long offset = frag->page_offset;
+
+               /* Skip unused frames from start of page */
+               offset &= ~PAGE_MASK;
+               pages += PFN_UP(offset + size);
+       }
+       return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
 {
-       return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
-               + skb_shinfo(skb)->nr_frags
-               + 2;
+       char *data = skb->data;
+       unsigned int offset = offset_in_page(data);
+       unsigned int len = skb_headlen(skb);
+       int slots;
+       int frag_slots;
+
+       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+       frag_slots = count_skb_frag_slots(skb);
+       return slots + frag_slots;
 }
 
 static u32 net_checksum_info(struct sk_buff *skb)
@@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
        struct hv_page_buffer *pb = page_buf;
 
-       /* We can only transmit MAX_PAGE_BUFFER_COUNT number
+       /* We will need at most two pages to describe the rndis
+        * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */
-       num_data_pgs = netvsc_get_slots(skb);
+
+       num_data_pgs = netvsc_get_slots(skb) + 2;
+
        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;
 
                if (skb_linearize(skb))
                        goto no_memory;
 
-               num_data_pgs = netvsc_get_slots(skb);
+               num_data_pgs = netvsc_get_slots(skb) + 2;
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
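
The slot estimate above is plain page arithmetic: the linear head spans DIV_ROUND_UP(offset + len, PAGE_SIZE) pages, each fragment spans PFN_UP(offset + size) pages, and two extra slots are reserved for the RNDIS header. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages and made-up buffer sizes (pages_spanned() is an illustrative helper, not part of the driver):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Pages spanned by a buffer that starts 'offset' bytes into a page and
     * is 'len' bytes long -- the same computation the hunk above performs
     * for the skb head and for each fragment. */
    static unsigned long pages_spanned(unsigned long offset, unsigned long len)
    {
            return DIV_ROUND_UP((offset % PAGE_SIZE) + len, PAGE_SIZE);
    }

    int main(void)
    {
            /* hypothetical skb: 100-byte linear head at page offset 4000,
             * plus one 6000-byte fragment starting at page offset 500 */
            unsigned long slots = pages_spanned(4000, 100)  /* head: 2 pages */
                                + pages_spanned(500, 6000)  /* frag: 2 pages */
                                + 2;                        /* RNDIS header  */

            printf("page buffers needed: %lu\n", slots);    /* prints 6 */
            return 0;
    }
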
index 6f6ed75b63c97e7eaa2e49e7fcb385ca965f1ca5..765de3bedb8817d018b4f6143b3195c581cc3834 100644 (file)
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
 {
        struct usb_device *dev = mcs->usbdev;
-       int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-                                 MCS_RD_RTYPE, 0, reg, val, 2,
-                                 msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+       void *dmabuf;
+       int ret;
+
+       dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+       if (!dmabuf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+                             MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+                             msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+       memcpy(val, dmabuf, sizeof(__u16));
+       kfree(dmabuf);
 
        return ret;
 }
index 2dda72004a7d5ba0ede8fb34c853bce2266ace4a..928fd892f167546e329f913ec10ea56e454cea0d 100644 (file)
@@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE
        help
           MDIO devices and driver infrastructure code.
 
-if MDIO_DEVICE
+config MDIO_BUS
+       tristate
+       default m if PHYLIB=m
+       default MDIO_DEVICE
+       help
+         This internal symbol is used for link time dependencies and it
+         reflects whether the mdio_bus/mdio_device code is built as a
+         loadable module or built-in.
+
+if MDIO_BUS
 
 config MDIO_BCM_IPROC
        tristate "Broadcom iProc MDIO bus controller"
@@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC
 
 config MDIO_BITBANG
        tristate "Bitbanged MDIO buses"
-       depends on !(MDIO_DEVICE=y && PHYLIB=m)
        help
          This module implements the MDIO bus protocol in software,
          for use by low level drivers that export the ability to
@@ -127,7 +135,6 @@ config MDIO_THUNDER
        tristate "ThunderX SOCs MDIO buses"
        depends on 64BIT
        depends on PCI
-       depends on !(MDIO_DEVICE=y && PHYLIB=m)
        select MDIO_CAVIUM
        help
          This driver supports the MDIO interfaces found on Cavium
index d0626bf5c540911b0d15bdbab1b960145b6d124c..5068c582d502c6944a01a2ee87062d7ffb409034 100644 (file)
@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
        if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);
+
+       /* Now we can run the state machine synchronously */
+       phy_state_machine(&phydev->state_queue.work);
 }
 
 /**
index eac499c58aa706a40ebf76024767ad80b1749678..6dde9a0cfe76ca3b09fcb355b26b75c3dad7c622 100644 (file)
@@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock)
        clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
        RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
        spin_unlock(&chan_lock);
-       synchronize_rcu();
 }
 
 static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
@@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock)
 
        po = pppox_sk(sk);
        del_chan(po);
+       synchronize_rcu();
 
        pppox_unbind_sock(sk);
        sk->sk_state = PPPOX_DEAD;
index 4645704097963ca2b57bd90642171f6dfb8c892a..ae53e899259f8d6f919d6f6bef07524c92c6fea5 100644 (file)
@@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
 {
-       struct sockaddr addr;
+       struct sockaddr_storage addr;
 
-       memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
-       addr.sa_family = port_dev->type;
-       return dev_set_mac_address(port_dev, &addr);
+       memcpy(addr.__data, dev_addr, port_dev->addr_len);
+       addr.ss_family = port_dev->type;
+       return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
 }
 
 static int team_port_set_orig_dev_addr(struct team_port *port)
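
For context, the switch above matters because struct sockaddr provides only 14 bytes of sa_data, while some link layers (InfiniBand, for example) use 20-byte hardware addresses, so copying addr_len bytes into it can overflow; sockaddr_storage is padded to hold any supported family. A quick, purely illustrative userspace check of the two sizes:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* struct sockaddr carries only 14 bytes of address data;
             * struct sockaddr_storage is sized for any address family,
             * which is why the hunk above copies the MAC into the latter. */
            printf("sizeof(struct sockaddr)         = %zu\n",
                   sizeof(struct sockaddr));
            printf("sizeof(struct sockaddr_storage) = %zu\n",
                   sizeof(struct sockaddr_storage));
            return 0;
    }
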
index 3d4c24572ecdcda8ffdd47070ce75dcb625441da..32ad87345f5798498584d8dcfbda2f9ac993e619 100644 (file)
@@ -2598,8 +2598,16 @@ static int __init tun_init(void)
                goto err_misc;
        }
 
-       register_netdevice_notifier(&tun_notifier_block);
+       ret = register_netdevice_notifier(&tun_notifier_block);
+       if (ret) {
+               pr_err("Can't register netdevice notifier\n");
+               goto err_notifier;
+       }
+
        return  0;
+
+err_notifier:
+       misc_deregister(&tun_miscdev);
 err_misc:
        rtnl_link_unregister(&tun_link_ops);
 err_linkops:
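
The new error path above follows the usual goto-unwind convention: each failure label undoes everything set up before the failing step, in reverse order of setup. A small self-contained sketch of the pattern, with hypothetical step_*/undo_* helpers standing in for the registration calls in the hunk:

    #include <stdio.h>
    #include <stdlib.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }          /* pretend this one fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    int main(void)
    {
            if (step_a())
                    goto err;
            if (step_b())
                    goto err_undo_a;
            if (step_c())
                    goto err_undo_b;
            return 0;

    err_undo_b:
            undo_b();                /* falls through: unwind in reverse order */
    err_undo_a:
            undo_a();
    err:
            return EXIT_FAILURE;
    }
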
index f41ab0ea942a6e75a66ded6f9dcab2a999f920bb..98f17b05c68b745276ecbe67d2934918eaf340bd 100644 (file)
@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 
        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        buf += headroom; /* advance address leaving hole at front of pkt */
-       ctx = (void *)(unsigned long)len;
        get_page(alloc_frag->page);
        alloc_frag->offset += len + headroom;
        hole = alloc_frag->size - alloc_frag->offset;
        if (hole < len + headroom) {
                /* To avoid internal fragmentation, if there is very likely not
                 * enough space for another buffer, add the remaining space to
-                * the current buffer. This extra space is not included in
-                * the truesize stored in ctx.
+                * the current buffer.
                 */
                len += hole;
                alloc_frag->offset += hole;
        }
 
        sg_init_one(rq->sg, buf, len);
+       ctx = (void *)(unsigned long)len;
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));
index 2153e8062b4cefcca2ee92c6459fae295fbf4500..5cc3a07dda9e6acf202ba29a1f0a420814c4a96d 100644 (file)
@@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 
        /* Make sure there's enough writeable headroom */
        if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
-               head_delta = drvr->hdrlen - skb_headroom(skb);
+               head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
 
                brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
                          brcmf_ifname(ifp), head_delta);
index fbcbb43259366ccd87c37119a5024dbb825099ed..f3556122c6ace17c419e13023057861957a507fa 100644 (file)
@@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
                                atomic_inc(&stats->pktcow_failed);
                                return -ENOMEM;
                        }
+                       head_pad = 0;
                }
                skb_push(pkt, head_pad);
                dat_buf = (u8 *)(pkt->data);
        }
        memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
-       return 0;
+       return head_pad;
 }
 
 /**
@@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                goto fail;
        }
 
-       /* allocate scatter-gather table. sg support
-        * will be disabled upon allocation failure.
-        */
-       brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
        /* Query the F2 block size, set roundup accordingly */
        bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
        bus->roundup = min(max_roundup, bus->blocksize);
index adaa2f0097cc085b964ad4fd571b1ca17e85b8b0..fb40ddfced999ca3b4516bec1f6f4133b6b61f1a 100644 (file)
@@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
                                next_reclaimed;
                        IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
                                                  next_reclaimed);
+                       iwlagn_check_ratid_empty(priv, sta_id, tid);
                }
 
                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
-               iwlagn_check_ratid_empty(priv, sta_id, tid);
                freed = 0;
 
                /* process frames */
index 545d14b0bc92fbca03b786f537b523e6ab453600..f5c1127253cb13fcccde047ba2d210b9f41fb556 100644 (file)
@@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
        /* also account for the RFC 1042 header, of course */
        offs += 6;
 
-       return skb->len > offs + 2 &&
-              *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
+       return skb->len <= offs + 2 ||
+               *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
 }
 
 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
index bcde1ba0f1c8b620c6046286e67c46343c14a07e..c7b1e58e33847a8250ab708695373a0817f85874 100644 (file)
@@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+       if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+               /*
+                * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+                * so later code will - from now on - see that we're doing it.
+                */
+               set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+               clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
                /* Clean up some internal and mac80211 state on restart */
                iwl_mvm_restart_cleanup(mvm);
        } else {
index eaacfaf3720680e8e1bb8cbcd63d268d537d512a..ddd8719f27b8f7a9c22c7a25b9c78618bbaba95e 100644 (file)
@@ -1090,6 +1090,7 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
  * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
  * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
  * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
  * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1101,6 +1102,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
        IWL_MVM_STATUS_HW_CTKILL,
        IWL_MVM_STATUS_ROC_RUNNING,
+       IWL_MVM_STATUS_HW_RESTART_REQUESTED,
        IWL_MVM_STATUS_IN_HW_RESTART,
        IWL_MVM_STATUS_IN_D0I3,
        IWL_MVM_STATUS_ROC_AUX_RUNNING,
index 4d1188b8736ab40b36095f4027689d516aaf0100..9c175d5e9d67971266c6828a4191b3bda8f89756 100644 (file)
@@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         */
        if (!mvm->fw_restart && fw_error) {
                iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
-                                           NULL);
-       } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
-                                   &mvm->status)) {
+                                       NULL);
+       } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
                IWL_ERR(mvm,
@@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 
                if (fw_error && mvm->fw_restart > 0)
                        mvm->fw_restart--;
+               set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
                ieee80211_restart_hw(mvm->hw);
        }
 }
index 4df5f13fcdae7949804d2c8aa4d5fd6a556cb452..ab66b4394dfc8ca2afc0cf1321f93f28f186d1cb 100644 (file)
@@ -277,6 +277,18 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
 
        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+       /*
+        * sta should be valid unless the following happens:
+        * The firmware asserts which triggers a reconfig flow, but
+        * the reconfig fails before we set the pointer to sta into
+        * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+        * A-MPDU and hence the timer continues to run. Then, the
+        * timer expires and sta is NULL.
+        */
+       if (!sta)
+               goto unlock;
+
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
                                          sta->addr, ba_data->tid);
@@ -2015,7 +2027,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                                IWL_MAX_TID_COUNT,
                                                wdg_timeout);
 
-               if (vif->type == NL80211_IFTYPE_AP)
+               if (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)
                        mvm->probe_queue = queue;
                else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        mvm->p2p_dev_queue = queue;
index 92b3a55d0fbc2633bd6e06c5ef2e772fe98b679b..f95eec52508e9bc4784f5cda71548bbd7d761b0b 100644 (file)
@@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        init_waitqueue_head(&trans_pcie->d0i3_waitq);
 
        if (trans_pcie->msix_enabled) {
-               if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+               ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+               if (ret)
                        goto out_no_pci;
         } else {
                ret = iwl_pcie_alloc_ict(trans);
index de50418adae5013173facd2bcfaf05c9bb5bf90b..034bdb4a0b06f41b86915e568bb6a5e2c14eb0bf 100644 (file)
@@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
                struct iwl_txq *txq = trans_pcie->txq[i];
 
+               if (!test_bit(i, trans_pcie->queue_used))
+                       continue;
+
                spin_lock_bh(&txq->lock);
                if (txq->need_update) {
                        iwl_pcie_txq_inc_wr_ptr(trans, txq);
index 2a7ad5ffe997d1b2e65e9d62d9303830df74477a..cd5dc6dcb19f8a5050b7ce922b0fce612992c996 100644 (file)
@@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
                return false;
        }
 
-       if (rtlpriv->cfg->ops->get_btc_status())
-               rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
        bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
        rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
 
index fb1ebb01133f8ffba8dd785cfe27b1052a0e56f3..70723e67b7d75a4fc01ae6bac319855d0bdeb511 100644 (file)
@@ -2547,7 +2547,6 @@ struct bt_coexist_info {
 struct rtl_btc_ops {
        void (*btc_init_variables) (struct rtl_priv *rtlpriv);
        void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
-       void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
        void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
        void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
        void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
index 7147aa53e9a218b78303968b7444383a39e576ee..b1ff46fe45478e35904f13b4c4a4e2445706533b 100644 (file)
@@ -333,11 +333,11 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
        
        /* Update the symlink to the real device */
        sysfs_remove_link(&entry->kobj, "device");
+       write_unlock(&entry->rw_lock);
+
        ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
        WARN_ON(ret);
 
-       write_unlock(&entry->rw_lock);
-       
        printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
                entry->name, buf);
        
@@ -998,6 +998,7 @@ pdcs_register_pathentries(void)
                /* kobject is now registered */
                write_lock(&entry->rw_lock);
                entry->ready = 2;
+               write_unlock(&entry->rw_lock);
                
                /* Add a nice symlink to the real device */
                if (entry->dev) {
@@ -1005,7 +1006,6 @@ pdcs_register_pathentries(void)
                        WARN_ON(err);
                }
 
-               write_unlock(&entry->rw_lock);
                kobject_uevent(&entry->kobj, KOBJ_ADD);
        }
        
index 37371b89b14f3259e708debd883c57f9ae770a20..64fc59c3ae6d982ec2a7e9a0e56c54e7705d16aa 100644 (file)
@@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3
        tristate "Broadcom Northstar USB 3.0 PHY Driver"
        depends on ARCH_BCM_IPROC || COMPILE_TEST
        depends on HAS_IOMEM && OF
+       depends on MDIO_BUS
        select GENERIC_PHY
-       select MDIO_DEVICE
        help
          Enable this to support Broadcom USB 3.0 PHY connected to the USB
          controller on Northstar family.
index b0486070374028fec219f3bdee1f46fc9e50fb93..80b87954f6ddf686fe3a1b593fbebaeaf1f2c229 100644 (file)
@@ -675,6 +675,7 @@ config PEAQ_WMI
        tristate "PEAQ 2-in-1 WMI hotkey driver"
        depends on ACPI_WMI
        depends on INPUT
+       select INPUT_POLLDEV
        help
         Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
 
index f8978464df31c9a8273b93776fc1224776d71471..dad8f4afa17cd4882a9f5e4f6e816f060e459588 100644 (file)
@@ -626,7 +626,7 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
  * WMI Interface Version     8       4    <version>
  * WMI buffer length        12       4    4096
  */
-static int __init dell_wmi_check_descriptor_buffer(void)
+static int dell_wmi_check_descriptor_buffer(void)
 {
        struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
@@ -717,9 +717,15 @@ static int dell_wmi_events_set_enabled(bool enable)
 
 static int dell_wmi_probe(struct wmi_device *wdev)
 {
+       int err;
+
        struct dell_wmi_priv *priv = devm_kzalloc(
                &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
 
+       err = dell_wmi_check_descriptor_buffer();
+       if (err)
+               return err;
+
        dev_set_drvdata(&wdev->dev, priv);
 
        return dell_wmi_input_setup(wdev);
@@ -749,10 +755,6 @@ static int __init dell_wmi_init(void)
 {
        int err;
 
-       err = dell_wmi_check_descriptor_buffer();
-       if (err)
-               return err;
-
        dmi_check_system(dell_wmi_smbios_list);
 
        if (wmi_requires_smbios_request) {
index 1a764e311e11a2a9c4eeea310fbd8ac1960db704..e32ba575e8d9e2bfe3f1ff398c90ddbfe87196bf 100644 (file)
@@ -1252,12 +1252,12 @@ static int __init acpi_wmi_init(void)
 
        return 0;
 
-err_unreg_class:
-       class_unregister(&wmi_bus_class);
-
 err_unreg_bus:
        bus_unregister(&wmi_bus_type);
 
+err_unreg_class:
+       class_unregister(&wmi_bus_class);
+
        return error;
 }
 
index d384f4f86c263c57bcef36ceb0ca930fbfed9094..f4538d7a3016e2b1514df38c9e6e9b48939d6ce7 100644 (file)
@@ -1230,6 +1230,8 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        depends on SCSI_FC_ATTRS
+       depends on NVME_TARGET_FC || NVME_TARGET_FC=n
+       depends on NVME_FC || NVME_FC=n
        select CRC_T10DIF
        ---help---
           This lpfc driver supports the Emulex LightPulse
index 741d81861d17f777e8fd814edd469004e0491040..07b60a780c06b8b2be234e1a5f4ea97403f1c54f 100644 (file)
@@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
 
 ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
 $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
-       $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
+       $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \
                              $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
-                             $(src)/aic7xxx.seq
+                             $(srctree)/$(src)/aic7xxx.seq
 
 $(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
 else
@@ -72,14 +72,14 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
 
 ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
 $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
-       $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
+       $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \
                              $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
-                             $(src)/aic79xx.seq
+                             $(srctree)/$(src)/aic79xx.seq
 
 $(aic79xx-gen-y): $(obj)/aic79xx_seq.h
 else
 $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
 endif
 
-$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
-       $(MAKE) -C $(src)/aicasm
+$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl]
+       $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/
index b98c5c1056c380a10dddaa0c6b4a3e6c5d50796c..45e2d49c1fff5e5f2c6f0c11b8e5e6874a45f514 100644 (file)
@@ -1,19 +1,21 @@
 PROG=  aicasm
 
+OUTDIR ?= ./
+
 .SUFFIXES= .l .y .c .h
 
 CSRCS= aicasm.c aicasm_symbol.c
 YSRCS= aicasm_gram.y aicasm_macro_gram.y
 LSRCS= aicasm_scan.l aicasm_macro_scan.l
 
-GENHDRS=       aicdb.h $(YSRCS:.y=.h)
-GENSRCS=       $(YSRCS:.y=.c) $(LSRCS:.l=.c)
+GENHDRS=       $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h))
+GENSRCS=       $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c))
 
 SRCS=  ${CSRCS} ${GENSRCS}
 LIBS=  -ldb
 clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
 # Override default kernel CFLAGS.  This is a userland app.
-AICASM_CFLAGS:= -I/usr/include -I.
+AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR)
 LEX= flex
 YACC= bison
 YFLAGS= -d
@@ -32,22 +34,25 @@ YFLAGS+= -t -v
 LFLAGS= -d
 endif
 
-$(PROG):  ${GENHDRS} $(SRCS)
-       $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS)
+$(PROG):  $(OUTDIR) ${GENHDRS} $(SRCS)
+       $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS)
+
+$(OUTDIR):
+       mkdir -p $(OUTDIR)
 
-aicdb.h:
+$(OUTDIR)/aicdb.h:
        @if [ -e "/usr/include/db4/db_185.h" ]; then            \
-               echo "#include <db4/db_185.h>" > aicdb.h;       \
+               echo "#include <db4/db_185.h>" > $@;    \
         elif [ -e "/usr/include/db3/db_185.h" ]; then          \
-               echo "#include <db3/db_185.h>" > aicdb.h;       \
+               echo "#include <db3/db_185.h>" > $@;    \
         elif [ -e "/usr/include/db2/db_185.h" ]; then          \
-               echo "#include <db2/db_185.h>" > aicdb.h;       \
+               echo "#include <db2/db_185.h>" > $@;    \
         elif [ -e "/usr/include/db1/db_185.h" ]; then          \
-               echo "#include <db1/db_185.h>" > aicdb.h;       \
+               echo "#include <db1/db_185.h>" > $@;    \
         elif [ -e "/usr/include/db/db_185.h" ]; then           \
-               echo "#include <db/db_185.h>" > aicdb.h;        \
+               echo "#include <db/db_185.h>" > $@;     \
         elif [ -e "/usr/include/db_185.h" ]; then              \
-               echo "#include <db_185.h>" > aicdb.h;           \
+               echo "#include <db_185.h>" > $@;                \
         else                                                   \
                echo "*** Install db development libraries";    \
         fi
@@ -58,23 +63,23 @@ clean:
 # Create a dependency chain in generated files
 # to avoid concurrent invocations of the single
 # rule that builds them all.
-aicasm_gram.c: aicasm_gram.h
-aicasm_gram.c aicasm_gram.h: aicasm_gram.y
+$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
+$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
        $(YACC) $(YFLAGS) -b $(<:.y=) $<
-       mv $(<:.y=).tab.c $(<:.y=.c)
-       mv $(<:.y=).tab.h $(<:.y=.h)
+       mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
+       mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
 
 # Create a dependency chain in generated files
 # to avoid concurrent invocations of the single
 # rule that builds them all.
-aicasm_macro_gram.c: aicasm_macro_gram.h
-aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y
+$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
+$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
        $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
-       mv $(<:.y=).tab.c $(<:.y=.c)
-       mv $(<:.y=).tab.h $(<:.y=.h)
+       mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
+       mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
 
-aicasm_scan.c: aicasm_scan.l
-       $(LEX) $(LFLAGS) -o$@ $<
+$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
+       $(LEX) $(LFLAGS) -o $@ $<
 
-aicasm_macro_scan.c: aicasm_macro_scan.l
-       $(LEX) $(LFLAGS) -Pmm -o$@ $<
+$(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l
+       $(LEX) $(LFLAGS) -Pmm -o $@ $<
index e4c83b7c96a8180c856627562549c4aae362dcdf..1a4cfa562a6087206ee2f68eaf3fc63988d1934a 100644 (file)
@@ -2128,6 +2128,13 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
 
+       if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
+               pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+                       task, task->sc, tcp_task,
+                       tcp_task ? tcp_task->dd_data : NULL, tdata);
+               return;
+       }
+
        log_debug(1 << CXGBI_DBG_ISCSI,
                "task 0x%p, skb 0x%p, itt 0x%x.\n",
                task, tdata->skb, task->hdr_itt);
index f990ab4d45e1bf72b3adf8991b11c01309c7530b..985510628f564286f5df386085476e9020e2ad47 100644 (file)
@@ -425,7 +425,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 int
 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
 {
-       u32 max_mpt_cmd, i;
+       u32 max_mpt_cmd, i, j;
        struct fusion_context *fusion;
 
        fusion = instance->ctrl_context;
@@ -450,11 +450,15 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
                fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
                                              GFP_KERNEL);
                if (!fusion->cmd_list[i]) {
+                       for (j = 0; j < i; j++)
+                               kfree(fusion->cmd_list[j]);
+                       kfree(fusion->cmd_list);
                        dev_err(&instance->pdev->dev,
                                "Failed from %s %d\n",  __func__, __LINE__);
                        return -ENOMEM;
                }
        }
+
        return 0;
 }
 int
index 21331453db7bd29a0c2bc8cac430b8c8bb60b9c2..2ff753ce6e27a49a9e395c52057998b508c3e66f 100644 (file)
@@ -5,6 +5,7 @@ config QEDI
        select SCSI_ISCSI_ATTRS
        select QED_LL2
        select QED_ISCSI
+       select ISCSI_BOOT_SYSFS
        ---help---
        This driver supports iSCSI offload for the QLogic FastLinQ
        41000 Series Converged Network Adapters.
index 80edd28b635f5dc6fa11d9d867e295a19122e586..37da9a8b43b1f1dfa25133cab878ce39d6efcde3 100644 (file)
@@ -824,7 +824,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        u32 iscsi_cid = QEDI_CID_RESERVED;
        u16 len = 0;
        char *buf = NULL;
-       int ret;
+       int ret, tmp;
 
        if (!shost) {
                ret = -ENXIO;
@@ -940,10 +940,10 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 
 ep_rel_conn:
        qedi->ep_tbl[iscsi_cid] = NULL;
-       ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
-       if (ret)
+       tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+       if (tmp)
                QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
-                         ret);
+                         tmp);
 ep_free_sq:
        qedi_free_sq(qedi, qedi_ep);
 ep_conn_exit:
index 7e24aa30c3b05b487130b028d4aa5688d3f7b6cf..892fbd9800d986a8dba22efac7ac4803be2c5f17 100644 (file)
@@ -1286,7 +1286,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
        unsigned long flags;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+       if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return -EBUSY;
        }
@@ -2430,8 +2430,10 @@ fc_remove_host(struct Scsi_Host *shost)
        spin_lock_irqsave(shost->host_lock, flags);
 
        /* Remove any vports */
-       list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
+       list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+               vport->flags |= FC_VPORT_DELETING;
                fc_queue_work(shost, &vport->vport_delete_work);
+       }
 
        /* Remove any remote ports */
        list_for_each_entry_safe(rport, next_rport,
index e4613a3c362dae8ffbb1e701b4dca51bb858625a..9cb3f722dce13aea78b5e81152e27ca1346f10f9 100644 (file)
@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
-       vq->last_used_event = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                        r = -EINVAL;
                        break;
                }
-               vq->last_avail_idx = vq->last_used_event = s.num;
+               vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        __u16 old, new;
        __virtio16 event;
        bool v;
+       /* Flush out used index updates. This is paired
+        * with the barrier that the Guest executes when enabling
+        * interrupts. */
+       smp_mb();
 
        if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
            unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                __virtio16 flags;
-               /* Flush out used index updates. This is paired
-                * with the barrier that the Guest executes when enabling
-                * interrupts. */
-               smp_mb();
                if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
                        vq_err(vq, "Failed to get flags");
                        return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        if (unlikely(!v))
                return true;
 
-       /* We're sure if the following conditions are met, there's no
-        * need to notify guest:
-        * 1) cached used event is ahead of new
-        * 2) old to new updating does not cross cached used event. */
-       if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
-           !vring_need_event(vq->last_used_event, new, old))
-               return false;
-
-       /* Flush out used index updates. This is paired
-        * with the barrier that the Guest executes when enabling
-        * interrupts. */
-       smp_mb();
-
        if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
                vq_err(vq, "Failed to get used event idx");
                return true;
        }
-       vq->last_used_event = vhost16_to_cpu(vq, event);
-
-       return vring_need_event(vq->last_used_event, new, old);
+       return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
index f72095868b933735a08c3e6c343504ba112cfefd..bb7c29b8b9fc83e63cdd6be02acd58690e36cb57 100644 (file)
@@ -115,9 +115,6 @@ struct vhost_virtqueue {
        /* Last index we used. */
        u16 last_used_idx;
 
-       /* Last used evet we've seen */
-       u16 last_used_event;
-
        /* Used flags */
        u16 used_flags;
 
index 50566acb54694ac4f14330f4bbb7ab360ba15f88..e9bea90dc0179770b9fbfa39fe6651c5949f0d62 100644 (file)
@@ -660,9 +660,6 @@ int nfs4_detect_session_trunking(struct nfs_client *clp,
        if (!nfs4_check_server_scope(clp->cl_serverscope, res->server_scope))
                goto out_err;
 
-       /* Session trunking passed, add the xprt */
-       rpc_clnt_xprt_switch_add_xprt(clp->cl_rpcclient, xprt);
-
        pr_info("NFS:  %s: Session trunking succeeded for %s\n",
                clp->cl_hostname,
                xprt->address_strings[RPC_DISPLAY_ADDR]);
index 18ca6879d8de9ecebf22d0f4f9ba73b66fdeeddc..ffd2e712595d8ac875dc2780b52731543c687ca8 100644 (file)
@@ -7461,7 +7461,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
                        cdata->res.server_scope = NULL;
                }
                /* Save the EXCHANGE_ID verifier session trunk tests */
-               memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
+               memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
                       sizeof(clp->cl_confirm.data));
        }
 out:
@@ -7474,10 +7474,6 @@ static void nfs4_exchange_id_release(void *data)
        struct nfs41_exchange_id_data *cdata =
                                        (struct nfs41_exchange_id_data *)data;
 
-       if (cdata->xprt) {
-               xprt_put(cdata->xprt);
-               rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
-       }
        nfs_put_client(cdata->args.client);
        kfree(cdata->res.impl_id);
        kfree(cdata->res.server_scope);
@@ -7498,7 +7494,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
                        u32 sp4_how, struct rpc_xprt *xprt)
 {
-       nfs4_verifier verifier;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
                .rpc_cred = cred,
@@ -7507,7 +7502,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
                .rpc_client = clp->cl_rpcclient,
                .callback_ops = &nfs4_exchange_id_call_ops,
                .rpc_message = &msg,
-               .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
+               .flags = RPC_TASK_TIMEOUT,
        };
        struct nfs41_exchange_id_data *calldata;
        struct rpc_task *task;
@@ -7522,8 +7517,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
                return -ENOMEM;
        }
 
-       if (!xprt)
-               nfs4_init_boot_verifier(clp, &verifier);
+       nfs4_init_boot_verifier(clp, &calldata->args.verifier);
 
        status = nfs4_init_uniform_client_string(clp);
        if (status)
@@ -7562,11 +7556,9 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
        if (xprt) {
                calldata->xprt = xprt;
                task_setup_data.rpc_xprt = xprt;
-               task_setup_data.flags =
-                               RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
-               calldata->args.verifier = &clp->cl_confirm;
-       } else {
-               calldata->args.verifier = &verifier;
+               task_setup_data.flags |= RPC_TASK_SOFTCONN;
+               memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
+                               sizeof(calldata->args.verifier.data));
        }
        calldata->args.client = clp;
 #ifdef CONFIG_NFS_V4_1_MIGRATION
@@ -7585,12 +7577,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       if (!xprt) {
-               status = rpc_wait_for_completion_task(task);
-               if (!status)
-                       status = calldata->rpc_status;
-       } else  /* session trunking test */
-               status = calldata->rpc_status;
+       status = calldata->rpc_status;
 
        rpc_put_task(task);
 out:
index fa3eb361d4f863be18072272293dcbdcedfd887f..37c8af00327588d772610cac487fcf0651cf8fbf 100644 (file)
@@ -1785,7 +1785,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
        int len = 0;
 
        encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
-       encode_nfs4_verifier(xdr, args->verifier);
+       encode_nfs4_verifier(xdr, &args->verifier);
 
        encode_string(xdr, strlen(args->client->cl_owner_id),
                        args->client->cl_owner_id);
index e1b442996f810529a755533270b6d21c350fbd5a..474d6bbc158ccb3797a93a02740852b6046ccac8 100644 (file)
@@ -128,6 +128,7 @@ struct inet6_skb_parm {
 #define IP6SKB_FRAGMENTED      16
 #define IP6SKB_HOPBYHOP        32
 #define IP6SKB_L3SLAVE         64
+#define IP6SKB_JUMBOGRAM      128
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,6 +153,11 @@ static inline int inet6_iif(const struct sk_buff *skb)
        return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
+static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
+{
+       return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
+}
+
 /* can not be used in TCP layer after tcp_v6_fill_cb */
 static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
index 55de3da58b1c4f5582f95e09a868f6ffb1de852a..931c32f1f18d38835425eb21a4c952e1195e4f10 100644 (file)
@@ -435,7 +435,7 @@ enum {
        ATA_HORKAGE_NOLPM       = (1 << 20),    /* don't use LPM */
        ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),  /* some WDs have broken LPM */
        ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
-       ATA_HORKAGE_NO_NCQ_LOG  = (1 << 23),    /* don't use NCQ for log read */
+       ATA_HORKAGE_NO_DMA_LOG  = (1 << 23),    /* don't use DMA for log read */
        ATA_HORKAGE_NOTRIM      = (1 << 24),    /* don't use TRIM */
        ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),   /* Limit max sects to 1024 */
 
index 87869c04849ad6681cb3604320c8c39a5745e995..3030121b474601b3dc7003bb5b0a48a1a5d5f5ba 100644 (file)
@@ -7749,8 +7749,10 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-       u8         reserved_at_0[0x7f];
+       u8         reserved_at_0[0x7d];
 
+       u8         mtpps_enh_out_per_adj[0x1];
+       u8         mtpps_fs[0x1];
        u8         pcie_performance_group[0x1];
 };
 
@@ -8159,7 +8161,8 @@ struct mlx5_ifc_mtpps_reg_bits {
        u8         reserved_at_78[0x4];
        u8         cap_pin_4_mode[0x4];
 
-       u8         reserved_at_80[0x80];
+       u8         field_select[0x20];
+       u8         reserved_at_a0[0x60];
 
        u8         enable[0x1];
        u8         reserved_at_101[0xb];
@@ -8174,8 +8177,9 @@ struct mlx5_ifc_mtpps_reg_bits {
 
        u8         out_pulse_duration[0x10];
        u8         out_periodic_adjustment[0x10];
+       u8         enhanced_out_periodic_adjustment[0x20];
 
-       u8         reserved_at_1a0[0x60];
+       u8         reserved_at_1c0[0x20];
 };
 
 struct mlx5_ifc_mtppse_reg_bits {
index ca3bcc4ed4e55563ecd505fada0cc0dc1f5dbb8d..62cbcb842f99c2cbda7121bcb5a4ebc5e818a3bc 100644 (file)
@@ -1235,7 +1235,7 @@ struct nfs41_state_protection {
 
 struct nfs41_exchange_id_args {
        struct nfs_client               *client;
-       nfs4_verifier                   *verifier;
+       nfs4_verifier                   verifier;
        u32                             flags;
        struct nfs41_state_protection   state_protect;
 };
index 2a9567bb818636ddf979d8a2d6708991ee06d1a5..0bb5b212ab42e4a430ac721362d317f51c0f637d 100644 (file)
@@ -830,7 +830,7 @@ static inline int phy_read_status(struct phy_device *phydev)
        dev_err(&_phydev->mdio.dev, format, ##args)
 
 #define phydev_dbg(_phydev, format, args...)   \
-       dev_dbg(&_phydev->mdio.dev, format, ##args);
+       dev_dbg(&_phydev->mdio.dev, format, ##args)
 
 static inline const char *phydev_name(const struct phy_device *phydev)
 {
index c102ef65cb64a94269b950ff09c47d90bbc2d315..db6dc9dc0482bf1299c010f54873f2ae36a4684e 100644 (file)
@@ -323,6 +323,7 @@ enum {
 
        __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
        __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
+       __WQ_ORDERED_EXPLICIT   = 1 << 18, /* internal: alloc_ordered_workqueue() */
        __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
 
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)                   \
-       alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+       alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |                \
+                       __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
 
 #define create_workqueue(name)                                         \
        alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
index 980807d7506fe3ae6f5c316da091e126e619ed69..45fd4c6056b53cd5fd88d84d45509277921aaf29 100644 (file)
@@ -469,7 +469,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
 
 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
-     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <\
+     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
       (void *)chunk + end) &&\
      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
@@ -481,7 +481,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
 #define _sctp_walk_errors(err, chunk_hdr, end)\
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
            sizeof(struct sctp_chunkhdr));\
-     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <\
+     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
       (void *)chunk_hdr + end) &&\
      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
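
The change from < to <= above is an off-by-one fix: a parameter (or error) header whose length field ends exactly at the end of the chunk is still fully readable and must not be skipped. A standalone sketch of the bound check, using a simplified hypothetical 4-byte header in place of sctp_paramhdr:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct paramhdr {
            uint16_t type;
            uint16_t length;
    };

    /* Can the 'length' field of a header placed at byte 'pos' be read from a
     * chunk that is 'end' bytes long?  Using <= (as the fix does) accepts a
     * header whose length field ends exactly on the chunk boundary. */
    static int length_field_readable(size_t pos, size_t end)
    {
            return pos + offsetof(struct paramhdr, length)
                       + sizeof(uint16_t) <= end;
    }

    int main(void)
    {
            /* 4-byte header at offset 4 of an 8-byte chunk:
             * 4 + 2 + 2 == 8 -> readable with <=, wrongly rejected with < */
            printf("%d\n", length_field_readable(4, 8));   /* prints 1 */
            return 0;
    }
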
index 972ce4baab6b2a4b0539624d1a671c632d77514c..cc8036987dcb885012c6c5eda0fb2bed2e588841 100644 (file)
@@ -260,6 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 
 void udp_v4_early_demux(struct sk_buff *skb);
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
                                  const struct sock *));
@@ -305,33 +306,44 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /* UDP uses skb->dev_scratch to cache as much information as possible and avoid
  * possibly multiple cache miss on dequeue()
  */
-#if BITS_PER_LONG == 64
-
-/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
- * cold cache lines at recvmsg time.
- * skb->len can be stored on 16 bits since the udp header has been already
- * validated and pulled.
- */
 struct udp_dev_scratch {
-       u32 truesize;
+       /* skb->truesize and the stateless bit are embedded in a single field;
+        * do not use a bitfield since the compiler emits better/smaller code
+        * this way
+        */
+       u32 _tsize_state;
+
+#if BITS_PER_LONG == 64
+       /* len and the bit needed to compute skb_csum_unnecessary
+        * will be on cold cache lines at recvmsg time.
+        * skb->len can be stored on 16 bits since the udp header has been
+        * already validated and pulled.
+        */
        u16 len;
        bool is_linear;
        bool csum_unnecessary;
+#endif
 };
 
+static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
+{
+       return (struct udp_dev_scratch *)&skb->dev_scratch;
+}
+
+#if BITS_PER_LONG == 64
 static inline unsigned int udp_skb_len(struct sk_buff *skb)
 {
-       return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+       return udp_skb_scratch(skb)->len;
 }
 
 static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
 {
-       return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+       return udp_skb_scratch(skb)->csum_unnecessary;
 }
 
 static inline bool udp_skb_is_linear(struct sk_buff *skb)
 {
-       return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+       return udp_skb_scratch(skb)->is_linear;
 }
 
 #else
index 045646da97cc5dc55a5063301a2e76e4178a8ea3..6c772adabad2909628ea9199cfb10d0b07f2ac82 100644 (file)
@@ -1289,7 +1289,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        info_len = min_t(u32, sizeof(info), info_len);
 
        if (copy_from_user(&info, uinfo, info_len))
-               return err;
+               return -EFAULT;
 
        info.type = prog->type;
        info.id = prog->aux->id;
@@ -1312,7 +1312,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        ulen = info.xlated_prog_len;
-       info.xlated_prog_len = bpf_prog_size(prog->len);
+       info.xlated_prog_len = bpf_prog_insn_size(prog);
        if (info.xlated_prog_len && ulen) {
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
                ulen = min_t(u32, info.xlated_prog_len, ulen);
index af9e84a4944e60fdd617c0c69ecbc46d53e230ac..664d939723739e0333fb5921533e3327912a3b84 100644 (file)
@@ -1865,10 +1865,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
         * do our normal operations to the register, we need to set the values
         * to the min/max since they are undefined.
         */
-       if (min_val == BPF_REGISTER_MIN_RANGE)
-               dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-       if (max_val == BPF_REGISTER_MAX_RANGE)
-               dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+       if (opcode != BPF_SUB) {
+               if (min_val == BPF_REGISTER_MIN_RANGE)
+                       dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+               if (max_val == BPF_REGISTER_MAX_RANGE)
+                       dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+       }
 
        switch (opcode) {
        case BPF_ADD:
@@ -1879,10 +1881,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                dst_reg->min_align = min(src_align, dst_align);
                break;
        case BPF_SUB:
+               /* If one of our values was at the end of our ranges, then the
+                * _opposite_ value in the dst_reg goes to the end of our range.
+                */
+               if (min_val == BPF_REGISTER_MIN_RANGE)
+                       dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+               if (max_val == BPF_REGISTER_MAX_RANGE)
+                       dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
                if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
-                       dst_reg->min_value -= min_val;
+                       dst_reg->min_value -= max_val;
                if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
-                       dst_reg->max_value -= max_val;
+                       dst_reg->max_value -= min_val;
                dst_reg->min_align = min(src_align, dst_align);
                break;
        case BPF_MUL:
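
The BPF_SUB handling above is standard interval arithmetic: subtracting a source range [src_min, src_max] from a destination range [dst_min, dst_max] yields [dst_min - src_max, dst_max - src_min], so each bound of the result comes from the opposite bound of the source, exactly as the added comment notes. A tiny illustration with made-up ranges:

    #include <stdio.h>

    int main(void)
    {
            /* dst in [10, 20], src in [1, 5]:
             * smallest possible result is 10 - 5, largest is 20 - 1 */
            long dst_min = 10, dst_max = 20;
            long src_min = 1,  src_max = 5;

            printf("dst - src is in [%ld, %ld]\n",
                   dst_min - src_max, dst_max - src_min);   /* [5, 19] */
            return 0;
    }
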
index 793565c057426656312ab7b31471f6c523b7ae59..8b4c3c2f2509d2b49666410b011bf604926194f3 100644 (file)
@@ -33,6 +33,9 @@ struct cgroup_taskset {
        struct list_head        src_csets;
        struct list_head        dst_csets;
 
+       /* the number of tasks in the set */
+       int                     nr_tasks;
+
        /* the subsys currently being processed */
        int                     ssid;
 
index 620794a20a339c7e10948da829fae5a6818e598b..df2e0f14a95d8e75ecbf424a0e308cc52d5b5eb2 100644 (file)
@@ -2006,6 +2006,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
        if (!cset->mg_src_cgrp)
                return;
 
+       mgctx->tset.nr_tasks++;
+
        list_move_tail(&task->cg_list, &cset->mg_tasks);
        if (list_empty(&cset->mg_node))
                list_add_tail(&cset->mg_node,
@@ -2094,21 +2096,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
        struct css_set *cset, *tmp_cset;
        int ssid, failed_ssid, ret;
 
-       /* methods shouldn't be called if no task is actually migrating */
-       if (list_empty(&tset->src_csets))
-               return 0;
-
        /* check that we can legitimately attach to the cgroup */
-       do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-               if (ss->can_attach) {
-                       tset->ssid = ssid;
-                       ret = ss->can_attach(tset);
-                       if (ret) {
-                               failed_ssid = ssid;
-                               goto out_cancel_attach;
+       if (tset->nr_tasks) {
+               do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+                       if (ss->can_attach) {
+                               tset->ssid = ssid;
+                               ret = ss->can_attach(tset);
+                               if (ret) {
+                                       failed_ssid = ssid;
+                                       goto out_cancel_attach;
+                               }
                        }
-               }
-       } while_each_subsys_mask();
+               } while_each_subsys_mask();
+       }
 
        /*
         * Now that we're guaranteed success, proceed to move all tasks to
@@ -2137,25 +2137,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
         */
        tset->csets = &tset->dst_csets;
 
-       do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-               if (ss->attach) {
-                       tset->ssid = ssid;
-                       ss->attach(tset);
-               }
-       } while_each_subsys_mask();
+       if (tset->nr_tasks) {
+               do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+                       if (ss->attach) {
+                               tset->ssid = ssid;
+                               ss->attach(tset);
+                       }
+               } while_each_subsys_mask();
+       }
 
        ret = 0;
        goto out_release_tset;
 
 out_cancel_attach:
-       do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-               if (ssid == failed_ssid)
-                       break;
-               if (ss->cancel_attach) {
-                       tset->ssid = ssid;
-                       ss->cancel_attach(tset);
-               }
-       } while_each_subsys_mask();
+       if (tset->nr_tasks) {
+               do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+                       if (ssid == failed_ssid)
+                               break;
+                       if (ss->cancel_attach) {
+                               tset->ssid = ssid;
+                               ss->cancel_attach(tset);
+                       }
+               } while_each_subsys_mask();
+       }
 out_release_tset:
        spin_lock_irq(&css_set_lock);
        list_splice_init(&tset->dst_csets, &tset->src_csets);
@@ -2997,11 +3001,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
        cgrp->subtree_control &= ~disable;
 
        ret = cgroup_apply_control(cgrp);
-
        cgroup_finalize_control(cgrp, ret);
+       if (ret)
+               goto out_unlock;
 
        kernfs_activate(cgrp->kn);
-       ret = 0;
 out_unlock:
        cgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
@@ -4669,6 +4673,10 @@ int __init cgroup_init(void)
 
                if (ss->bind)
                        ss->bind(init_css_set.subsys[ssid]);
+
+               mutex_lock(&cgroup_mutex);
+               css_populate_dir(init_css_set.subsys[ssid]);
+               mutex_unlock(&cgroup_mutex);
        }
 
        /* init_css_set.subsys[] has been updated, re-hash */
index a86688fabc55c4a4ec2bad843f6dc1e0e411f2d1..ca937b0c3a968f247a227d68812c9062fb4916c5 100644 (file)
@@ -3577,6 +3577,13 @@ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 
        /* yeap, return possible CPUs in @node that @attrs wants */
        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+
+       if (cpumask_empty(cpumask)) {
+               pr_warn_once("WARNING: workqueue cpumask: online intersect > "
+                               "possible intersect\n");
+               return false;
+       }
+
        return !cpumask_equal(cpumask, attrs->cpumask);
 
 use_dfl:
@@ -3744,8 +3751,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
                return -EINVAL;
 
        /* creating multiple pwqs breaks ordering guarantee */
-       if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-               return -EINVAL;
+       if (!list_empty(&wq->pwqs)) {
+               if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+                       return -EINVAL;
+
+               wq->flags &= ~__WQ_ORDERED;
+       }
 
        ctx = apply_wqattrs_prepare(wq, attrs);
        if (!ctx)
@@ -3929,6 +3940,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        struct workqueue_struct *wq;
        struct pool_workqueue *pwq;
 
+       /*
+        * Unbound && max_active == 1 used to imply ordered, which is no
+        * longer the case on NUMA machines due to per-node pools.  While
+        * alloc_ordered_workqueue() is the right way to create an ordered
+        * workqueue, keep the previous behavior to avoid subtle breakages
+        * on NUMA.
+        */
+       if ((flags & WQ_UNBOUND) && max_active == 1)
+               flags |= __WQ_ORDERED;
+
        /* see the comment above the definition of WQ_POWER_EFFICIENT */
        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
                flags |= WQ_UNBOUND;
@@ -4119,13 +4140,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
        struct pool_workqueue *pwq;
 
        /* disallow meddling with max_active for ordered workqueues */
-       if (WARN_ON(wq->flags & __WQ_ORDERED))
+       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                return;
 
        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
        mutex_lock(&wq->mutex);
 
+       wq->flags &= ~__WQ_ORDERED;
        wq->saved_max_active = max_active;
 
        for_each_pwq(pwq, wq)
@@ -5253,7 +5275,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
         * attributes breaks ordering guarantee.  Disallow exposing ordered
         * workqueues.
         */
-       if (WARN_ON(wq->flags & __WQ_ORDERED))
+       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                return -EINVAL;
 
        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
index 64e899b633371d252deaf057156408a8ea8f4625..0ffca990a83370d13553266be315f9373d6995da 100644 (file)
@@ -56,8 +56,13 @@ static bool enomem_retry = false;
 module_param(enomem_retry, bool, 0);
 MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
 
+struct test_obj_val {
+       int     id;
+       int     tid;
+};
+
 struct test_obj {
-       int                     value;
+       struct test_obj_val     value;
        struct rhash_head       node;
 };
 
@@ -72,7 +77,7 @@ static struct test_obj array[MAX_ENTRIES];
 static struct rhashtable_params test_rht_params = {
        .head_offset = offsetof(struct test_obj, node),
        .key_offset = offsetof(struct test_obj, value),
-       .key_len = sizeof(int),
+       .key_len = sizeof(struct test_obj_val),
        .hashfn = jhash,
        .nulls_base = (3U << RHT_BASE_SHIFT),
 };
@@ -109,24 +114,26 @@ static int __init test_rht_lookup(struct rhashtable *ht)
        for (i = 0; i < entries * 2; i++) {
                struct test_obj *obj;
                bool expected = !(i % 2);
-               u32 key = i;
+               struct test_obj_val key = {
+                       .id = i,
+               };
 
-               if (array[i / 2].value == TEST_INSERT_FAIL)
+               if (array[i / 2].value.id == TEST_INSERT_FAIL)
                        expected = false;
 
                obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 
                if (expected && !obj) {
-                       pr_warn("Test failed: Could not find key %u\n", key);
+                       pr_warn("Test failed: Could not find key %u\n", key.id);
                        return -ENOENT;
                } else if (!expected && obj) {
                        pr_warn("Test failed: Unexpected entry found for key %u\n",
-                               key);
+                               key.id);
                        return -EEXIST;
                } else if (expected && obj) {
-                       if (obj->value != i) {
+                       if (obj->value.id != i) {
                                pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
-                                       obj->value, i);
+                                       obj->value.id, i);
                                return -EINVAL;
                        }
                }
@@ -195,7 +202,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
        for (i = 0; i < entries; i++) {
                struct test_obj *obj = &array[i];
 
-               obj->value = i * 2;
+               obj->value.id = i * 2;
                err = insert_retry(ht, &obj->node, test_rht_params);
                if (err > 0)
                        insert_retries += err;
@@ -216,9 +223,11 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 
        pr_info("  Deleting %d keys\n", entries);
        for (i = 0; i < entries; i++) {
-               u32 key = i * 2;
+               struct test_obj_val key = {
+                       .id = i * 2,
+               };
 
-               if (array[i].value != TEST_INSERT_FAIL) {
+               if (array[i].value.id != TEST_INSERT_FAIL) {
                        obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
                        BUG_ON(!obj);
 
@@ -242,18 +251,21 @@ static int thread_lookup_test(struct thread_data *tdata)
 
        for (i = 0; i < entries; i++) {
                struct test_obj *obj;
-               int key = (tdata->id << 16) | i;
+               struct test_obj_val key = {
+                       .id = i,
+                       .tid = tdata->id,
+               };
 
                obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
-               if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
-                       pr_err("  found unexpected object %d\n", key);
+               if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
+                       pr_err("  found unexpected object %d-%d\n", key.tid, key.id);
                        err++;
-               } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
-                       pr_err("  object %d not found!\n", key);
+               } else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
+                       pr_err("  object %d-%d not found!\n", key.tid, key.id);
                        err++;
-               } else if (obj && (obj->value != key)) {
-                       pr_err("  wrong object returned (got %d, expected %d)\n",
-                              obj->value, key);
+               } else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
+                       pr_err("  wrong object returned (got %d-%d, expected %d-%d)\n",
+                              obj->value.tid, obj->value.id, key.tid, key.id);
                        err++;
                }
 
@@ -272,7 +284,8 @@ static int threadfunc(void *data)
                pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
 
        for (i = 0; i < entries; i++) {
-               tdata->objs[i].value = (tdata->id << 16) | i;
+               tdata->objs[i].value.id = i;
+               tdata->objs[i].value.tid = tdata->id;
                err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
                if (err > 0) {
                        insert_retries += err;
@@ -295,7 +308,7 @@ static int threadfunc(void *data)
 
        for (step = 10; step > 0; step--) {
                for (i = 0; i < entries; i += step) {
-                       if (tdata->objs[i].value == TEST_INSERT_FAIL)
+                       if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
                                continue;
                        err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
                                                     test_rht_params);
@@ -304,7 +317,7 @@ static int threadfunc(void *data)
                                       tdata->id);
                                goto out;
                        }
-                       tdata->objs[i].value = TEST_INSERT_FAIL;
+                       tdata->objs[i].value.id = TEST_INSERT_FAIL;
 
                        cond_resched();
                }
index 06b147d7d9e2e7d8d305dd9b173d9bc1a7e9a676..709a4e6fb447fda886046308de5b613a88ff9dfa 100644 (file)
@@ -263,6 +263,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                return dev_set_mtu(dev, ifr->ifr_mtu);
 
        case SIOCSIFHWADDR:
+               if (dev->addr_len > sizeof(struct sockaddr))
+                       return -EINVAL;
                return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
 
        case SIOCSIFHWBROADCAST:
index 8357f164c66092b9c99996782d29de246c54dbd6..912731bed7b71b9208f3947b49c4b93922f7ab0d 100644 (file)
@@ -666,7 +666,7 @@ int netpoll_setup(struct netpoll *np)
        int err;
 
        rtnl_lock();
-       if (np->dev_name) {
+       if (np->dev_name[0]) {
                struct net *net = current->nsproxy->net_ns;
                ndev = __dev_get_by_name(net, np->dev_name);
        }
index 1704948e6a12bc87827c71ccb4e81de8b8c2c9f7..f227f002c73d382fecd98c8857ce4c9139cb7a8a 100644 (file)
@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
         * singleton values (which always leads to failure).
         * These settings can still (later) be overridden via sockopts.
         */
-       if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
-           ccid_get_builtin_ccids(&rx.val, &rx.len))
+       if (ccid_get_builtin_ccids(&tx.val, &tx.len))
                return -ENOBUFS;
+       if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
+               kfree(tx.val);
+               return -ENOBUFS;
+       }
 
        if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
            !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
index f85d901f4e3fc02741ad20f3ff3066108af1e7e8..1b202f16531fce72860a78d89ca1af716f883d99 100644 (file)
@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+       reqsk_put(req);
        return 0;
 
 drop_and_free:
index c376af5bfdfb34774d82de6858ec0e4b6e3380bb..1b58eac8aad326b6db09a4cee82fdcc178c2afad 100644 (file)
@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+       reqsk_put(req);
        return 0;
 
 drop_and_free:
index 56e46090526bb3baf26b5e06478cdd66ccc94280..c442051d5a55732d37ddc18187389d66d8d08bd9 100644 (file)
@@ -509,21 +509,22 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
                dst->cpu_dp->netdev = ethernet_dev;
        }
 
+       /* Initialize cpu_port_mask now for drv->setup()
+        * to have access to a correct value, just like what
+        * net/dsa/dsa.c::dsa_switch_setup_one does.
+        */
+       ds->cpu_port_mask |= BIT(index);
+
        tag_protocol = ds->ops->get_tag_protocol(ds);
        dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
        if (IS_ERR(dst->tag_ops)) {
                dev_warn(ds->dev, "No tagger for this switch\n");
+               ds->cpu_port_mask &= ~BIT(index);
                return PTR_ERR(dst->tag_ops);
        }
 
        dst->rcv = dst->tag_ops->rcv;
 
-       /* Initialize cpu_port_mask now for drv->setup()
-        * to have access to a correct value, just like what
-        * net/dsa/dsa.c::dsa_switch_setup_one does.
-        */
-       ds->cpu_port_mask |= BIT(index);
-
        return 0;
 }
 
index 2221001038084af89b6f9fa0e306c2320ece4a3f..b8d18171cca33ab5dab67408c3cd11ad57f25b83 100644 (file)
@@ -1452,7 +1452,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
                return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
                                          &info.info);
        case FIB_EVENT_NH_DEL:
-               if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+               if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
                     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
                    (fib_nh->nh_flags & RTNH_F_DEAD))
                        return call_fib_notifiers(dev_net(fib_nh->nh_dev),
index 4e985dea1dd24fdecfbf9b47d51cff698e97cd2f..2f1588bf73dad9b34aebee45ce738a7e9a4515ae 100644 (file)
@@ -2202,9 +2202,10 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
 {
        const u32 now = tcp_jiffies32;
+       enum tcp_chrono old = tp->chrono_type;
 
-       if (tp->chrono_type > TCP_CHRONO_UNSPEC)
-               tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
+       if (old > TCP_CHRONO_UNSPEC)
+               tp->chrono_stat[old - 1] += now - tp->chrono_start;
        tp->chrono_start = now;
        tp->chrono_type = new;
 }
index b057653ceca9208d1e1765525e5492876c0b651e..e6276fa3750b909615668fddf84495369bd7d369 100644 (file)
@@ -1163,34 +1163,32 @@ out:
        return ret;
 }
 
-#if BITS_PER_LONG == 64
+#define UDP_SKB_IS_STATELESS 0x80000000
+
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
-       struct udp_dev_scratch *scratch;
+       struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
 
        BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
-       scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
-       scratch->truesize = skb->truesize;
+       scratch->_tsize_state = skb->truesize;
+#if BITS_PER_LONG == 64
        scratch->len = skb->len;
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
+#endif
+       if (likely(!skb->_skb_refdst))
+               scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
 static int udp_skb_truesize(struct sk_buff *skb)
 {
-       return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
-}
-#else
-static void udp_set_dev_scratch(struct sk_buff *skb)
-{
-       skb->dev_scratch = skb->truesize;
+       return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
 }
 
-static int udp_skb_truesize(struct sk_buff *skb)
+static bool udp_skb_has_head_state(struct sk_buff *skb)
 {
-       return skb->dev_scratch;
+       return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
 }
-#endif
 
 /* fully reclaim rmem/fwd memory allocated for skb */
 static void udp_rmem_release(struct sock *sk, int size, int partial,
@@ -1388,10 +1386,10 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
                unlock_sock_fast(sk, slow);
        }
 
-       /* we cleared the head states previously only if the skb lacks any IP
-        * options, see __udp_queue_rcv_skb().
+       /* In the more common cases we cleared the head states previously,
+        * see __udp_queue_rcv_skb().
         */
-       if (unlikely(IPCB(skb)->opt.optlen > 0))
+       if (unlikely(udp_skb_has_head_state(skb)))
                skb_release_head_state(skb);
        consume_stateless_skb(skb);
 }
@@ -1784,11 +1782,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                sk_mark_napi_id_once(sk, skb);
        }
 
-       /* At recvmsg() time we need skb->dst to process IP options-related
-        * cmsg, elsewhere can we clear all pending head states while they are
-        * hot in the cache
+       /* At recvmsg() time we may access skb->dst or skb->sp depending on
+        * the IP options and the cmsg flags; elsewhere we can clear all
+        * pending head states while they are hot in the cache
         */
-       if (likely(IPCB(skb)->opt.optlen == 0))
+       if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb)))
                skb_release_head_state(skb);
 
        rc = __udp_enqueue_schedule_skb(sk, skb);
@@ -1930,7 +1928,7 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
        struct dst_entry *old;
 
@@ -1939,6 +1937,7 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
                dst_release(old);
        }
 }
+EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
 /*
  *     Multicasts and broadcasts go to each listener.
index 4996d734f1d2a8c535aa34f3bd7a7186279a07c2..3cec529c61130f5f43b7d65c4f4cb4ca63d1bf98 100644 (file)
@@ -756,6 +756,7 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
        if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
                goto drop;
 
+       IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
        return true;
 
 drop:
index 1422d6c083773549d667dc88ffe07d447d5c8e97..162efba0d0cd851848363588318cf6ade4a5a62c 100644 (file)
@@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                *prevhdr = NEXTHDR_FRAGMENT;
                tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
-                       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                     IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }
@@ -789,8 +787,6 @@ slow_path:
                frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
                                 hroom + troom, GFP_ATOMIC);
                if (!frag) {
-                       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                     IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }
index 4a3e65626e8baddf5d7d1c246e6e5fded2b08b8a..578142b7ca3e6e91e528b8c81addec812bb6a5ca 100644 (file)
@@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
                                          struct udp_table *udptable)
 {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
-       struct sock *sk;
 
-       sk = skb_steal_sock(skb);
-       if (unlikely(sk))
-               return sk;
        return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
                                 &iph->daddr, dport, inet6_iif(skb),
                                 udptable, skb);
@@ -332,6 +328,15 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 #endif
 
+/* do not use the scratch area len for jumbograms: their length exceeds the
+ * scratch area space; note that the IP6CB flags are still in the first
+ * cacheline, so checking for jumbograms is cheap
+ */
+static int udp6_skb_len(struct sk_buff *skb)
+{
+       return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
+}
+
 /*
  *     This should be easy, if there is something there we
  *     return it, otherwise we block.
@@ -362,7 +367,7 @@ try_again:
        if (!skb)
                return err;
 
-       ulen = udp_skb_len(skb);
+       ulen = udp6_skb_len(skb);
        copied = len;
        if (copied > ulen - off)
                copied = ulen - off;
@@ -804,6 +809,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp6_csum_init(skb, uh, proto))
                goto csum_error;
 
+       /* Check if the socket is already available, e.g. due to early demux */
+       sk = skb_steal_sock(skb);
+       if (sk) {
+               struct dst_entry *dst = skb_dst(skb);
+               int ret;
+
+               if (unlikely(sk->sk_rx_dst != dst))
+                       udp_sk_rx_dst_set(sk, dst);
+
+               ret = udpv6_queue_rcv_skb(sk, skb);
+               sock_put(sk);
+
+               /* a return value > 0 means to resubmit the input */
+               if (ret > 0)
+                       return ret;
+               return 0;
+       }
+
        /*
         *      Multicast receive code
         */
@@ -812,11 +835,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                saddr, daddr, udptable, proto);
 
        /* Unicast */
-
-       /*
-        * check socket cache ... must talk to Alan about his plans
-        * for sock caches... i'll skip this for now.
-        */
        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
        if (sk) {
                int ret;
index e3c4c6c3fef7f274aef72cda0d21fc00c72de24a..03859e386b47c95838c50dba5e09b75163b759dc 100644 (file)
@@ -1310,8 +1310,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
-               int maxlen = ovs_ct_attr_lens[type].maxlen;
-               int minlen = ovs_ct_attr_lens[type].minlen;
+               int maxlen;
+               int minlen;
 
                if (type > OVS_CT_ATTR_MAX) {
                        OVS_NLERR(log,
@@ -1319,6 +1319,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                                  type, OVS_CT_ATTR_MAX);
                        return -EINVAL;
                }
+
+               maxlen = ovs_ct_attr_lens[type].maxlen;
+               minlen = ovs_ct_attr_lens[type].minlen;
                if (nla_len(a) < minlen || nla_len(a) > maxlen) {
                        OVS_NLERR(log,
                                  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
index 008bb34ee3240495ecb0ed65bf55d5edd20e1186..0615c2a950fab992134d0071707b5b336f6fb231 100644 (file)
@@ -4329,7 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                register_prot_hook(sk);
        }
        spin_unlock(&po->bind_lock);
-       if (closing && (po->tp_version > TPACKET_V2)) {
+       if (pg_vec && (po->tp_version > TPACKET_V2)) {
                /* Because we don't support block-based V3 on tx-ring */
                if (!tx_ring)
                        prb_shutdown_retire_blk_timer(po, rb_queue);
index bf2122691fba25ae778d6fd68a179b1e9163defe..ad22df1ffbd13bbea78df55b8347aa32ae2105ad 100644 (file)
@@ -1916,7 +1916,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
        if (copy_from_user(&msg, umsg, sizeof(*umsg)))
                return -EFAULT;
 
-       kmsg->msg_control = msg.msg_control;
+       kmsg->msg_control = (void __force *)msg.msg_control;
        kmsg->msg_controllen = msg.msg_controllen;
        kmsg->msg_flags = msg.msg_flags;
 
@@ -1935,7 +1935,8 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
 
        if (msg.msg_name && kmsg->msg_namelen) {
                if (!save_addr) {
-                       err = move_addr_to_kernel(msg.msg_name, kmsg->msg_namelen,
+                       err = move_addr_to_kernel(msg.msg_name,
+                                                 kmsg->msg_namelen,
                                                  kmsg->msg_name);
                        if (err < 0)
                                return err;
index 9c823a609e75f8d66bbe1fa31a1ecebac7f65311..270edcc149a113c4026602f9d3a35e01475086df 100644 (file)
@@ -147,9 +147,9 @@ int _geneve_set_tunnel(struct __sk_buff *skb)
        __builtin_memset(&gopt, 0x0, sizeof(gopt));
        gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */
        gopt.type = 0x08;
-       gopt.r1 = 1;
+       gopt.r1 = 0;
        gopt.r2 = 0;
-       gopt.r3 = 1;
+       gopt.r3 = 0;
        gopt.length = 2; /* 4-byte multiple */
        *(int *) &gopt.opt_data = 0xdeadbeef;
 
index 1ff634f187b7fd927bef6a091b96e32dbca27870..a70d2ea90313fe9535abb92ea604681a73a1ce0c 100755 (executable)
@@ -149,6 +149,7 @@ function cleanup {
        ip link del veth1
        ip link del ipip11
        ip link del gretap11
+       ip link del vxlan11
        ip link del geneve11
        pkill tcpdump
        pkill cat
index 412a7c82995ae1cb595f6eb1637703ae8a1a359b..256f571f2ab525700c121c94b6a8e866acd79b73 100644 (file)
@@ -314,7 +314,6 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
        int err;
 
        bzero(&attr, sizeof(attr));
-       bzero(info, *info_len);
        attr.info.bpf_fd = prog_fd;
        attr.info.info_len = *info_len;
        attr.info.info = ptr_to_u64(info);
index 5855cd3d3d45cbd1ee967263c622d4e9d2e7b417..1f7dd35551b9136d24987d79deb2c1024cd2fc82 100644 (file)
@@ -340,6 +340,7 @@ static void test_bpf_obj_id(void)
 
                /* Check getting prog info */
                info_len = sizeof(struct bpf_prog_info) * 2;
+               bzero(&prog_infos[i], info_len);
                prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
                prog_infos[i].jited_prog_len = sizeof(jited_insns);
                prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
@@ -369,6 +370,7 @@ static void test_bpf_obj_id(void)
 
                /* Check getting map info */
                info_len = sizeof(struct bpf_map_info) * 2;
+               bzero(&map_infos[i], info_len);
                err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
                                             &info_len);
                if (CHECK(err ||
@@ -394,7 +396,7 @@ static void test_bpf_obj_id(void)
        nr_id_found = 0;
        next_id = 0;
        while (!bpf_prog_get_next_id(next_id, &next_id)) {
-               struct bpf_prog_info prog_info;
+               struct bpf_prog_info prog_info = {};
                int prog_fd;
 
                info_len = sizeof(prog_info);
@@ -418,6 +420,8 @@ static void test_bpf_obj_id(void)
                nr_id_found++;
 
                err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+               prog_infos[i].jited_prog_insns = 0;
+               prog_infos[i].xlated_prog_insns = 0;
                CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      "get-prog-info(next_id->fd)",
@@ -436,7 +440,7 @@ static void test_bpf_obj_id(void)
        nr_id_found = 0;
        next_id = 0;
        while (!bpf_map_get_next_id(next_id, &next_id)) {
-               struct bpf_map_info map_info;
+               struct bpf_map_info map_info = {};
                int map_fd;
 
                info_len = sizeof(map_info);
index af7d173910f4b71023badb38011a27179f92fdf9..addea82f76c943edb42ebbb6075abe849b5b7f32 100644 (file)
@@ -5980,6 +5980,34 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .result_unpriv = REJECT,
        },
+       {
+               "subtraction bounds (map value)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)